From 09646a012aba42575ed8ea66c91811f04aebcae6 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?colin=20axn=C3=A9r?= <25233464+colin-axner@users.noreply.github.com> Date: Mon, 22 Feb 2021 15:34:34 +0100 Subject: [PATCH] Initial Setup + Protobuf files (#2) * initial setup Add CODEOWNERS, go.mod file, copy Makefile from SDK, add proto files for client with updated import/package name, modify API route to drop v1beta1 in favor of v1, get third_party vendor files * generate proto files * add more proto files * add connection and commitment proto files * finish adding proto files * make proto-all, remove unnecessary script, add buf file * bump sdk to master, copy code directly * make proto-all --- .github/CODEOWNERS | 3 + Makefile | 475 + applications/transfer/client/cli/cli.go | 42 + applications/transfer/client/cli/query.go | 108 + applications/transfer/client/cli/tx.go | 117 + applications/transfer/handler.go | 23 + applications/transfer/handler_test.go | 123 + applications/transfer/keeper/MBT_README.md | 51 + applications/transfer/keeper/encoding.go | 35 + applications/transfer/keeper/genesis.go | 45 + applications/transfer/keeper/genesis_test.go | 39 + applications/transfer/keeper/grpc_query.go | 83 + .../transfer/keeper/grpc_query_test.go | 142 + applications/transfer/keeper/keeper.go | 169 + applications/transfer/keeper/keeper_test.go | 51 + .../transfer/keeper/mbt_relay_test.go | 378 + .../model_based_tests/Test5Packets.json | 492 + .../keeper/model_based_tests/Test5Packets.tla | 1056 +++ .../Test5PacketsAllDifferentPass.json | 612 ++ .../Test5PacketsAllDifferentPass.tla | 1188 +++ .../TestOnRecvAcknowledgementErrorFail.json | 58 + .../TestOnRecvAcknowledgementErrorFail.tla | 159 + .../TestOnRecvAcknowledgementErrorPass.json | 159 + .../TestOnRecvAcknowledgementErrorPass.tla | 310 + .../TestOnRecvAcknowledgementResultFail.json | 58 + .../TestOnRecvAcknowledgementResultFail.tla | 159 + .../TestOnRecvAcknowledgementResultPass.json | 58 + .../TestOnRecvAcknowledgementResultPass.tla | 159 + .../TestOnRecvPacketFail.json | 58 + .../TestOnRecvPacketFail.tla | 159 + .../TestOnRecvPacketPass.json | 73 + .../TestOnRecvPacketPass.tla | 174 + .../model_based_tests/TestOnTimeoutFail.json | 58 + .../model_based_tests/TestOnTimeoutFail.tla | 159 + .../model_based_tests/TestOnTimeoutPass.json | 159 + .../model_based_tests/TestOnTimeoutPass.tla | 310 + .../TestSendTransferFail.json | 58 + .../TestSendTransferFail.tla | 159 + .../TestSendTransferPass.json | 174 + .../TestSendTransferPass.tla | 323 + .../model_based_tests/TestUnescrowTokens.json | 305 + .../model_based_tests/TestUnescrowTokens.tla | 563 ++ applications/transfer/keeper/msg_server.go | 43 + applications/transfer/keeper/params.go | 30 + applications/transfer/keeper/params_test.go | 15 + applications/transfer/keeper/relay.go | 406 + .../transfer/keeper/relay_model/account.tla | 36 + .../keeper/relay_model/account_record.tla | 46 + .../relay_model/apalache-to-relay-test.json | 100 + .../relay_model/apalache-to-relay-test2.json | 104 + .../transfer/keeper/relay_model/denom.tla | 50 + .../keeper/relay_model/denom_record.tla | 53 + .../keeper/relay_model/denom_record2.tla | 114 + .../keeper/relay_model/denom_sequence.tla | 47 + .../keeper/relay_model/identifiers.tla | 10 + .../transfer/keeper/relay_model/relay.tla | 278 + .../keeper/relay_model/relay_tests.tla | 96 + applications/transfer/keeper/relay_test.go | 392 + applications/transfer/module.go | 438 + applications/transfer/module_test.go | 246 + applications/transfer/simulation/decoder.go | 33 + 
.../transfer/simulation/decoder_test.go | 59 + applications/transfer/simulation/genesis.go | 54 + .../transfer/simulation/genesis_test.go | 74 + applications/transfer/simulation/params.go | 32 + .../transfer/simulation/params_test.go | 36 + applications/transfer/spec/01_concepts.md | 117 + applications/transfer/spec/02_state.md | 10 + .../transfer/spec/03_state_transitions.md | 36 + applications/transfer/spec/04_messages.md | 40 + applications/transfer/spec/05_events.md | 44 + applications/transfer/spec/06_metrics.md | 14 + applications/transfer/spec/07_params.md | 30 + applications/transfer/spec/README.md | 24 + applications/transfer/types/codec.go | 41 + applications/transfer/types/coin.go | 48 + applications/transfer/types/errors.go | 17 + applications/transfer/types/events.go | 21 + .../transfer/types/expected_keepers.go | 48 + applications/transfer/types/genesis.go | 35 + applications/transfer/types/genesis.pb.go | 443 + applications/transfer/types/genesis_test.go | 47 + applications/transfer/types/keys.go | 55 + applications/transfer/types/keys_test.go | 24 + applications/transfer/types/msgs.go | 85 + applications/transfer/types/msgs_test.go | 103 + applications/transfer/types/packet.go | 56 + applications/transfer/types/packet_test.go | 36 + applications/transfer/types/params.go | 65 + applications/transfer/types/params_test.go | 12 + applications/transfer/types/query.pb.go | 1418 +++ applications/transfer/types/query.pb.gw.go | 326 + applications/transfer/types/trace.go | 203 + applications/transfer/types/trace_test.go | 150 + applications/transfer/types/transfer.pb.go | 909 ++ applications/transfer/types/tx.pb.go | 804 ++ apps/transfer/types/genesis.pb.go | 443 + apps/transfer/types/query.pb.go | 1418 +++ apps/transfer/types/query.pb.gw.go | 326 + apps/transfer/types/transfer.pb.go | 908 ++ apps/transfer/types/tx.pb.go | 801 ++ buf.yaml | 34 + core/02-client/abci.go | 20 + core/02-client/abci_test.go | 60 + core/02-client/client/cli/cli.go | 51 + core/02-client/client/cli/query.go | 260 + core/02-client/client/cli/tx.go | 328 + core/02-client/client/proposal_handler.go | 8 + core/02-client/client/utils/utils.go | 199 + core/02-client/doc.go | 10 + core/02-client/genesis.go | 69 + core/02-client/keeper/client.go | 192 + core/02-client/keeper/client_test.go | 603 ++ core/02-client/keeper/encoding.go | 42 + core/02-client/keeper/grpc_query.go | 199 + core/02-client/keeper/grpc_query_test.go | 381 + core/02-client/keeper/keeper.go | 367 + core/02-client/keeper/keeper_test.go | 389 + core/02-client/keeper/params.go | 23 + core/02-client/keeper/params_test.go | 17 + core/02-client/keeper/proposal.go | 72 + core/02-client/keeper/proposal_test.go | 130 + core/02-client/module.go | 29 + core/02-client/proposal_handler.go | 22 + core/02-client/proposal_handler_test.go | 84 + core/02-client/simulation/decoder.go | 38 + core/02-client/simulation/decoder_test.go | 70 + core/02-client/simulation/genesis.go | 13 + core/02-client/types/client.go | 111 + core/02-client/types/client.pb.go | 1598 ++++ core/02-client/types/client_test.go | 87 + core/02-client/types/codec.go | 188 + core/02-client/types/codec_test.go | 210 + core/02-client/types/encoding.go | 86 + core/02-client/types/errors.go | 35 + core/02-client/types/events.go | 26 + core/02-client/types/expected_keepers.go | 14 + core/02-client/types/genesis.go | 250 + core/02-client/types/genesis.pb.go | 1060 +++ core/02-client/types/genesis_test.go | 549 ++ core/02-client/types/height.go | 188 + core/02-client/types/height_test.go | 155 + 
core/02-client/types/keys.go | 65 + core/02-client/types/keys_test.go | 54 + core/02-client/types/msgs.go | 343 + core/02-client/types/msgs_test.go | 619 ++ core/02-client/types/params.go | 71 + core/02-client/types/params_test.go | 30 + core/02-client/types/proposal.go | 64 + core/02-client/types/proposal_test.go | 86 + core/02-client/types/query.go | 65 + core/02-client/types/query.pb.go | 2685 ++++++ core/02-client/types/query.pb.gw.go | 602 ++ core/02-client/types/tx.pb.go | 2074 +++++ core/03-connection/client/cli/cli.go | 46 + core/03-connection/client/cli/query.go | 118 + core/03-connection/client/cli/tx.go | 348 + core/03-connection/client/utils/utils.go | 219 + core/03-connection/genesis.go | 28 + core/03-connection/keeper/grpc_query.go | 179 + core/03-connection/keeper/grpc_query_test.go | 420 + core/03-connection/keeper/handshake.go | 342 + core/03-connection/keeper/handshake_test.go | 701 ++ core/03-connection/keeper/keeper.go | 198 + core/03-connection/keeper/keeper_test.go | 133 + core/03-connection/keeper/verify.go | 225 + core/03-connection/keeper/verify_test.go | 514 ++ core/03-connection/module.go | 29 + core/03-connection/simulation/decoder.go | 32 + core/03-connection/simulation/decoder_test.go | 69 + core/03-connection/simulation/genesis.go | 13 + core/03-connection/types/codec.go | 47 + core/03-connection/types/connection.go | 127 + core/03-connection/types/connection.pb.go | 1801 ++++ core/03-connection/types/connection_test.go | 121 + core/03-connection/types/errors.go | 19 + core/03-connection/types/events.go | 25 + core/03-connection/types/expected_keepers.go | 16 + core/03-connection/types/genesis.go | 76 + core/03-connection/types/genesis.pb.go | 438 + core/03-connection/types/genesis_test.go | 114 + core/03-connection/types/keys.go | 61 + core/03-connection/types/keys_test.go | 49 + core/03-connection/types/msgs.go | 354 + core/03-connection/types/msgs_test.go | 243 + core/03-connection/types/query.go | 70 + core/03-connection/types/query.pb.go | 2892 ++++++ core/03-connection/types/query.pb.gw.go | 602 ++ core/03-connection/types/tx.pb.go | 2782 ++++++ core/03-connection/types/version.go | 220 + core/03-connection/types/version_test.go | 167 + core/04-channel/client/cli/cli.go | 58 + core/04-channel/client/cli/query.go | 457 + core/04-channel/client/cli/tx.go | 288 + core/04-channel/client/utils/utils.go | 301 + core/04-channel/genesis.go | 48 + core/04-channel/handler.go | 186 + core/04-channel/keeper/grpc_query.go | 486 + core/04-channel/keeper/grpc_query_test.go | 1376 +++ core/04-channel/keeper/handshake.go | 496 + core/04-channel/keeper/handshake_test.go | 773 ++ core/04-channel/keeper/keeper.go | 432 + core/04-channel/keeper/keeper_test.go | 329 + core/04-channel/keeper/packet.go | 528 ++ core/04-channel/keeper/packet_test.go | 665 ++ core/04-channel/keeper/timeout.go | 276 + core/04-channel/keeper/timeout_test.go | 351 + core/04-channel/module.go | 29 + core/04-channel/simulation/decoder.go | 48 + core/04-channel/simulation/decoder_test.go | 89 + core/04-channel/simulation/genesis.go | 13 + core/04-channel/types/channel.go | 172 + core/04-channel/types/channel.pb.go | 2270 +++++ core/04-channel/types/channel_test.go | 119 + core/04-channel/types/codec.go | 60 + core/04-channel/types/errors.go | 28 + core/04-channel/types/events.go | 46 + core/04-channel/types/expected_keepers.go | 76 + core/04-channel/types/genesis.go | 156 + core/04-channel/types/genesis.pb.go | 1017 +++ core/04-channel/types/genesis_test.go | 225 + core/04-channel/types/keys.go | 61 + 
core/04-channel/types/keys_test.go | 47 + core/04-channel/types/msgs.go | 652 ++ core/04-channel/types/msgs_test.go | 446 + core/04-channel/types/packet.go | 112 + core/04-channel/types/packet_test.go | 53 + core/04-channel/types/query.go | 94 + core/04-channel/types/query.pb.go | 7993 +++++++++++++++++ core/04-channel/types/query.pb.gw.go | 1792 ++++ core/04-channel/types/tx.pb.go | 5264 +++++++++++ core/05-port/keeper/keeper.go | 80 + core/05-port/keeper/keeper_test.go | 70 + core/05-port/types/errors.go | 13 + core/05-port/types/keys.go | 15 + core/05-port/types/module.go | 78 + core/05-port/types/router.go | 65 + core/05-port/types/utils.go | 17 + core/23-commitment/types/bench_test.go | 15 + core/23-commitment/types/codec.go | 43 + core/23-commitment/types/commitment.pb.go | 863 ++ core/23-commitment/types/commitment_test.go | 37 + core/23-commitment/types/errors.go | 15 + core/23-commitment/types/merkle.go | 312 + core/23-commitment/types/merkle_test.go | 172 + core/23-commitment/types/utils.go | 28 + core/23-commitment/types/utils_test.go | 98 + core/24-host/errors.go | 15 + core/24-host/keys.go | 235 + core/24-host/parse.go | 79 + core/24-host/parse_test.go | 48 + core/24-host/validate.go | 114 + core/24-host/validate_test.go | 119 + core/client/cli/cli.go | 50 + core/client/query.go | 67 + core/exported/channel.go | 32 + core/exported/client.go | 223 + core/exported/commitment.go | 45 + core/exported/connection.go | 26 + core/genesis.go | 27 + core/genesis_test.go | 370 + core/handler.go | 98 + core/keeper/grpc_query.go | 124 + core/keeper/keeper.go | 65 + core/keeper/msg_server.go | 616 ++ core/keeper/msg_server_test.go | 714 ++ core/module.go | 200 + core/simulation/decoder.go | 32 + core/simulation/decoder_test.go | 80 + core/simulation/genesis.go | 63 + core/simulation/genesis_test.go | 49 + core/spec/01_concepts.md | 405 + core/spec/02_state.md | 28 + core/spec/03_state_transitions.md | 106 + core/spec/04_messages.md | 497 + core/spec/05_callbacks.md | 80 + core/spec/06_events.md | 241 + core/spec/07_params.md | 21 + core/spec/README.md | 26 + core/types/codec.go | 23 + core/types/genesis.go | 38 + core/types/genesis.pb.go | 440 + core/types/query.go | 26 + docs/README.md | 114 + docs/ibc/proto-docs.md | 7521 ++++++++++++++++ docs/protodoc-markdown.tmpl | 0 go.mod | 23 + go.sum | 1035 +++ light-clients/06-solomachine/doc.go | 7 + light-clients/06-solomachine/module.go | 10 + .../06-solomachine/spec/01_concepts.md | 160 + light-clients/06-solomachine/spec/02_state.md | 12 + .../spec/03_state_transitions.md | 39 + .../06-solomachine/spec/04_messages.md | 8 + light-clients/06-solomachine/spec/README.md | 26 + .../06-solomachine/types/client_state.go | 491 + .../06-solomachine/types/client_state_test.go | 912 ++ light-clients/06-solomachine/types/codec.go | 130 + .../06-solomachine/types/codec_test.go | 190 + .../06-solomachine/types/consensus_state.go | 60 + .../types/consensus_state_test.go | 75 + light-clients/06-solomachine/types/errors.go | 18 + light-clients/06-solomachine/types/header.go | 67 + .../06-solomachine/types/header_test.go | 98 + .../06-solomachine/types/misbehaviour.go | 83 + .../types/misbehaviour_handle.go | 92 + .../types/misbehaviour_handle_test.go | 275 + .../06-solomachine/types/misbehaviour_test.go | 132 + light-clients/06-solomachine/types/proof.go | 475 + .../06-solomachine/types/proof_test.go | 102 + .../06-solomachine/types/proposal_handle.go | 64 + .../types/proposal_handle_test.go | 88 + .../06-solomachine/types/solomachine.go | 43 + 
.../06-solomachine/types/solomachine.pb.go | 4121 +++++++++ .../06-solomachine/types/solomachine_test.go | 113 + light-clients/06-solomachine/types/update.go | 89 + .../06-solomachine/types/update_test.go | 181 + light-clients/07-tendermint/doc.go | 5 + light-clients/07-tendermint/module.go | 10 + .../07-tendermint/types/client_state.go | 532 ++ .../07-tendermint/types/client_state_test.go | 779 ++ light-clients/07-tendermint/types/codec.go | 27 + .../07-tendermint/types/consensus_state.go | 55 + .../types/consensus_state_test.go | 69 + light-clients/07-tendermint/types/errors.go | 25 + light-clients/07-tendermint/types/fraction.go | 25 + light-clients/07-tendermint/types/genesis.go | 21 + .../07-tendermint/types/genesis_test.go | 38 + light-clients/07-tendermint/types/header.go | 83 + .../07-tendermint/types/header_test.go | 82 + .../07-tendermint/types/misbehaviour.go | 141 + .../types/misbehaviour_handle.go | 119 + .../types/misbehaviour_handle_test.go | 372 + .../07-tendermint/types/misbehaviour_test.go | 244 + .../07-tendermint/types/proposal_handle.go | 134 + .../types/proposal_handle_test.go | 387 + light-clients/07-tendermint/types/store.go | 96 + .../07-tendermint/types/store_test.go | 113 + .../07-tendermint/types/tendermint.pb.go | 1917 ++++ .../07-tendermint/types/tendermint_test.go | 95 + light-clients/07-tendermint/types/update.go | 186 + .../07-tendermint/types/update_test.go | 281 + light-clients/07-tendermint/types/upgrade.go | 156 + .../07-tendermint/types/upgrade_test.go | 512 ++ light-clients/09-localhost/doc.go | 5 + light-clients/09-localhost/module.go | 10 + .../09-localhost/types/client_state.go | 346 + .../09-localhost/types/client_state_test.go | 520 ++ light-clients/09-localhost/types/codec.go | 15 + light-clients/09-localhost/types/errors.go | 10 + light-clients/09-localhost/types/keys.go | 6 + .../09-localhost/types/localhost.pb.go | 369 + .../09-localhost/types/localhost_test.go | 43 + proto/ibcgo/apps/transfer/v1/genesis.proto | 19 + proto/ibcgo/apps/transfer/v1/query.proto | 68 + proto/ibcgo/apps/transfer/v1/transfer.proto | 45 + proto/ibcgo/apps/transfer/v1/tx.proto | 48 + proto/ibcgo/core/channel/v1/channel.proto | 157 + proto/ibcgo/core/channel/v1/genesis.proto | 42 + proto/ibcgo/core/channel/v1/query.proto | 389 + proto/ibcgo/core/channel/v1/tx.proto | 239 + proto/ibcgo/core/client/v1/client.proto | 96 + proto/ibcgo/core/client/v1/genesis.proto | 56 + proto/ibcgo/core/client/v1/query.proto | 143 + proto/ibcgo/core/client/v1/tx.proto | 107 + .../ibcgo/core/commitment/v1/commitment.proto | 39 + .../ibcgo/core/connection/v1/connection.proto | 108 + proto/ibcgo/core/connection/v1/genesis.proto | 21 + proto/ibcgo/core/connection/v1/query.proto | 145 + proto/ibcgo/core/connection/v1/tx.proto | 140 + proto/ibcgo/core/types/v1/genesis.proto | 29 + .../lightclients/localhost/v1/localhost.proto | 18 + .../solomachine/v1/solomachine.proto | 206 + .../tendermint/v1/tendermint.proto | 146 + scripts/README.md | 3 + scripts/linkify_changelog.py | 15 + scripts/protoc-swagger-gen.sh | 27 + scripts/protocgen.sh | 40 + testing/chain.go | 910 ++ testing/chain_test.go | 47 + testing/coordinator.go | 700 ++ testing/mock/README.md | 6 + testing/mock/doc.go | 9 + testing/mock/mock.go | 188 + testing/mock/privval.go | 50 + testing/mock/privval_test.go | 44 + testing/solomachine.go | 321 + testing/types.go | 44 + third_party/proto/confio/proofs.proto | 234 + .../base/query/v1beta1/pagination.proto | 50 + .../proto/cosmos/base/v1beta1/coin.proto | 40 + 
third_party/proto/gogoproto/gogo.proto | 145 + .../proto/google/api/annotations.proto | 31 + third_party/proto/google/api/http.proto | 318 + third_party/proto/google/protobuf/any.proto | 161 + .../proto/tendermint/crypto/keys.proto | 17 + .../proto/tendermint/crypto/proof.proto | 41 + .../proto/tendermint/libs/bits/types.proto | 9 + .../proto/tendermint/types/types.proto | 157 + .../proto/tendermint/types/validator.proto | 25 + .../proto/tendermint/version/types.proto | 24 + 401 files changed, 115750 insertions(+) create mode 100644 .github/CODEOWNERS create mode 100644 Makefile create mode 100644 applications/transfer/client/cli/cli.go create mode 100644 applications/transfer/client/cli/query.go create mode 100644 applications/transfer/client/cli/tx.go create mode 100644 applications/transfer/handler.go create mode 100644 applications/transfer/handler_test.go create mode 100644 applications/transfer/keeper/MBT_README.md create mode 100644 applications/transfer/keeper/encoding.go create mode 100644 applications/transfer/keeper/genesis.go create mode 100644 applications/transfer/keeper/genesis_test.go create mode 100644 applications/transfer/keeper/grpc_query.go create mode 100644 applications/transfer/keeper/grpc_query_test.go create mode 100644 applications/transfer/keeper/keeper.go create mode 100644 applications/transfer/keeper/keeper_test.go create mode 100644 applications/transfer/keeper/mbt_relay_test.go create mode 100644 applications/transfer/keeper/model_based_tests/Test5Packets.json create mode 100644 applications/transfer/keeper/model_based_tests/Test5Packets.tla create mode 100644 applications/transfer/keeper/model_based_tests/Test5PacketsAllDifferentPass.json create mode 100644 applications/transfer/keeper/model_based_tests/Test5PacketsAllDifferentPass.tla create mode 100644 applications/transfer/keeper/model_based_tests/TestOnRecvAcknowledgementErrorFail.json create mode 100644 applications/transfer/keeper/model_based_tests/TestOnRecvAcknowledgementErrorFail.tla create mode 100644 applications/transfer/keeper/model_based_tests/TestOnRecvAcknowledgementErrorPass.json create mode 100644 applications/transfer/keeper/model_based_tests/TestOnRecvAcknowledgementErrorPass.tla create mode 100644 applications/transfer/keeper/model_based_tests/TestOnRecvAcknowledgementResultFail.json create mode 100644 applications/transfer/keeper/model_based_tests/TestOnRecvAcknowledgementResultFail.tla create mode 100644 applications/transfer/keeper/model_based_tests/TestOnRecvAcknowledgementResultPass.json create mode 100644 applications/transfer/keeper/model_based_tests/TestOnRecvAcknowledgementResultPass.tla create mode 100644 applications/transfer/keeper/model_based_tests/TestOnRecvPacketFail.json create mode 100644 applications/transfer/keeper/model_based_tests/TestOnRecvPacketFail.tla create mode 100644 applications/transfer/keeper/model_based_tests/TestOnRecvPacketPass.json create mode 100644 applications/transfer/keeper/model_based_tests/TestOnRecvPacketPass.tla create mode 100644 applications/transfer/keeper/model_based_tests/TestOnTimeoutFail.json create mode 100644 applications/transfer/keeper/model_based_tests/TestOnTimeoutFail.tla create mode 100644 applications/transfer/keeper/model_based_tests/TestOnTimeoutPass.json create mode 100644 applications/transfer/keeper/model_based_tests/TestOnTimeoutPass.tla create mode 100644 applications/transfer/keeper/model_based_tests/TestSendTransferFail.json create mode 100644 applications/transfer/keeper/model_based_tests/TestSendTransferFail.tla 
create mode 100644 applications/transfer/keeper/model_based_tests/TestSendTransferPass.json create mode 100644 applications/transfer/keeper/model_based_tests/TestSendTransferPass.tla create mode 100644 applications/transfer/keeper/model_based_tests/TestUnescrowTokens.json create mode 100644 applications/transfer/keeper/model_based_tests/TestUnescrowTokens.tla create mode 100644 applications/transfer/keeper/msg_server.go create mode 100644 applications/transfer/keeper/params.go create mode 100644 applications/transfer/keeper/params_test.go create mode 100644 applications/transfer/keeper/relay.go create mode 100644 applications/transfer/keeper/relay_model/account.tla create mode 100644 applications/transfer/keeper/relay_model/account_record.tla create mode 100644 applications/transfer/keeper/relay_model/apalache-to-relay-test.json create mode 100644 applications/transfer/keeper/relay_model/apalache-to-relay-test2.json create mode 100644 applications/transfer/keeper/relay_model/denom.tla create mode 100644 applications/transfer/keeper/relay_model/denom_record.tla create mode 100644 applications/transfer/keeper/relay_model/denom_record2.tla create mode 100644 applications/transfer/keeper/relay_model/denom_sequence.tla create mode 100644 applications/transfer/keeper/relay_model/identifiers.tla create mode 100644 applications/transfer/keeper/relay_model/relay.tla create mode 100644 applications/transfer/keeper/relay_model/relay_tests.tla create mode 100644 applications/transfer/keeper/relay_test.go create mode 100644 applications/transfer/module.go create mode 100644 applications/transfer/module_test.go create mode 100644 applications/transfer/simulation/decoder.go create mode 100644 applications/transfer/simulation/decoder_test.go create mode 100644 applications/transfer/simulation/genesis.go create mode 100644 applications/transfer/simulation/genesis_test.go create mode 100644 applications/transfer/simulation/params.go create mode 100644 applications/transfer/simulation/params_test.go create mode 100644 applications/transfer/spec/01_concepts.md create mode 100644 applications/transfer/spec/02_state.md create mode 100644 applications/transfer/spec/03_state_transitions.md create mode 100644 applications/transfer/spec/04_messages.md create mode 100644 applications/transfer/spec/05_events.md create mode 100644 applications/transfer/spec/06_metrics.md create mode 100644 applications/transfer/spec/07_params.md create mode 100644 applications/transfer/spec/README.md create mode 100644 applications/transfer/types/codec.go create mode 100644 applications/transfer/types/coin.go create mode 100644 applications/transfer/types/errors.go create mode 100644 applications/transfer/types/events.go create mode 100644 applications/transfer/types/expected_keepers.go create mode 100644 applications/transfer/types/genesis.go create mode 100644 applications/transfer/types/genesis.pb.go create mode 100644 applications/transfer/types/genesis_test.go create mode 100644 applications/transfer/types/keys.go create mode 100644 applications/transfer/types/keys_test.go create mode 100644 applications/transfer/types/msgs.go create mode 100644 applications/transfer/types/msgs_test.go create mode 100644 applications/transfer/types/packet.go create mode 100644 applications/transfer/types/packet_test.go create mode 100644 applications/transfer/types/params.go create mode 100644 applications/transfer/types/params_test.go create mode 100644 applications/transfer/types/query.pb.go create mode 100644 
applications/transfer/types/query.pb.gw.go create mode 100644 applications/transfer/types/trace.go create mode 100644 applications/transfer/types/trace_test.go create mode 100644 applications/transfer/types/transfer.pb.go create mode 100644 applications/transfer/types/tx.pb.go create mode 100644 apps/transfer/types/genesis.pb.go create mode 100644 apps/transfer/types/query.pb.go create mode 100644 apps/transfer/types/query.pb.gw.go create mode 100644 apps/transfer/types/transfer.pb.go create mode 100644 apps/transfer/types/tx.pb.go create mode 100644 buf.yaml create mode 100644 core/02-client/abci.go create mode 100644 core/02-client/abci_test.go create mode 100644 core/02-client/client/cli/cli.go create mode 100644 core/02-client/client/cli/query.go create mode 100644 core/02-client/client/cli/tx.go create mode 100644 core/02-client/client/proposal_handler.go create mode 100644 core/02-client/client/utils/utils.go create mode 100644 core/02-client/doc.go create mode 100644 core/02-client/genesis.go create mode 100644 core/02-client/keeper/client.go create mode 100644 core/02-client/keeper/client_test.go create mode 100644 core/02-client/keeper/encoding.go create mode 100644 core/02-client/keeper/grpc_query.go create mode 100644 core/02-client/keeper/grpc_query_test.go create mode 100644 core/02-client/keeper/keeper.go create mode 100644 core/02-client/keeper/keeper_test.go create mode 100644 core/02-client/keeper/params.go create mode 100644 core/02-client/keeper/params_test.go create mode 100644 core/02-client/keeper/proposal.go create mode 100644 core/02-client/keeper/proposal_test.go create mode 100644 core/02-client/module.go create mode 100644 core/02-client/proposal_handler.go create mode 100644 core/02-client/proposal_handler_test.go create mode 100644 core/02-client/simulation/decoder.go create mode 100644 core/02-client/simulation/decoder_test.go create mode 100644 core/02-client/simulation/genesis.go create mode 100644 core/02-client/types/client.go create mode 100644 core/02-client/types/client.pb.go create mode 100644 core/02-client/types/client_test.go create mode 100644 core/02-client/types/codec.go create mode 100644 core/02-client/types/codec_test.go create mode 100644 core/02-client/types/encoding.go create mode 100644 core/02-client/types/errors.go create mode 100644 core/02-client/types/events.go create mode 100644 core/02-client/types/expected_keepers.go create mode 100644 core/02-client/types/genesis.go create mode 100644 core/02-client/types/genesis.pb.go create mode 100644 core/02-client/types/genesis_test.go create mode 100644 core/02-client/types/height.go create mode 100644 core/02-client/types/height_test.go create mode 100644 core/02-client/types/keys.go create mode 100644 core/02-client/types/keys_test.go create mode 100644 core/02-client/types/msgs.go create mode 100644 core/02-client/types/msgs_test.go create mode 100644 core/02-client/types/params.go create mode 100644 core/02-client/types/params_test.go create mode 100644 core/02-client/types/proposal.go create mode 100644 core/02-client/types/proposal_test.go create mode 100644 core/02-client/types/query.go create mode 100644 core/02-client/types/query.pb.go create mode 100644 core/02-client/types/query.pb.gw.go create mode 100644 core/02-client/types/tx.pb.go create mode 100644 core/03-connection/client/cli/cli.go create mode 100644 core/03-connection/client/cli/query.go create mode 100644 core/03-connection/client/cli/tx.go create mode 100644 core/03-connection/client/utils/utils.go create mode 100644 
core/03-connection/genesis.go create mode 100644 core/03-connection/keeper/grpc_query.go create mode 100644 core/03-connection/keeper/grpc_query_test.go create mode 100644 core/03-connection/keeper/handshake.go create mode 100644 core/03-connection/keeper/handshake_test.go create mode 100644 core/03-connection/keeper/keeper.go create mode 100644 core/03-connection/keeper/keeper_test.go create mode 100644 core/03-connection/keeper/verify.go create mode 100644 core/03-connection/keeper/verify_test.go create mode 100644 core/03-connection/module.go create mode 100644 core/03-connection/simulation/decoder.go create mode 100644 core/03-connection/simulation/decoder_test.go create mode 100644 core/03-connection/simulation/genesis.go create mode 100644 core/03-connection/types/codec.go create mode 100644 core/03-connection/types/connection.go create mode 100644 core/03-connection/types/connection.pb.go create mode 100644 core/03-connection/types/connection_test.go create mode 100644 core/03-connection/types/errors.go create mode 100644 core/03-connection/types/events.go create mode 100644 core/03-connection/types/expected_keepers.go create mode 100644 core/03-connection/types/genesis.go create mode 100644 core/03-connection/types/genesis.pb.go create mode 100644 core/03-connection/types/genesis_test.go create mode 100644 core/03-connection/types/keys.go create mode 100644 core/03-connection/types/keys_test.go create mode 100644 core/03-connection/types/msgs.go create mode 100644 core/03-connection/types/msgs_test.go create mode 100644 core/03-connection/types/query.go create mode 100644 core/03-connection/types/query.pb.go create mode 100644 core/03-connection/types/query.pb.gw.go create mode 100644 core/03-connection/types/tx.pb.go create mode 100644 core/03-connection/types/version.go create mode 100644 core/03-connection/types/version_test.go create mode 100644 core/04-channel/client/cli/cli.go create mode 100644 core/04-channel/client/cli/query.go create mode 100644 core/04-channel/client/cli/tx.go create mode 100644 core/04-channel/client/utils/utils.go create mode 100644 core/04-channel/genesis.go create mode 100644 core/04-channel/handler.go create mode 100644 core/04-channel/keeper/grpc_query.go create mode 100644 core/04-channel/keeper/grpc_query_test.go create mode 100644 core/04-channel/keeper/handshake.go create mode 100644 core/04-channel/keeper/handshake_test.go create mode 100644 core/04-channel/keeper/keeper.go create mode 100644 core/04-channel/keeper/keeper_test.go create mode 100644 core/04-channel/keeper/packet.go create mode 100644 core/04-channel/keeper/packet_test.go create mode 100644 core/04-channel/keeper/timeout.go create mode 100644 core/04-channel/keeper/timeout_test.go create mode 100644 core/04-channel/module.go create mode 100644 core/04-channel/simulation/decoder.go create mode 100644 core/04-channel/simulation/decoder_test.go create mode 100644 core/04-channel/simulation/genesis.go create mode 100644 core/04-channel/types/channel.go create mode 100644 core/04-channel/types/channel.pb.go create mode 100644 core/04-channel/types/channel_test.go create mode 100644 core/04-channel/types/codec.go create mode 100644 core/04-channel/types/errors.go create mode 100644 core/04-channel/types/events.go create mode 100644 core/04-channel/types/expected_keepers.go create mode 100644 core/04-channel/types/genesis.go create mode 100644 core/04-channel/types/genesis.pb.go create mode 100644 core/04-channel/types/genesis_test.go create mode 100644 core/04-channel/types/keys.go 
create mode 100644 core/04-channel/types/keys_test.go create mode 100644 core/04-channel/types/msgs.go create mode 100644 core/04-channel/types/msgs_test.go create mode 100644 core/04-channel/types/packet.go create mode 100644 core/04-channel/types/packet_test.go create mode 100644 core/04-channel/types/query.go create mode 100644 core/04-channel/types/query.pb.go create mode 100644 core/04-channel/types/query.pb.gw.go create mode 100644 core/04-channel/types/tx.pb.go create mode 100644 core/05-port/keeper/keeper.go create mode 100644 core/05-port/keeper/keeper_test.go create mode 100644 core/05-port/types/errors.go create mode 100644 core/05-port/types/keys.go create mode 100644 core/05-port/types/module.go create mode 100644 core/05-port/types/router.go create mode 100644 core/05-port/types/utils.go create mode 100644 core/23-commitment/types/bench_test.go create mode 100644 core/23-commitment/types/codec.go create mode 100644 core/23-commitment/types/commitment.pb.go create mode 100644 core/23-commitment/types/commitment_test.go create mode 100644 core/23-commitment/types/errors.go create mode 100644 core/23-commitment/types/merkle.go create mode 100644 core/23-commitment/types/merkle_test.go create mode 100644 core/23-commitment/types/utils.go create mode 100644 core/23-commitment/types/utils_test.go create mode 100644 core/24-host/errors.go create mode 100644 core/24-host/keys.go create mode 100644 core/24-host/parse.go create mode 100644 core/24-host/parse_test.go create mode 100644 core/24-host/validate.go create mode 100644 core/24-host/validate_test.go create mode 100644 core/client/cli/cli.go create mode 100644 core/client/query.go create mode 100644 core/exported/channel.go create mode 100644 core/exported/client.go create mode 100644 core/exported/commitment.go create mode 100644 core/exported/connection.go create mode 100644 core/genesis.go create mode 100644 core/genesis_test.go create mode 100644 core/handler.go create mode 100644 core/keeper/grpc_query.go create mode 100644 core/keeper/keeper.go create mode 100644 core/keeper/msg_server.go create mode 100644 core/keeper/msg_server_test.go create mode 100644 core/module.go create mode 100644 core/simulation/decoder.go create mode 100644 core/simulation/decoder_test.go create mode 100644 core/simulation/genesis.go create mode 100644 core/simulation/genesis_test.go create mode 100644 core/spec/01_concepts.md create mode 100644 core/spec/02_state.md create mode 100644 core/spec/03_state_transitions.md create mode 100644 core/spec/04_messages.md create mode 100644 core/spec/05_callbacks.md create mode 100644 core/spec/06_events.md create mode 100644 core/spec/07_params.md create mode 100644 core/spec/README.md create mode 100644 core/types/codec.go create mode 100644 core/types/genesis.go create mode 100644 core/types/genesis.pb.go create mode 100644 core/types/query.go create mode 100644 docs/README.md create mode 100644 docs/ibc/proto-docs.md create mode 100644 docs/protodoc-markdown.tmpl create mode 100644 go.mod create mode 100644 go.sum create mode 100644 light-clients/06-solomachine/doc.go create mode 100644 light-clients/06-solomachine/module.go create mode 100644 light-clients/06-solomachine/spec/01_concepts.md create mode 100644 light-clients/06-solomachine/spec/02_state.md create mode 100644 light-clients/06-solomachine/spec/03_state_transitions.md create mode 100644 light-clients/06-solomachine/spec/04_messages.md create mode 100644 light-clients/06-solomachine/spec/README.md create mode 100644 
light-clients/06-solomachine/types/client_state.go create mode 100644 light-clients/06-solomachine/types/client_state_test.go create mode 100644 light-clients/06-solomachine/types/codec.go create mode 100644 light-clients/06-solomachine/types/codec_test.go create mode 100644 light-clients/06-solomachine/types/consensus_state.go create mode 100644 light-clients/06-solomachine/types/consensus_state_test.go create mode 100644 light-clients/06-solomachine/types/errors.go create mode 100644 light-clients/06-solomachine/types/header.go create mode 100644 light-clients/06-solomachine/types/header_test.go create mode 100644 light-clients/06-solomachine/types/misbehaviour.go create mode 100644 light-clients/06-solomachine/types/misbehaviour_handle.go create mode 100644 light-clients/06-solomachine/types/misbehaviour_handle_test.go create mode 100644 light-clients/06-solomachine/types/misbehaviour_test.go create mode 100644 light-clients/06-solomachine/types/proof.go create mode 100644 light-clients/06-solomachine/types/proof_test.go create mode 100644 light-clients/06-solomachine/types/proposal_handle.go create mode 100644 light-clients/06-solomachine/types/proposal_handle_test.go create mode 100644 light-clients/06-solomachine/types/solomachine.go create mode 100644 light-clients/06-solomachine/types/solomachine.pb.go create mode 100644 light-clients/06-solomachine/types/solomachine_test.go create mode 100644 light-clients/06-solomachine/types/update.go create mode 100644 light-clients/06-solomachine/types/update_test.go create mode 100644 light-clients/07-tendermint/doc.go create mode 100644 light-clients/07-tendermint/module.go create mode 100644 light-clients/07-tendermint/types/client_state.go create mode 100644 light-clients/07-tendermint/types/client_state_test.go create mode 100644 light-clients/07-tendermint/types/codec.go create mode 100644 light-clients/07-tendermint/types/consensus_state.go create mode 100644 light-clients/07-tendermint/types/consensus_state_test.go create mode 100644 light-clients/07-tendermint/types/errors.go create mode 100644 light-clients/07-tendermint/types/fraction.go create mode 100644 light-clients/07-tendermint/types/genesis.go create mode 100644 light-clients/07-tendermint/types/genesis_test.go create mode 100644 light-clients/07-tendermint/types/header.go create mode 100644 light-clients/07-tendermint/types/header_test.go create mode 100644 light-clients/07-tendermint/types/misbehaviour.go create mode 100644 light-clients/07-tendermint/types/misbehaviour_handle.go create mode 100644 light-clients/07-tendermint/types/misbehaviour_handle_test.go create mode 100644 light-clients/07-tendermint/types/misbehaviour_test.go create mode 100644 light-clients/07-tendermint/types/proposal_handle.go create mode 100644 light-clients/07-tendermint/types/proposal_handle_test.go create mode 100644 light-clients/07-tendermint/types/store.go create mode 100644 light-clients/07-tendermint/types/store_test.go create mode 100644 light-clients/07-tendermint/types/tendermint.pb.go create mode 100644 light-clients/07-tendermint/types/tendermint_test.go create mode 100644 light-clients/07-tendermint/types/update.go create mode 100644 light-clients/07-tendermint/types/update_test.go create mode 100644 light-clients/07-tendermint/types/upgrade.go create mode 100644 light-clients/07-tendermint/types/upgrade_test.go create mode 100644 light-clients/09-localhost/doc.go create mode 100644 light-clients/09-localhost/module.go create mode 100644 
light-clients/09-localhost/types/client_state.go create mode 100644 light-clients/09-localhost/types/client_state_test.go create mode 100644 light-clients/09-localhost/types/codec.go create mode 100644 light-clients/09-localhost/types/errors.go create mode 100644 light-clients/09-localhost/types/keys.go create mode 100644 light-clients/09-localhost/types/localhost.pb.go create mode 100644 light-clients/09-localhost/types/localhost_test.go create mode 100644 proto/ibcgo/apps/transfer/v1/genesis.proto create mode 100644 proto/ibcgo/apps/transfer/v1/query.proto create mode 100644 proto/ibcgo/apps/transfer/v1/transfer.proto create mode 100644 proto/ibcgo/apps/transfer/v1/tx.proto create mode 100644 proto/ibcgo/core/channel/v1/channel.proto create mode 100644 proto/ibcgo/core/channel/v1/genesis.proto create mode 100644 proto/ibcgo/core/channel/v1/query.proto create mode 100644 proto/ibcgo/core/channel/v1/tx.proto create mode 100644 proto/ibcgo/core/client/v1/client.proto create mode 100644 proto/ibcgo/core/client/v1/genesis.proto create mode 100644 proto/ibcgo/core/client/v1/query.proto create mode 100644 proto/ibcgo/core/client/v1/tx.proto create mode 100644 proto/ibcgo/core/commitment/v1/commitment.proto create mode 100644 proto/ibcgo/core/connection/v1/connection.proto create mode 100644 proto/ibcgo/core/connection/v1/genesis.proto create mode 100644 proto/ibcgo/core/connection/v1/query.proto create mode 100644 proto/ibcgo/core/connection/v1/tx.proto create mode 100644 proto/ibcgo/core/types/v1/genesis.proto create mode 100644 proto/ibcgo/lightclients/localhost/v1/localhost.proto create mode 100644 proto/ibcgo/lightclients/solomachine/v1/solomachine.proto create mode 100644 proto/ibcgo/lightclients/tendermint/v1/tendermint.proto create mode 100644 scripts/README.md create mode 100644 scripts/linkify_changelog.py create mode 100755 scripts/protoc-swagger-gen.sh create mode 100755 scripts/protocgen.sh create mode 100644 testing/chain.go create mode 100644 testing/chain_test.go create mode 100644 testing/coordinator.go create mode 100644 testing/mock/README.md create mode 100644 testing/mock/doc.go create mode 100644 testing/mock/mock.go create mode 100644 testing/mock/privval.go create mode 100644 testing/mock/privval_test.go create mode 100644 testing/solomachine.go create mode 100644 testing/types.go create mode 100644 third_party/proto/confio/proofs.proto create mode 100644 third_party/proto/cosmos/base/query/v1beta1/pagination.proto create mode 100644 third_party/proto/cosmos/base/v1beta1/coin.proto create mode 100644 third_party/proto/gogoproto/gogo.proto create mode 100644 third_party/proto/google/api/annotations.proto create mode 100644 third_party/proto/google/api/http.proto create mode 100644 third_party/proto/google/protobuf/any.proto create mode 100644 third_party/proto/tendermint/crypto/keys.proto create mode 100644 third_party/proto/tendermint/crypto/proof.proto create mode 100644 third_party/proto/tendermint/libs/bits/types.proto create mode 100644 third_party/proto/tendermint/types/types.proto create mode 100644 third_party/proto/tendermint/types/validator.proto create mode 100644 third_party/proto/tendermint/version/types.proto diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS new file mode 100644 index 0000000000..2e5239c688 --- /dev/null +++ b/.github/CODEOWNERS @@ -0,0 +1,3 @@ +# CODEOWNERS: https://help.github.com/articles/about-codeowners/ + +* @colin-axner @fedekunze @AdityaSripal diff --git a/Makefile b/Makefile new file mode 100644 index 0000000000..7ed1d5ab86 --- 
/dev/null +++ b/Makefile @@ -0,0 +1,475 @@ +#!/usr/bin/make -f + +PACKAGES_NOSIMULATION=$(shell go list ./... | grep -v '/simulation') +PACKAGES_SIMTEST=$(shell go list ./... | grep '/simulation') +VERSION := $(shell echo $(shell git describe --always) | sed 's/^v//') +COMMIT := $(shell git log -1 --format='%H') +LEDGER_ENABLED ?= true +BINDIR ?= $(GOPATH)/bin +BUILDDIR ?= $(CURDIR)/build +SIMAPP = ./simapp +MOCKS_DIR = $(CURDIR)/tests/mocks +HTTPS_GIT := https://github.com/cosmos/ibc-go.git +DOCKER := $(shell which docker) +DOCKER_BUF := $(DOCKER) run --rm -v $(CURDIR):/workspace --workdir /workspace bufbuild/buf + +export GO111MODULE = on + +# process build tags + +build_tags = netgo +ifeq ($(LEDGER_ENABLED),true) + ifeq ($(OS),Windows_NT) + GCCEXE = $(shell where gcc.exe 2> NUL) + ifeq ($(GCCEXE),) + $(error gcc.exe not installed for ledger support, please install or set LEDGER_ENABLED=false) + else + build_tags += ledger + endif + else + UNAME_S = $(shell uname -s) + ifeq ($(UNAME_S),OpenBSD) + $(warning OpenBSD detected, disabling ledger support (https://github.com/cosmos/cosmos-sdk/issues/1988)) + else + GCC = $(shell command -v gcc 2> /dev/null) + ifeq ($(GCC),) + $(error gcc not installed for ledger support, please install or set LEDGER_ENABLED=false) + else + build_tags += ledger + endif + endif + endif +endif + +ifeq (cleveldb,$(findstring cleveldb,$(COSMOS_BUILD_OPTIONS))) + build_tags += gcc +endif +build_tags += $(BUILD_TAGS) +build_tags := $(strip $(build_tags)) + +whitespace := +whitespace += $(whitespace) +comma := , +build_tags_comma_sep := $(subst $(whitespace),$(comma),$(build_tags)) + +# process linker flags + +ldflags = -X github.com/cosmos/cosmos-sdk/version.Name=sim \ + -X github.com/cosmos/cosmos-sdk/version.AppName=simd \ + -X github.com/cosmos/cosmos-sdk/version.Version=$(VERSION) \ + -X github.com/cosmos/cosmos-sdk/version.Commit=$(COMMIT) \ + -X "github.com/cosmos/cosmos-sdk/version.BuildTags=$(build_tags_comma_sep)" + +# DB backend selection +ifeq (cleveldb,$(findstring cleveldb,$(COSMOS_BUILD_OPTIONS))) + ldflags += -X github.com/cosmos/cosmos-sdk/types.DBBackend=cleveldb +endif +ifeq (badgerdb,$(findstring badgerdb,$(COSMOS_BUILD_OPTIONS))) + ldflags += -X github.com/cosmos/cosmos-sdk/types.DBBackend=badgerdb +endif +# handle rocksdb +ifeq (rocksdb,$(findstring rocksdb,$(COSMOS_BUILD_OPTIONS))) + CGO_ENABLED=1 + BUILD_TAGS += rocksdb + ldflags += -X github.com/cosmos/cosmos-sdk/types.DBBackend=rocksdb +endif +# handle boltdb +ifeq (boltdb,$(findstring boltdb,$(COSMOS_BUILD_OPTIONS))) + BUILD_TAGS += boltdb + ldflags += -X github.com/cosmos/cosmos-sdk/types.DBBackend=boltdb +endif + +ifeq (,$(findstring nostrip,$(COSMOS_BUILD_OPTIONS))) + ldflags += -w -s +endif +ldflags += $(LDFLAGS) +ldflags := $(strip $(ldflags)) + +BUILD_FLAGS := -tags "$(build_tags)" -ldflags '$(ldflags)' +# check for nostrip option +ifeq (,$(findstring nostrip,$(COSMOS_BUILD_OPTIONS))) + BUILD_FLAGS += -trimpath +endif + +all: tools build lint test + +# The below include contains the tools and runsim targets. +#include contrib/devtools/Makefile + +############################################################################### +### Build ### +############################################################################### + +BUILD_TARGETS := build install + +build: BUILD_ARGS=-o $(BUILDDIR)/ +build-linux: + GOOS=linux GOARCH=amd64 LEDGER_ENABLED=false $(MAKE) build + +$(BUILD_TARGETS): go.sum $(BUILDDIR)/ + go $@ -mod=readonly $(BUILD_FLAGS) $(BUILD_ARGS) ./... 
+ +$(BUILDDIR)/: + mkdir -p $(BUILDDIR)/ + +build-simd-all: go.sum + $(DOCKER) rm latest-build || true + $(DOCKER) run --volume=$(CURDIR):/sources:ro \ + --env TARGET_PLATFORMS='linux/amd64 darwin/amd64 linux/arm64 windows/amd64' \ + --env APP=simd \ + --env VERSION=$(VERSION) \ + --env COMMIT=$(COMMIT) \ + --env LEDGER_ENABLED=$(LEDGER_ENABLED) \ + --name latest-build cosmossdk/rbuilder:latest + $(DOCKER) cp -a latest-build:/home/builder/artifacts/ $(CURDIR)/ + +build-simd-linux: go.sum $(BUILDDIR)/ + $(DOCKER) rm latest-build || true + $(DOCKER) run --volume=$(CURDIR):/sources:ro \ + --env TARGET_PLATFORMS='linux/amd64' \ + --env APP=simd \ + --env VERSION=$(VERSION) \ + --env COMMIT=$(COMMIT) \ + --env LEDGER_ENABLED=false \ + --name latest-build cosmossdk/rbuilder:latest + $(DOCKER) cp -a latest-build:/home/builder/artifacts/ $(CURDIR)/ + cp artifacts/simd-*-linux-amd64 $(BUILDDIR)/simd + +cosmovisor: + $(MAKE) -C cosmovisor cosmovisor + +.PHONY: build build-linux build-simd-all build-simd-linux cosmovisor + +mocks: $(MOCKS_DIR) + mockgen -source=client/account_retriever.go -package mocks -destination tests/mocks/account_retriever.go + mockgen -package mocks -destination tests/mocks/tendermint_tm_db_DB.go github.com/tendermint/tm-db DB + mockgen -source=types/module/module.go -package mocks -destination tests/mocks/types_module_module.go + mockgen -source=types/invariant.go -package mocks -destination tests/mocks/types_invariant.go + mockgen -source=types/router.go -package mocks -destination tests/mocks/types_router.go + mockgen -source=types/handler.go -package mocks -destination tests/mocks/types_handler.go + mockgen -package mocks -destination tests/mocks/grpc_server.go github.com/gogo/protobuf/grpc Server + mockgen -package mocks -destination tests/mocks/tendermint_tendermint_libs_log_DB.go github.com/tendermint/tendermint/libs/log Logger +.PHONY: mocks + +$(MOCKS_DIR): + mkdir -p $(MOCKS_DIR) + +distclean: clean tools-clean +clean: + rm -rf \ + $(BUILDDIR)/ \ + artifacts/ \ + tmp-swagger-gen/ + +.PHONY: distclean clean + +############################################################################### +### Tools & Dependencies ### +############################################################################### + +go.sum: go.mod + echo "Ensure dependencies have not been modified ..." >&2 + go mod verify + go mod tidy + +############################################################################### +### Documentation ### +############################################################################### + +update-swagger-docs: statik + $(BINDIR)/statik -src=client/docs/swagger-ui -dest=client/docs -f -m + @if [ -n "$(git status --porcelain)" ]; then \ + echo "\033[91mSwagger docs are out of sync!!!\033[0m";\ + exit 1;\ + else \ + echo "\033[92mSwagger docs are in sync\033[0m";\ + fi +.PHONY: update-swagger-docs + +godocs: + @echo "--> Wait a few seconds and visit http://localhost:6060/pkg/github.com/cosmos/cosmos-sdk/types" + godoc -http=:6060 + +# This builds a docs site for each branch/tag in `./docs/versions` +# and copies each site to a version prefixed path. The last entry inside +# the `versions` file will be the default root index.html. 
+build-docs: + @cd docs && \ + while read -r branch path_prefix; do \ + (git checkout $${branch} && npm install && VUEPRESS_BASE="/$${path_prefix}/" npm run build) ; \ + mkdir -p ~/output/$${path_prefix} ; \ + cp -r .vuepress/dist/* ~/output/$${path_prefix}/ ; \ + cp ~/output/$${path_prefix}/index.html ~/output ; \ + done < versions ; +.PHONY: build-docs + +############################################################################### +### Tests & Simulation ### +############################################################################### + +test: test-unit +test-all: test-unit test-ledger-mock test-race test-cover + +TEST_PACKAGES=./... +TEST_TARGETS := test-unit test-unit-amino test-unit-proto test-ledger-mock test-race test-ledger test-race + +# Test runs-specific rules. To add a new test target, just add +# a new rule, customise ARGS or TEST_PACKAGES ad libitum, and +# append the new rule to the TEST_TARGETS list. +test-unit: ARGS=-tags='cgo ledger test_ledger_mock norace' +test-unit-amino: ARGS=-tags='ledger test_ledger_mock test_amino norace' +test-ledger: ARGS=-tags='cgo ledger norace' +test-ledger-mock: ARGS=-tags='ledger test_ledger_mock norace' +test-race: ARGS=-race -tags='cgo ledger test_ledger_mock' +test-race: TEST_PACKAGES=$(PACKAGES_NOSIMULATION) +$(TEST_TARGETS): run-tests + +# check-* compiles and collects tests without running them +# note: go test -c doesn't support multiple packages yet (https://github.com/golang/go/issues/15513) +CHECK_TEST_TARGETS := check-test-unit check-test-unit-amino +check-test-unit: ARGS=-tags='cgo ledger test_ledger_mock norace' +check-test-unit-amino: ARGS=-tags='ledger test_ledger_mock test_amino norace' +$(CHECK_TEST_TARGETS): EXTRA_ARGS=-run=none +$(CHECK_TEST_TARGETS): run-tests + +run-tests: +ifneq (,$(shell which tparse 2>/dev/null)) + go test -mod=readonly -json $(ARGS) $(EXTRA_ARGS) $(TEST_PACKAGES) | tparse +else + go test -mod=readonly $(ARGS) $(EXTRA_ARGS) $(TEST_PACKAGES) +endif + +.PHONY: run-tests test test-all $(TEST_TARGETS) + +test-sim-nondeterminism: + @echo "Running non-determinism test..." + @go test -mod=readonly $(SIMAPP) -run TestAppStateDeterminism -Enabled=true \ + -NumBlocks=100 -BlockSize=200 -Commit=true -Period=0 -v -timeout 24h + +test-sim-custom-genesis-fast: + @echo "Running custom genesis simulation..." + @echo "By default, ${HOME}/.gaiad/config/genesis.json will be used." + @go test -mod=readonly $(SIMAPP) -run TestFullAppSimulation -Genesis=${HOME}/.gaiad/config/genesis.json \ + -Enabled=true -NumBlocks=100 -BlockSize=200 -Commit=true -Seed=99 -Period=5 -v -timeout 24h + +test-sim-import-export: runsim + @echo "Running application import/export simulation. This may take several minutes..." + @$(BINDIR)/runsim -Jobs=4 -SimAppPkg=$(SIMAPP) -ExitOnFail 50 5 TestAppImportExport + +test-sim-after-import: runsim + @echo "Running application simulation-after-import. This may take several minutes..." + @$(BINDIR)/runsim -Jobs=4 -SimAppPkg=$(SIMAPP) -ExitOnFail 50 5 TestAppSimulationAfterImport + +test-sim-custom-genesis-multi-seed: runsim + @echo "Running multi-seed custom genesis simulation..." + @echo "By default, ${HOME}/.gaiad/config/genesis.json will be used." + @$(BINDIR)/runsim -Genesis=${HOME}/.gaiad/config/genesis.json -SimAppPkg=$(SIMAPP) -ExitOnFail 400 5 TestFullAppSimulation + +test-sim-multi-seed-long: runsim + @echo "Running long multi-seed application simulation. This may take awhile!" 
+ @$(BINDIR)/runsim -Jobs=4 -SimAppPkg=$(SIMAPP) -ExitOnFail 500 50 TestFullAppSimulation + +test-sim-multi-seed-short: runsim + @echo "Running short multi-seed application simulation. This may take awhile!" + @$(BINDIR)/runsim -Jobs=4 -SimAppPkg=$(SIMAPP) -ExitOnFail 50 10 TestFullAppSimulation + +test-sim-benchmark-invariants: + @echo "Running simulation invariant benchmarks..." + @go test -mod=readonly $(SIMAPP) -benchmem -bench=BenchmarkInvariants -run=^$ \ + -Enabled=true -NumBlocks=1000 -BlockSize=200 \ + -Period=1 -Commit=true -Seed=57 -v -timeout 24h + +.PHONY: \ +test-sim-nondeterminism \ +test-sim-custom-genesis-fast \ +test-sim-import-export \ +test-sim-after-import \ +test-sim-custom-genesis-multi-seed \ +test-sim-multi-seed-short \ +test-sim-multi-seed-long \ +test-sim-benchmark-invariants + +SIM_NUM_BLOCKS ?= 500 +SIM_BLOCK_SIZE ?= 200 +SIM_COMMIT ?= true + +test-sim-benchmark: + @echo "Running application benchmark for numBlocks=$(SIM_NUM_BLOCKS), blockSize=$(SIM_BLOCK_SIZE). This may take awhile!" + @go test -mod=readonly -benchmem -run=^$$ $(SIMAPP) -bench ^BenchmarkFullAppSimulation$$ \ + -Enabled=true -NumBlocks=$(SIM_NUM_BLOCKS) -BlockSize=$(SIM_BLOCK_SIZE) -Commit=$(SIM_COMMIT) -timeout 24h + +test-sim-profile: + @echo "Running application benchmark for numBlocks=$(SIM_NUM_BLOCKS), blockSize=$(SIM_BLOCK_SIZE). This may take awhile!" + @go test -mod=readonly -benchmem -run=^$$ $(SIMAPP) -bench ^BenchmarkFullAppSimulation$$ \ + -Enabled=true -NumBlocks=$(SIM_NUM_BLOCKS) -BlockSize=$(SIM_BLOCK_SIZE) -Commit=$(SIM_COMMIT) -timeout 24h -cpuprofile cpu.out -memprofile mem.out + +.PHONY: test-sim-profile test-sim-benchmark + +test-cover: + @export VERSION=$(VERSION); bash -x contrib/test_cover.sh +.PHONY: test-cover + +test-rosetta: + docker build -t rosetta-ci:latest -f contrib/rosetta/node/Dockerfile . + docker-compose -f contrib/rosetta/docker-compose.yaml up --abort-on-container-exit --exit-code-from test_rosetta --build +.PHONY: test-rosetta + +benchmark: + @go test -mod=readonly -bench=. $(PACKAGES_NOSIMULATION) +.PHONY: benchmark + +############################################################################### +### Linting ### +############################################################################### + +lint: + golangci-lint run --out-format=tab + +lint-fix: + golangci-lint run --fix --out-format=tab --issues-exit-code=0 +.PHONY: lint lint-fix + +format: + find . -name '*.go' -type f -not -path "./vendor*" -not -path "*.git*" -not -path "./client/docs/statik/statik.go" -not -path "./tests/mocks/*" -not -name '*.pb.go' | xargs gofmt -w -s + find . -name '*.go' -type f -not -path "./vendor*" -not -path "*.git*" -not -path "./client/docs/statik/statik.go" -not -path "./tests/mocks/*" -not -name '*.pb.go' | xargs misspell -w + find . 
-name '*.go' -type f -not -path "./vendor*" -not -path "*.git*" -not -path "./client/docs/statik/statik.go" -not -path "./tests/mocks/*" -not -name '*.pb.go' | xargs goimports -w -local github.com/cosmos/cosmos-sdk +.PHONY: format + +############################################################################### +### Devdoc ### +############################################################################### + +DEVDOC_SAVE = docker commit `docker ps -a -n 1 -q` devdoc:local + +devdoc-init: + $(DOCKER) run -it -v "$(CURDIR):/go/src/github.com/cosmos/cosmos-sdk" -w "/go/src/github.com/cosmos/cosmos-sdk" tendermint/devdoc echo + # TODO make this safer + $(call DEVDOC_SAVE) + +devdoc: + $(DOCKER) run -it -v "$(CURDIR):/go/src/github.com/cosmos/cosmos-sdk" -w "/go/src/github.com/cosmos/cosmos-sdk" devdoc:local bash + +devdoc-save: + # TODO make this safer + $(call DEVDOC_SAVE) + +devdoc-clean: + docker rmi -f $$(docker images -f "dangling=true" -q) + +devdoc-update: + docker pull tendermint/devdoc + +.PHONY: devdoc devdoc-clean devdoc-init devdoc-save devdoc-update + +############################################################################### +### Protobuf ### +############################################################################### + +proto-all: proto-format proto-lint proto-gen + +proto-gen: + @echo "Generating Protobuf files" + $(DOCKER) run --rm -v $(CURDIR):/workspace --workdir /workspace tendermintdev/sdk-proto-gen sh ./scripts/protocgen.sh + +proto-format: + @echo "Formatting Protobuf files" + $(DOCKER) run --rm -v $(CURDIR):/workspace \ + --workdir /workspace tendermintdev/docker-build-proto \ + find ./ -not -path "./third_party/*" -name *.proto -exec clang-format -i {} \; + +proto-swagger-gen: + @./scripts/protoc-swagger-gen.sh + +proto-lint: + @$(DOCKER_BUF) check lint --error-format=json + +proto-check-breaking: + @$(DOCKER_BUF) check breaking --against-input $(HTTPS_GIT)#branch=main + +TM_URL = https://raw.githubusercontent.com/tendermint/tendermint/v0.34.0-rc6/proto/tendermint +GOGO_PROTO_URL = https://raw.githubusercontent.com/regen-network/protobuf/cosmos +CONFIO_URL = https://raw.githubusercontent.com/confio/ics23/v0.6.3 +SDK_PROTO_URL = https://raw.githubusercontent.com/cosmos/cosmos-sdk/v0.41.0/proto/cosmos + +TM_CRYPTO_TYPES = third_party/proto/tendermint/crypto +TM_ABCI_TYPES = third_party/proto/tendermint/abci +TM_TYPES = third_party/proto/tendermint/types +TM_VERSION = third_party/proto/tendermint/version +TM_LIBS = third_party/proto/tendermint/libs/bits +TM_P2P = third_party/proto/tendermint/p2p + +SDK_QUERY = third_party/proto/cosmos/base/query/v1beta1 +SDK_BASE = third_party/proto/cosmos/base/v1beta1 + +GOGO_PROTO_TYPES = third_party/proto/gogoproto +CONFIO_TYPES = third_party/proto/confio + +proto-update-deps: + @mkdir -p $(GOGO_PROTO_TYPES) + @curl -sSL $(GOGO_PROTO_URL)/gogoproto/gogo.proto > $(GOGO_PROTO_TYPES)/gogo.proto + + @mkdir -p $(SDK_QUERY) + @curl -sSL $(SDK_PROTO_URL)/base/query/v1beta1/pagination.proto > $(SDK_QUERY)/pagination.proto + + @mkdir -p $(SDK_BASE) + @curl -sSL $(SDK_PROTO_URL)/base/v1beta1/coin.proto > $(SDK_BASE)/coin.proto + +## Importing of tendermint protobuf definitions currently requires the +## use of `sed` in order to build properly with cosmos-sdk's proto file layout +## (which is the standard Buf.build FILE_LAYOUT) +## Issue link: https://github.com/tendermint/tendermint/issues/5021 + @mkdir -p $(TM_TYPES) + @curl -sSL $(TM_URL)/types/types.proto > $(TM_TYPES)/types.proto + @curl -sSL $(TM_URL)/types/validator.proto > 
$(TM_TYPES)/validator.proto + + @mkdir -p $(TM_VERSION) + @curl -sSL $(TM_URL)/version/types.proto > $(TM_VERSION)/types.proto + + @mkdir -p $(TM_LIBS) + @curl -sSL $(TM_URL)/libs/bits/types.proto > $(TM_LIBS)/types.proto + + @mkdir -p $(TM_CRYPTO_TYPES) + @curl -sSL $(TM_URL)/crypto/proof.proto > $(TM_CRYPTO_TYPES)/proof.proto + @curl -sSL $(TM_URL)/crypto/keys.proto > $(TM_CRYPTO_TYPES)/keys.proto + + @mkdir -p $(CONFIO_TYPES) + @curl -sSL $(CONFIO_URL)/proofs.proto > $(CONFIO_TYPES)/proofs.proto + +## insert go package option into proofs.proto file +## Issue link: https://github.com/confio/ics23/issues/32 + @sed -i '4ioption go_package = "github.com/confio/ics23/go";' $(CONFIO_TYPES)/proofs.proto + +.PHONY: proto-all proto-gen proto-gen-any proto-swagger-gen proto-format proto-lint proto-check-breaking proto-update-deps + +############################################################################### +### Localnet ### +############################################################################### + +# Run a 4-node testnet locally +localnet-start: build-linux localnet-stop + $(if $(shell $(DOCKER) inspect -f '{{ .Id }}' cosmossdk/simd-env 2>/dev/null),$(info found image cosmossdk/simd-env),$(MAKE) -C contrib/images simd-env) + if ! [ -f build/node0/simd/config/genesis.json ]; then $(DOCKER) run --rm \ + --user $(shell id -u):$(shell id -g) \ + -v $(BUILDDIR):/simd:Z \ + -v /etc/group:/etc/group:ro \ + -v /etc/passwd:/etc/passwd:ro \ + -v /etc/shadow:/etc/shadow:ro \ + cosmossdk/simd-env testnet --v 4 -o . --starting-ip-address 192.168.10.2 --keyring-backend=test ; fi + docker-compose up -d + +localnet-stop: + docker-compose down + +.PHONY: localnet-start localnet-stop + +############################################################################### +### rosetta ### +############################################################################### +# builds rosetta test data dir +rosetta-data: + -docker container rm data_dir_build + docker build -t rosetta-ci:latest -f contrib/rosetta/node/Dockerfile . 
+ docker run --name data_dir_build -t rosetta-ci:latest sh /rosetta/data.sh + docker cp data_dir_build:/tmp/data.tar.gz "$(CURDIR)/contrib/rosetta/node/data.tar.gz" + docker container rm data_dir_build +.PHONY: rosetta-data diff --git a/applications/transfer/client/cli/cli.go b/applications/transfer/client/cli/cli.go new file mode 100644 index 0000000000..d3ca8341e9 --- /dev/null +++ b/applications/transfer/client/cli/cli.go @@ -0,0 +1,42 @@ +package cli + +import ( + "github.com/spf13/cobra" + + "github.com/cosmos/cosmos-sdk/client" +) + +// GetQueryCmd returns the query commands for the IBC fungible token transfer module +func GetQueryCmd() *cobra.Command { + queryCmd := &cobra.Command{ + Use: "ibc-transfer", + Short: "IBC fungible token transfer query subcommands", + DisableFlagParsing: true, + SuggestionsMinimumDistance: 2, + } + + queryCmd.AddCommand( + GetCmdQueryDenomTrace(), + GetCmdQueryDenomTraces(), + GetCmdParams(), + ) + + return queryCmd +} + +// NewTxCmd returns the transaction commands for IBC fungible token transfer +func NewTxCmd() *cobra.Command { + txCmd := &cobra.Command{ + Use: "ibc-transfer", + Short: "IBC fungible token transfer transaction subcommands", + DisableFlagParsing: true, + SuggestionsMinimumDistance: 2, + RunE: client.ValidateCmd, + } + + txCmd.AddCommand( + NewTransferTxCmd(), + ) + + return txCmd +} diff --git a/applications/transfer/client/cli/query.go b/applications/transfer/client/cli/query.go new file mode 100644 index 0000000000..b9658e05ae --- /dev/null +++ b/applications/transfer/client/cli/query.go @@ -0,0 +1,108 @@ +package cli + +import ( + "fmt" + + "github.com/spf13/cobra" + + "github.com/cosmos/cosmos-sdk/client" + "github.com/cosmos/cosmos-sdk/client/flags" + "github.com/cosmos/cosmos-sdk/version" + "github.com/cosmos/cosmos-sdk/x/ibc/applications/transfer/types" +) + +// GetCmdQueryDenomTrace defines the command to query a denomination trace from a given hash. +func GetCmdQueryDenomTrace() *cobra.Command { + cmd := &cobra.Command{ + Use: "denom-trace [hash]", + Short: "Query the denom trace info from a given trace hash", + Long: "Query the denom trace info from a given trace hash", + Example: fmt.Sprintf("%s query ibc-transfer denom-trace [hash]", version.AppName), + Args: cobra.ExactArgs(1), + RunE: func(cmd *cobra.Command, args []string) error { + clientCtx, err := client.GetClientQueryContext(cmd) + if err != nil { + return err + } + queryClient := types.NewQueryClient(clientCtx) + + req := &types.QueryDenomTraceRequest{ + Hash: args[0], + } + + res, err := queryClient.DenomTrace(cmd.Context(), req) + if err != nil { + return err + } + + return clientCtx.PrintProto(res) + }, + } + + flags.AddQueryFlagsToCmd(cmd) + return cmd +} + +// GetCmdQueryDenomTraces defines the command to query all the denomination trace infos +// that this chain maintains.
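The `denom-trace` command above takes the hex hash of a full denomination path as its argument. Below is a minimal sketch of how that hash (and the matching `ibc/{hash}` voucher denomination) can be derived off-chain, assuming the `ParseDenomTrace`, `Hash`, and `IBCDenom` helpers from the transfer `types` package introduced in this patch; the trace path used here is purely illustrative.

```go
package main

import (
	"fmt"

	transfertypes "github.com/cosmos/cosmos-sdk/x/ibc/applications/transfer/types"
)

func main() {
	// Full trace path as recorded on the receiving chain: {port}/{channel}/{base denom}.
	// "transfer/channel-0/uatom" is an illustrative value, not one taken from this patch.
	trace := transfertypes.ParseDenomTrace("transfer/channel-0/uatom")

	// Hex hash that can be passed to `query ibc-transfer denom-trace [hash]`.
	fmt.Println("hash: ", trace.Hash().String())

	// Voucher denomination of the form ibc/{hash} used in bank balances.
	fmt.Println("denom:", trace.IBCDenom())
}
```

The transfer tx command later in this patch applies the same `ParseDenomTrace(...).IBCDenom()` conversion to denominations that are not already prefixed with `ibc/`.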
+func GetCmdQueryDenomTraces() *cobra.Command { + cmd := &cobra.Command{ + Use: "denom-traces", + Short: "Query the trace info for all token denominations", + Long: "Query the trace info for all token denominations", + Example: fmt.Sprintf("%s query ibc-transfer denom-traces", version.AppName), + Args: cobra.NoArgs, + RunE: func(cmd *cobra.Command, _ []string) error { + clientCtx, err := client.GetClientQueryContext(cmd) + if err != nil { + return err + } + queryClient := types.NewQueryClient(clientCtx) + + pageReq, err := client.ReadPageRequest(cmd.Flags()) + if err != nil { + return err + } + + req := &types.QueryDenomTracesRequest{ + Pagination: pageReq, + } + + res, err := queryClient.DenomTraces(cmd.Context(), req) + if err != nil { + return err + } + + return clientCtx.PrintProto(res) + }, + } + flags.AddQueryFlagsToCmd(cmd) + flags.AddPaginationFlagsToCmd(cmd, "denominations trace") + + return cmd +} + +// GetCmdParams returns the command handler for ibc-transfer parameter querying. +func GetCmdParams() *cobra.Command { + cmd := &cobra.Command{ + Use: "params", + Short: "Query the current ibc-transfer parameters", + Long: "Query the current ibc-transfer parameters", + Args: cobra.NoArgs, + Example: fmt.Sprintf("%s query ibc-transfer params", version.AppName), + RunE: func(cmd *cobra.Command, _ []string) error { + clientCtx, err := client.GetClientQueryContext(cmd) + if err != nil { + return err + } + queryClient := types.NewQueryClient(clientCtx) + + res, _ := queryClient.Params(cmd.Context(), &types.QueryParamsRequest{}) + return clientCtx.PrintProto(res.Params) + }, + } + + flags.AddQueryFlagsToCmd(cmd) + + return cmd +} diff --git a/applications/transfer/client/cli/tx.go b/applications/transfer/client/cli/tx.go new file mode 100644 index 0000000000..1f9e92f63c --- /dev/null +++ b/applications/transfer/client/cli/tx.go @@ -0,0 +1,117 @@ +package cli + +import ( + "fmt" + "strings" + + "github.com/spf13/cobra" + + "github.com/cosmos/cosmos-sdk/client" + "github.com/cosmos/cosmos-sdk/client/flags" + "github.com/cosmos/cosmos-sdk/client/tx" + sdk "github.com/cosmos/cosmos-sdk/types" + "github.com/cosmos/cosmos-sdk/types/msgservice" + "github.com/cosmos/cosmos-sdk/version" + "github.com/cosmos/cosmos-sdk/x/ibc/applications/transfer/types" + clienttypes "github.com/cosmos/cosmos-sdk/x/ibc/core/02-client/types" + channelutils "github.com/cosmos/cosmos-sdk/x/ibc/core/04-channel/client/utils" +) + +const ( + flagPacketTimeoutHeight = "packet-timeout-height" + flagPacketTimeoutTimestamp = "packet-timeout-timestamp" + flagAbsoluteTimeouts = "absolute-timeouts" +) + +// NewTransferTxCmd returns the command to create a NewMsgTransfer transaction +func NewTransferTxCmd() *cobra.Command { + cmd := &cobra.Command{ + Use: "transfer [src-port] [src-channel] [receiver] [amount]", + Short: "Transfer a fungible token through IBC", + Long: strings.TrimSpace(`Transfer a fungible token through IBC. Timeouts can be specified +as absolute or relative using the "absolute-timeouts" flag. Timeout height can be set by passing in the height string +in the form {revision}-{height} using the "packet-timeout-height" flag. Relative timeouts are added to +the block height and block timestamp queried from the latest consensus state corresponding +to the counterparty channel. 
Any timeout set to 0 is disabled.`), + Example: fmt.Sprintf("%s tx ibc-transfer transfer [src-port] [src-channel] [receiver] [amount]", version.AppName), + Args: cobra.ExactArgs(4), + RunE: func(cmd *cobra.Command, args []string) error { + clientCtx, err := client.GetClientTxContext(cmd) + if err != nil { + return err + } + sender := clientCtx.GetFromAddress() + srcPort := args[0] + srcChannel := args[1] + receiver := args[2] + + coin, err := sdk.ParseCoinNormalized(args[3]) + if err != nil { + return err + } + + if !strings.HasPrefix(coin.Denom, "ibc/") { + denomTrace := types.ParseDenomTrace(coin.Denom) + coin.Denom = denomTrace.IBCDenom() + } + + timeoutHeightStr, err := cmd.Flags().GetString(flagPacketTimeoutHeight) + if err != nil { + return err + } + timeoutHeight, err := clienttypes.ParseHeight(timeoutHeightStr) + if err != nil { + return err + } + + timeoutTimestamp, err := cmd.Flags().GetUint64(flagPacketTimeoutTimestamp) + if err != nil { + return err + } + + absoluteTimeouts, err := cmd.Flags().GetBool(flagAbsoluteTimeouts) + if err != nil { + return err + } + + // if the timeouts are not absolute, retrieve latest block height and block timestamp + // for the consensus state connected to the destination port/channel + if !absoluteTimeouts { + consensusState, height, _, err := channelutils.QueryLatestConsensusState(clientCtx, srcPort, srcChannel) + if err != nil { + return err + } + + if !timeoutHeight.IsZero() { + absoluteHeight := height + absoluteHeight.RevisionNumber += timeoutHeight.RevisionNumber + absoluteHeight.RevisionHeight += timeoutHeight.RevisionHeight + timeoutHeight = absoluteHeight + } + + if timeoutTimestamp != 0 { + timeoutTimestamp = consensusState.GetTimestamp() + timeoutTimestamp + } + } + + msg := types.NewMsgTransfer( + srcPort, srcChannel, coin, sender, receiver, timeoutHeight, timeoutTimestamp, + ) + svcMsgClientConn := &msgservice.ServiceMsgClientConn{} + msgClient := types.NewMsgClient(svcMsgClientConn) + _, err = msgClient.Transfer(cmd.Context(), msg) + if err != nil { + return err + } + + return tx.GenerateOrBroadcastTxCLI(clientCtx, cmd.Flags(), svcMsgClientConn.GetMsgs()...) + }, + } + + cmd.Flags().String(flagPacketTimeoutHeight, types.DefaultRelativePacketTimeoutHeight, "Packet timeout block height. The timeout is disabled when set to 0-0.") + cmd.Flags().Uint64(flagPacketTimeoutTimestamp, types.DefaultRelativePacketTimeoutTimestamp, "Packet timeout timestamp in nanoseconds. Default is 10 minutes. 
The timeout is disabled when set to 0.") + cmd.Flags().Bool(flagAbsoluteTimeouts, false, "Timeout flags are used as absolute timeouts.") + flags.AddTxFlagsToCmd(cmd) + + return cmd +} diff --git a/applications/transfer/handler.go b/applications/transfer/handler.go new file mode 100644 index 0000000000..7c992c920e --- /dev/null +++ b/applications/transfer/handler.go @@ -0,0 +1,23 @@ +package transfer + +import ( + sdk "github.com/cosmos/cosmos-sdk/types" + sdkerrors "github.com/cosmos/cosmos-sdk/types/errors" + "github.com/cosmos/cosmos-sdk/x/ibc/applications/transfer/types" +) + +// NewHandler returns sdk.Handler for IBC token transfer module messages +func NewHandler(k types.MsgServer) sdk.Handler { + return func(ctx sdk.Context, msg sdk.Msg) (*sdk.Result, error) { + ctx = ctx.WithEventManager(sdk.NewEventManager()) + + switch msg := msg.(type) { + case *types.MsgTransfer: + res, err := k.Transfer(sdk.WrapSDKContext(ctx), msg) + return sdk.WrapServiceResult(ctx, res, err) + + default: + return nil, sdkerrors.Wrapf(sdkerrors.ErrUnknownRequest, "unrecognized ICS-20 transfer message type: %T", msg) + } + } +} diff --git a/applications/transfer/handler_test.go b/applications/transfer/handler_test.go new file mode 100644 index 0000000000..92a0421011 --- /dev/null +++ b/applications/transfer/handler_test.go @@ -0,0 +1,123 @@ +package transfer_test + +import ( + "testing" + + "github.com/stretchr/testify/suite" + + sdk "github.com/cosmos/cosmos-sdk/types" + "github.com/cosmos/cosmos-sdk/x/ibc/applications/transfer/types" + clienttypes "github.com/cosmos/cosmos-sdk/x/ibc/core/02-client/types" + channeltypes "github.com/cosmos/cosmos-sdk/x/ibc/core/04-channel/types" + "github.com/cosmos/cosmos-sdk/x/ibc/core/exported" + ibctesting "github.com/cosmos/cosmos-sdk/x/ibc/testing" +) + +type TransferTestSuite struct { + suite.Suite + + coordinator *ibctesting.Coordinator + + // testing chains used for convenience and readability + chainA *ibctesting.TestChain + chainB *ibctesting.TestChain + chainC *ibctesting.TestChain +} + +func (suite *TransferTestSuite) SetupTest() { + suite.coordinator = ibctesting.NewCoordinator(suite.T(), 3) + suite.chainA = suite.coordinator.GetChain(ibctesting.GetChainID(0)) + suite.chainB = suite.coordinator.GetChain(ibctesting.GetChainID(1)) + suite.chainC = suite.coordinator.GetChain(ibctesting.GetChainID(2)) +} + +// constructs a send from chainA to chainB on the established channel/connection +// and sends the same coin back from chainB to chainA. 
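The `transfer` command and the handler above ultimately build and route a `MsgTransfer`. The following is a minimal sketch of constructing the same message programmatically, assuming the `NewMsgTransfer` constructor shown in this patch; the addresses, channel identifier, and amounts are hypothetical placeholders.

```go
package main

import (
	"fmt"

	sdk "github.com/cosmos/cosmos-sdk/types"
	transfertypes "github.com/cosmos/cosmos-sdk/x/ibc/applications/transfer/types"
	clienttypes "github.com/cosmos/cosmos-sdk/x/ibc/core/02-client/types"
)

func main() {
	// Placeholder addresses; a real client would use key-backed accounts.
	sender := sdk.AccAddress([]byte("sender______________"))
	receiver := sdk.AccAddress([]byte("receiver____________")).String()

	token := sdk.NewCoin("uatom", sdk.NewInt(100))

	// Absolute timeout at revision 0, height 1000; a zero value disables that timeout.
	timeoutHeight := clienttypes.NewHeight(0, 1000)
	var timeoutTimestamp uint64 // nanoseconds; 0 => disabled

	msg := transfertypes.NewMsgTransfer(
		"transfer", "channel-0", token, sender, receiver, timeoutHeight, timeoutTimestamp,
	)
	if err := msg.ValidateBasic(); err != nil {
		panic(err)
	}
	fmt.Printf("built %T sending %s\n", msg, msg.Token)
}
```

In the CLI above the same message is routed through the `Msg` service client and then broadcast with the standard tx flags.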
+func (suite *TransferTestSuite) TestHandleMsgTransfer() { + // setup between chainA and chainB + clientA, clientB, connA, connB := suite.coordinator.SetupClientConnections(suite.chainA, suite.chainB, exported.Tendermint) + channelA, channelB := suite.coordinator.CreateTransferChannels(suite.chainA, suite.chainB, connA, connB, channeltypes.UNORDERED) + // originalBalance := suite.chainA.App.BankKeeper.GetBalance(suite.chainA.GetContext(), suite.chainA.SenderAccount.GetAddress(), sdk.DefaultBondDenom) + timeoutHeight := clienttypes.NewHeight(0, 110) + + coinToSendToB := sdk.NewCoin(sdk.DefaultBondDenom, sdk.NewInt(100)) + + // send from chainA to chainB + msg := types.NewMsgTransfer(channelA.PortID, channelA.ID, coinToSendToB, suite.chainA.SenderAccount.GetAddress(), suite.chainB.SenderAccount.GetAddress().String(), timeoutHeight, 0) + + err := suite.coordinator.SendMsg(suite.chainA, suite.chainB, clientB, msg) + suite.Require().NoError(err) // message committed + + // relay send + fungibleTokenPacket := types.NewFungibleTokenPacketData(coinToSendToB.Denom, coinToSendToB.Amount.Uint64(), suite.chainA.SenderAccount.GetAddress().String(), suite.chainB.SenderAccount.GetAddress().String()) + packet := channeltypes.NewPacket(fungibleTokenPacket.GetBytes(), 1, channelA.PortID, channelA.ID, channelB.PortID, channelB.ID, timeoutHeight, 0) + ack := channeltypes.NewResultAcknowledgement([]byte{byte(1)}) + err = suite.coordinator.RelayPacket(suite.chainA, suite.chainB, clientA, clientB, packet, ack.GetBytes()) + suite.Require().NoError(err) // relay committed + + // check that voucher exists on chain B + voucherDenomTrace := types.ParseDenomTrace(types.GetPrefixedDenom(packet.GetDestPort(), packet.GetDestChannel(), sdk.DefaultBondDenom)) + balance := suite.chainB.App.BankKeeper.GetBalance(suite.chainB.GetContext(), suite.chainB.SenderAccount.GetAddress(), voucherDenomTrace.IBCDenom()) + + coinSentFromAToB := types.GetTransferCoin(channelB.PortID, channelB.ID, sdk.DefaultBondDenom, 100) + suite.Require().Equal(coinSentFromAToB, balance) + + // setup between chainB to chainC + clientOnBForC, clientOnCForB, connOnBForC, connOnCForB := suite.coordinator.SetupClientConnections(suite.chainB, suite.chainC, exported.Tendermint) + channelOnBForC, channelOnCForB := suite.coordinator.CreateTransferChannels(suite.chainB, suite.chainC, connOnBForC, connOnCForB, channeltypes.UNORDERED) + + // send from chainB to chainC + msg = types.NewMsgTransfer(channelOnBForC.PortID, channelOnBForC.ID, coinSentFromAToB, suite.chainB.SenderAccount.GetAddress(), suite.chainC.SenderAccount.GetAddress().String(), timeoutHeight, 0) + + err = suite.coordinator.SendMsg(suite.chainB, suite.chainC, clientOnCForB, msg) + suite.Require().NoError(err) // message committed + + // relay send + // NOTE: fungible token is prefixed with the full trace in order to verify the packet commitment + fullDenomPath := types.GetPrefixedDenom(channelOnCForB.PortID, channelOnCForB.ID, voucherDenomTrace.GetFullDenomPath()) + fungibleTokenPacket = types.NewFungibleTokenPacketData(voucherDenomTrace.GetFullDenomPath(), coinSentFromAToB.Amount.Uint64(), suite.chainB.SenderAccount.GetAddress().String(), suite.chainC.SenderAccount.GetAddress().String()) + packet = channeltypes.NewPacket(fungibleTokenPacket.GetBytes(), 1, channelOnBForC.PortID, channelOnBForC.ID, channelOnCForB.PortID, channelOnCForB.ID, timeoutHeight, 0) + err = suite.coordinator.RelayPacket(suite.chainB, suite.chainC, clientOnBForC, clientOnCForB, packet, ack.GetBytes()) + 
suite.Require().NoError(err) // relay committed + + coinSentFromBToC := sdk.NewInt64Coin(types.ParseDenomTrace(fullDenomPath).IBCDenom(), 100) + balance = suite.chainC.App.BankKeeper.GetBalance(suite.chainC.GetContext(), suite.chainC.SenderAccount.GetAddress(), coinSentFromBToC.Denom) + + // check that the balance is updated on chainC + suite.Require().Equal(coinSentFromBToC, balance) + + // check that balance on chain B is empty + balance = suite.chainB.App.BankKeeper.GetBalance(suite.chainB.GetContext(), suite.chainB.SenderAccount.GetAddress(), coinSentFromBToC.Denom) + suite.Require().Zero(balance.Amount.Int64()) + + // send from chainC back to chainB + msg = types.NewMsgTransfer(channelOnCForB.PortID, channelOnCForB.ID, coinSentFromBToC, suite.chainC.SenderAccount.GetAddress(), suite.chainB.SenderAccount.GetAddress().String(), timeoutHeight, 0) + + err = suite.coordinator.SendMsg(suite.chainC, suite.chainB, clientOnBForC, msg) + suite.Require().NoError(err) // message committed + + // relay send + // NOTE: fungible token is prefixed with the full trace in order to verify the packet commitment + fungibleTokenPacket = types.NewFungibleTokenPacketData(fullDenomPath, coinSentFromBToC.Amount.Uint64(), suite.chainC.SenderAccount.GetAddress().String(), suite.chainB.SenderAccount.GetAddress().String()) + packet = channeltypes.NewPacket(fungibleTokenPacket.GetBytes(), 1, channelOnCForB.PortID, channelOnCForB.ID, channelOnBForC.PortID, channelOnBForC.ID, timeoutHeight, 0) + err = suite.coordinator.RelayPacket(suite.chainC, suite.chainB, clientOnCForB, clientOnBForC, packet, ack.GetBytes()) + suite.Require().NoError(err) // relay committed + + balance = suite.chainB.App.BankKeeper.GetBalance(suite.chainB.GetContext(), suite.chainB.SenderAccount.GetAddress(), coinSentFromAToB.Denom) + + // check that the voucher balance on chainB is restored to its state before the send to chainC + suite.Require().Equal(coinSentFromAToB, balance) + + // check that module account escrow address is empty + escrowAddress := types.GetEscrowAddress(packet.GetDestPort(), packet.GetDestChannel()) + balance = suite.chainB.App.BankKeeper.GetBalance(suite.chainB.GetContext(), escrowAddress, sdk.DefaultBondDenom) + suite.Require().Equal(sdk.NewCoin(sdk.DefaultBondDenom, sdk.ZeroInt()), balance) + + // check that the voucher balance on chainC is now empty + balance = suite.chainC.App.BankKeeper.GetBalance(suite.chainC.GetContext(), suite.chainC.SenderAccount.GetAddress(), voucherDenomTrace.IBCDenom()) + suite.Require().Zero(balance.Amount.Int64()) +} + +func TestTransferTestSuite(t *testing.T) { + suite.Run(t, new(TransferTestSuite)) +} diff --git a/applications/transfer/keeper/MBT_README.md b/applications/transfer/keeper/MBT_README.md new file mode 100644 index 0000000000..8a5930f6d3 --- /dev/null +++ b/applications/transfer/keeper/MBT_README.md @@ -0,0 +1,51 @@ +## Token Transfer Model-based Testing Guide + +In the process of the IBC Audit performed by Informal Systems, we have implemented +a preliminary set of model-based tests for the ICS-20 Token Transfer implementation. + +Model-based tests are based on the formal `TLA+` model of the Token transfer relay functions: see [relay.tla](relay_model/relay.tla). +The tests themselves are simple `TLA+` assertions that describe the desired shape of executions that send or receive tokens; +see [relay_tests.tla](relay_model/relay_tests.tla) for some examples. +To be able to specify test assertions the TLA+ model contains the `history` variable, +which records the whole execution history.
+So, by way of referring to `history` you simply specify declaratively what execution history you want to see. + +After you have specified your `TLA+` test, you can run it using the [Apalache model checker](https://github.com/informalsystems/apalache). +E.g. for the test `TestUnescrowTokens` run + +```bash +apalache-mc check --inv=TestUnescrowTokensInv relay_tests.tla +``` + +In case there are no errors in the TLA+ model or in the test assertions, this will produce a couple of so-called _counterexamples_. +This is terminology from the model-checking community; for testing purposes they can be considered simply as model executions. +See the files `counterexample.tla` for a human-readable representation, and `counterexample.json` for a machine-readable one. + +In order to execute the produced test, you need to translate it into another format. +For that translation you need the tool [Jsonatr (JSON Artifact Translator)](https://github.com/informalsystems/jsonatr). +It performs the translation using this [transformation spec](relay_model/apalache-to-relay-test2.json). + +To transform a counterexample into a test, run + +```bash +jsonatr --use apalache-to-relay-test2.json --in counterexample.json --out model_based_tests/YourTestName.json +``` + +Now, if you run `go test` in this directory, the file you have produced above should be picked up by the [model-based test driver](mbt_relay_test.go), +and executed automatically. + + +The easiest way to run Apalache is by +[using a Docker image](https://github.com/informalsystems/apalache/blob/master/docs/manual.md#useDocker); +to run Jsonatr you need to locally clone the repository, and then, +after building it, add the `target/debug` directory into your `PATH`. + +To wrap the Apalache Docker image into an executable you might create the following executable bash script `apalache-mc`: + +```bash +#!/bin/bash +docker run --rm -v $(pwd):/var/apalache apalache/mc $@ +``` + + +In case of any questions please don't hesitate to contact Andrey Kuprianov (andrey@informal.systems). \ No newline at end of file diff --git a/applications/transfer/keeper/encoding.go b/applications/transfer/keeper/encoding.go new file mode 100644 index 0000000000..ddb1bc4b0c --- /dev/null +++ b/applications/transfer/keeper/encoding.go @@ -0,0 +1,35 @@ +package keeper + +import ( + "github.com/cosmos/cosmos-sdk/x/ibc/applications/transfer/types" +) + +// UnmarshalDenomTrace attempts to decode and return a DenomTrace object from +// raw encoded bytes. +func (k Keeper) UnmarshalDenomTrace(bz []byte) (types.DenomTrace, error) { + var denomTrace types.DenomTrace + if err := k.cdc.UnmarshalBinaryBare(bz, &denomTrace); err != nil { + return types.DenomTrace{}, err + } + return denomTrace, nil +} + +// MustUnmarshalDenomTrace attempts to decode and return a DenomTrace object from +// raw encoded bytes. It panics on error. +func (k Keeper) MustUnmarshalDenomTrace(bz []byte) types.DenomTrace { + var denomTrace types.DenomTrace + k.cdc.MustUnmarshalBinaryBare(bz, &denomTrace) + return denomTrace +} + +// MarshalDenomTrace attempts to encode a DenomTrace object and returns the +// raw encoded bytes. +func (k Keeper) MarshalDenomTrace(denomTrace types.DenomTrace) ([]byte, error) { + return k.cdc.MarshalBinaryBare(&denomTrace) +} + +// MustMarshalDenomTrace attempts to encode a DenomTrace object and returns the +// raw encoded bytes. It panics on error.
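As a rough sketch of what the codec calls in this file do, the same round trip can be reproduced with a stand-alone proto codec (the keeper's `cdc` is a `codec.BinaryMarshaler` wired up during app construction); the trace values below are illustrative only.

```go
package main

import (
	"fmt"

	"github.com/cosmos/cosmos-sdk/codec"
	codectypes "github.com/cosmos/cosmos-sdk/codec/types"
	transfertypes "github.com/cosmos/cosmos-sdk/x/ibc/applications/transfer/types"
)

func main() {
	// Stand-alone proto codec; the keeper's cdc is constructed the same way in app setup.
	cdc := codec.NewProtoCodec(codectypes.NewInterfaceRegistry())

	// Illustrative trace: one hop over port "transfer", channel "channel-0".
	trace := transfertypes.DenomTrace{
		Path:      "transfer/channel-0",
		BaseDenom: "uatom",
	}

	bz, err := cdc.MarshalBinaryBare(&trace)
	if err != nil {
		panic(err)
	}

	var decoded transfertypes.DenomTrace
	if err := cdc.UnmarshalBinaryBare(bz, &decoded); err != nil {
		panic(err)
	}
	fmt.Println(decoded.GetFullDenomPath()) // transfer/channel-0/uatom
}
```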
+func (k Keeper) MustMarshalDenomTrace(denomTrace types.DenomTrace) []byte { + return k.cdc.MustMarshalBinaryBare(&denomTrace) +} diff --git a/applications/transfer/keeper/genesis.go b/applications/transfer/keeper/genesis.go new file mode 100644 index 0000000000..58a0c08115 --- /dev/null +++ b/applications/transfer/keeper/genesis.go @@ -0,0 +1,45 @@ +package keeper + +import ( + "fmt" + + sdk "github.com/cosmos/cosmos-sdk/types" + "github.com/cosmos/cosmos-sdk/x/ibc/applications/transfer/types" +) + +// InitGenesis initializes the ibc-transfer state and binds to PortID. +func (k Keeper) InitGenesis(ctx sdk.Context, state types.GenesisState) { + k.SetPort(ctx, state.PortId) + + for _, trace := range state.DenomTraces { + k.SetDenomTrace(ctx, trace) + } + + // Only try to bind to port if it is not already bound, since we may already own + // port capability from capability InitGenesis + if !k.IsBound(ctx, state.PortId) { + // transfer module binds to the transfer port on InitChain + // and claims the returned capability + err := k.BindPort(ctx, state.PortId) + if err != nil { + panic(fmt.Sprintf("could not claim port capability: %v", err)) + } + } + + k.SetParams(ctx, state.Params) + + // check if the module account exists + moduleAcc := k.GetTransferAccount(ctx) + if moduleAcc == nil { + panic(fmt.Sprintf("%s module account has not been set", types.ModuleName)) + } +} + +// ExportGenesis exports ibc-transfer module's portID and denom trace info into its genesis state. +func (k Keeper) ExportGenesis(ctx sdk.Context) *types.GenesisState { + return &types.GenesisState{ + PortId: k.GetPort(ctx), + DenomTraces: k.GetAllDenomTraces(ctx), + Params: k.GetParams(ctx), + } +} diff --git a/applications/transfer/keeper/genesis_test.go b/applications/transfer/keeper/genesis_test.go new file mode 100644 index 0000000000..a85434911f --- /dev/null +++ b/applications/transfer/keeper/genesis_test.go @@ -0,0 +1,39 @@ +package keeper_test + +import ( + "fmt" + + "github.com/cosmos/cosmos-sdk/x/ibc/applications/transfer/types" +) + +func (suite *KeeperTestSuite) TestGenesis() { + var ( + path string + traces types.Traces + ) + + for i := 0; i < 5; i++ { + prefix := fmt.Sprintf("transfer/channelToChain%d", i) + if i == 0 { + path = prefix + } else { + path = prefix + "/" + path + } + + denomTrace := types.DenomTrace{ + BaseDenom: "uatom", + Path: path, + } + traces = append(types.Traces{denomTrace}, traces...) 
+ suite.chainA.App.TransferKeeper.SetDenomTrace(suite.chainA.GetContext(), denomTrace) + } + + genesis := suite.chainA.App.TransferKeeper.ExportGenesis(suite.chainA.GetContext()) + + suite.Require().Equal(types.PortID, genesis.PortId) + suite.Require().Equal(traces.Sort(), genesis.DenomTraces) + + suite.Require().NotPanics(func() { + suite.chainA.App.TransferKeeper.InitGenesis(suite.chainA.GetContext(), *genesis) + }) +} diff --git a/applications/transfer/keeper/grpc_query.go b/applications/transfer/keeper/grpc_query.go new file mode 100644 index 0000000000..b6347895b4 --- /dev/null +++ b/applications/transfer/keeper/grpc_query.go @@ -0,0 +1,83 @@ +package keeper + +import ( + "context" + "fmt" + + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" + + "github.com/cosmos/cosmos-sdk/store/prefix" + sdk "github.com/cosmos/cosmos-sdk/types" + sdkerrors "github.com/cosmos/cosmos-sdk/types/errors" + "github.com/cosmos/cosmos-sdk/types/query" + "github.com/cosmos/cosmos-sdk/x/ibc/applications/transfer/types" +) + +var _ types.QueryServer = Keeper{} + +// DenomTrace implements the Query/DenomTrace gRPC method +func (q Keeper) DenomTrace(c context.Context, req *types.QueryDenomTraceRequest) (*types.QueryDenomTraceResponse, error) { + if req == nil { + return nil, status.Error(codes.InvalidArgument, "empty request") + } + + hash, err := types.ParseHexHash(req.Hash) + if err != nil { + return nil, status.Error(codes.InvalidArgument, fmt.Sprintf("invalid denom trace hash %s, %s", req.Hash, err)) + } + + ctx := sdk.UnwrapSDKContext(c) + denomTrace, found := q.GetDenomTrace(ctx, hash) + if !found { + return nil, status.Error( + codes.NotFound, + sdkerrors.Wrap(types.ErrTraceNotFound, req.Hash).Error(), + ) + } + + return &types.QueryDenomTraceResponse{ + DenomTrace: &denomTrace, + }, nil +} + +// DenomTraces implements the Query/DenomTraces gRPC method +func (q Keeper) DenomTraces(c context.Context, req *types.QueryDenomTracesRequest) (*types.QueryDenomTracesResponse, error) { + if req == nil { + return nil, status.Error(codes.InvalidArgument, "empty request") + } + + ctx := sdk.UnwrapSDKContext(c) + + traces := types.Traces{} + store := prefix.NewStore(ctx.KVStore(q.storeKey), types.DenomTraceKey) + + pageRes, err := query.Paginate(store, req.Pagination, func(_, value []byte) error { + result, err := q.UnmarshalDenomTrace(value) + if err != nil { + return err + } + + traces = append(traces, result) + return nil + }) + + if err != nil { + return nil, err + } + + return &types.QueryDenomTracesResponse{ + DenomTraces: traces.Sort(), + Pagination: pageRes, + }, nil +} + +// Params implements the Query/Params gRPC method +func (q Keeper) Params(c context.Context, _ *types.QueryParamsRequest) (*types.QueryParamsResponse, error) { + ctx := sdk.UnwrapSDKContext(c) + params := q.GetParams(ctx) + + return &types.QueryParamsResponse{ + Params: ¶ms, + }, nil +} diff --git a/applications/transfer/keeper/grpc_query_test.go b/applications/transfer/keeper/grpc_query_test.go new file mode 100644 index 0000000000..0b16e0726b --- /dev/null +++ b/applications/transfer/keeper/grpc_query_test.go @@ -0,0 +1,142 @@ +package keeper_test + +import ( + "fmt" + + sdk "github.com/cosmos/cosmos-sdk/types" + "github.com/cosmos/cosmos-sdk/types/query" + "github.com/cosmos/cosmos-sdk/x/ibc/applications/transfer/types" +) + +func (suite *KeeperTestSuite) TestQueryDenomTrace() { + var ( + req *types.QueryDenomTraceRequest + expTrace types.DenomTrace + ) + + testCases := []struct { + msg string + malleate func() + expPass 
bool + }{ + { + "invalid hex hash", + func() { + req = &types.QueryDenomTraceRequest{ + Hash: "!@#!@#!", + } + }, + false, + }, + { + "not found denom trace", + func() { + expTrace.Path = "transfer/channelToA/transfer/channelToB" + expTrace.BaseDenom = "uatom" + req = &types.QueryDenomTraceRequest{ + Hash: expTrace.Hash().String(), + } + }, + false, + }, + { + "success", + func() { + expTrace.Path = "transfer/channelToA/transfer/channelToB" + expTrace.BaseDenom = "uatom" + suite.chainA.App.TransferKeeper.SetDenomTrace(suite.chainA.GetContext(), expTrace) + + req = &types.QueryDenomTraceRequest{ + Hash: expTrace.Hash().String(), + } + }, + true, + }, + } + + for _, tc := range testCases { + suite.Run(fmt.Sprintf("Case %s", tc.msg), func() { + suite.SetupTest() // reset + + tc.malleate() + ctx := sdk.WrapSDKContext(suite.chainA.GetContext()) + + res, err := suite.queryClient.DenomTrace(ctx, req) + + if tc.expPass { + suite.Require().NoError(err) + suite.Require().NotNil(res) + suite.Require().Equal(&expTrace, res.DenomTrace) + } else { + suite.Require().Error(err) + } + }) + } +} + +func (suite *KeeperTestSuite) TestQueryDenomTraces() { + var ( + req *types.QueryDenomTracesRequest + expTraces = types.Traces(nil) + ) + + testCases := []struct { + msg string + malleate func() + expPass bool + }{ + { + "empty pagination", + func() { + req = &types.QueryDenomTracesRequest{} + }, + true, + }, + { + "success", + func() { + expTraces = append(expTraces, types.DenomTrace{Path: "", BaseDenom: "uatom"}) + expTraces = append(expTraces, types.DenomTrace{Path: "transfer/channelToB", BaseDenom: "uatom"}) + expTraces = append(expTraces, types.DenomTrace{Path: "transfer/channelToA/transfer/channelToB", BaseDenom: "uatom"}) + + for _, trace := range expTraces { + suite.chainA.App.TransferKeeper.SetDenomTrace(suite.chainA.GetContext(), trace) + } + + req = &types.QueryDenomTracesRequest{ + Pagination: &query.PageRequest{ + Limit: 5, + CountTotal: false, + }, + } + }, + true, + }, + } + + for _, tc := range testCases { + suite.Run(fmt.Sprintf("Case %s", tc.msg), func() { + suite.SetupTest() // reset + + tc.malleate() + ctx := sdk.WrapSDKContext(suite.chainA.GetContext()) + + res, err := suite.queryClient.DenomTraces(ctx, req) + + if tc.expPass { + suite.Require().NoError(err) + suite.Require().NotNil(res) + suite.Require().Equal(expTraces.Sort(), res.DenomTraces) + } else { + suite.Require().Error(err) + } + }) + } +} + +func (suite *KeeperTestSuite) TestQueryParams() { + ctx := sdk.WrapSDKContext(suite.chainA.GetContext()) + expParams := types.DefaultParams() + res, _ := suite.queryClient.Params(ctx, &types.QueryParamsRequest{}) + suite.Require().Equal(&expParams, res.Params) +} diff --git a/applications/transfer/keeper/keeper.go b/applications/transfer/keeper/keeper.go new file mode 100644 index 0000000000..a2eebb55e1 --- /dev/null +++ b/applications/transfer/keeper/keeper.go @@ -0,0 +1,169 @@ +package keeper + +import ( + tmbytes "github.com/tendermint/tendermint/libs/bytes" + "github.com/tendermint/tendermint/libs/log" + + "github.com/cosmos/cosmos-sdk/codec" + "github.com/cosmos/cosmos-sdk/store/prefix" + sdk "github.com/cosmos/cosmos-sdk/types" + sdkerrors "github.com/cosmos/cosmos-sdk/types/errors" + authtypes "github.com/cosmos/cosmos-sdk/x/auth/types" + capabilitykeeper "github.com/cosmos/cosmos-sdk/x/capability/keeper" + capabilitytypes "github.com/cosmos/cosmos-sdk/x/capability/types" + "github.com/cosmos/cosmos-sdk/x/ibc/applications/transfer/types" + channeltypes 
"github.com/cosmos/cosmos-sdk/x/ibc/core/04-channel/types" + host "github.com/cosmos/cosmos-sdk/x/ibc/core/24-host" + paramtypes "github.com/cosmos/cosmos-sdk/x/params/types" +) + +// Keeper defines the IBC fungible transfer keeper +type Keeper struct { + storeKey sdk.StoreKey + cdc codec.BinaryMarshaler + paramSpace paramtypes.Subspace + + channelKeeper types.ChannelKeeper + portKeeper types.PortKeeper + authKeeper types.AccountKeeper + bankKeeper types.BankKeeper + scopedKeeper capabilitykeeper.ScopedKeeper +} + +// NewKeeper creates a new IBC transfer Keeper instance +func NewKeeper( + cdc codec.BinaryMarshaler, key sdk.StoreKey, paramSpace paramtypes.Subspace, + channelKeeper types.ChannelKeeper, portKeeper types.PortKeeper, + authKeeper types.AccountKeeper, bankKeeper types.BankKeeper, scopedKeeper capabilitykeeper.ScopedKeeper, +) Keeper { + + // ensure ibc transfer module account is set + if addr := authKeeper.GetModuleAddress(types.ModuleName); addr == nil { + panic("the IBC transfer module account has not been set") + } + + // set KeyTable if it has not already been set + if !paramSpace.HasKeyTable() { + paramSpace = paramSpace.WithKeyTable(types.ParamKeyTable()) + } + + return Keeper{ + cdc: cdc, + storeKey: key, + paramSpace: paramSpace, + channelKeeper: channelKeeper, + portKeeper: portKeeper, + authKeeper: authKeeper, + bankKeeper: bankKeeper, + scopedKeeper: scopedKeeper, + } +} + +// Logger returns a module-specific logger. +func (k Keeper) Logger(ctx sdk.Context) log.Logger { + return ctx.Logger().With("module", "x/"+host.ModuleName+"-"+types.ModuleName) +} + +// GetTransferAccount returns the ICS20 - transfers ModuleAccount +func (k Keeper) GetTransferAccount(ctx sdk.Context) authtypes.ModuleAccountI { + return k.authKeeper.GetModuleAccount(ctx, types.ModuleName) +} + +// ChanCloseInit defines a wrapper function for the channel Keeper's function +// in order to expose it to the ICS20 transfer handler. +func (k Keeper) ChanCloseInit(ctx sdk.Context, portID, channelID string) error { + capName := host.ChannelCapabilityPath(portID, channelID) + chanCap, ok := k.scopedKeeper.GetCapability(ctx, capName) + if !ok { + return sdkerrors.Wrapf(channeltypes.ErrChannelCapabilityNotFound, "could not retrieve channel capability at: %s", capName) + } + return k.channelKeeper.ChanCloseInit(ctx, portID, channelID, chanCap) +} + +// IsBound checks if the transfer module is already bound to the desired port +func (k Keeper) IsBound(ctx sdk.Context, portID string) bool { + _, ok := k.scopedKeeper.GetCapability(ctx, host.PortPath(portID)) + return ok +} + +// BindPort defines a wrapper function for the ort Keeper's function in +// order to expose it to module's InitGenesis function +func (k Keeper) BindPort(ctx sdk.Context, portID string) error { + cap := k.portKeeper.BindPort(ctx, portID) + return k.ClaimCapability(ctx, cap, host.PortPath(portID)) +} + +// GetPort returns the portID for the transfer module. Used in ExportGenesis +func (k Keeper) GetPort(ctx sdk.Context) string { + store := ctx.KVStore(k.storeKey) + return string(store.Get(types.PortKey)) +} + +// SetPort sets the portID for the transfer module. Used in InitGenesis +func (k Keeper) SetPort(ctx sdk.Context, portID string) { + store := ctx.KVStore(k.storeKey) + store.Set(types.PortKey, []byte(portID)) +} + +// GetDenomTrace retreives the full identifiers trace and base denomination from the store. 
+func (k Keeper) GetDenomTrace(ctx sdk.Context, denomTraceHash tmbytes.HexBytes) (types.DenomTrace, bool) { + store := prefix.NewStore(ctx.KVStore(k.storeKey), types.DenomTraceKey) + bz := store.Get(denomTraceHash) + if bz == nil { + return types.DenomTrace{}, false + } + + denomTrace := k.MustUnmarshalDenomTrace(bz) + return denomTrace, true +} + +// HasDenomTrace checks if the key with the given denomination trace hash exists on the store. +func (k Keeper) HasDenomTrace(ctx sdk.Context, denomTraceHash tmbytes.HexBytes) bool { + store := prefix.NewStore(ctx.KVStore(k.storeKey), types.DenomTraceKey) + return store.Has(denomTraceHash) +} + +// SetDenomTrace sets a new {trace hash -> denom trace} pair to the store. +func (k Keeper) SetDenomTrace(ctx sdk.Context, denomTrace types.DenomTrace) { + store := prefix.NewStore(ctx.KVStore(k.storeKey), types.DenomTraceKey) + bz := k.MustMarshalDenomTrace(denomTrace) + store.Set(denomTrace.Hash(), bz) +} + +// GetAllDenomTraces returns the trace information for all the denominations. +func (k Keeper) GetAllDenomTraces(ctx sdk.Context) types.Traces { + traces := types.Traces{} + k.IterateDenomTraces(ctx, func(denomTrace types.DenomTrace) bool { + traces = append(traces, denomTrace) + return false + }) + + return traces.Sort() +} + +// IterateDenomTraces iterates over the denomination traces in the store +// and performs a callback function. +func (k Keeper) IterateDenomTraces(ctx sdk.Context, cb func(denomTrace types.DenomTrace) bool) { + store := ctx.KVStore(k.storeKey) + iterator := sdk.KVStorePrefixIterator(store, types.DenomTraceKey) + + defer iterator.Close() + for ; iterator.Valid(); iterator.Next() { + + denomTrace := k.MustUnmarshalDenomTrace(iterator.Value()) + if cb(denomTrace) { + break + } + } +} + +// AuthenticateCapability wraps the scopedKeeper's AuthenticateCapability function +func (k Keeper) AuthenticateCapability(ctx sdk.Context, cap *capabilitytypes.Capability, name string) bool { + return k.scopedKeeper.AuthenticateCapability(ctx, cap, name) +} + +// ClaimCapability allows the transfer module to claim a capability that the IBC module +// passes to it +func (k Keeper) ClaimCapability(ctx sdk.Context, cap *capabilitytypes.Capability, name string) error { + return k.scopedKeeper.ClaimCapability(ctx, cap, name) +} diff --git a/applications/transfer/keeper/keeper_test.go b/applications/transfer/keeper/keeper_test.go new file mode 100644 index 0000000000..cce9cbccae --- /dev/null +++ b/applications/transfer/keeper/keeper_test.go @@ -0,0 +1,51 @@ +package keeper_test + +import ( + "testing" + + "github.com/stretchr/testify/suite" + "github.com/tendermint/tendermint/crypto" + + "github.com/cosmos/cosmos-sdk/baseapp" + sdk "github.com/cosmos/cosmos-sdk/types" + "github.com/cosmos/cosmos-sdk/x/ibc/applications/transfer/types" + ibctesting "github.com/cosmos/cosmos-sdk/x/ibc/testing" +) + +type KeeperTestSuite struct { + suite.Suite + + coordinator *ibctesting.Coordinator + + // testing chains used for convenience and readability + chainA *ibctesting.TestChain + chainB *ibctesting.TestChain + chainC *ibctesting.TestChain + + queryClient types.QueryClient +} + +func (suite *KeeperTestSuite) SetupTest() { + suite.coordinator = ibctesting.NewCoordinator(suite.T(), 3) + suite.chainA = suite.coordinator.GetChain(ibctesting.GetChainID(0)) + suite.chainB = suite.coordinator.GetChain(ibctesting.GetChainID(1)) + suite.chainC = suite.coordinator.GetChain(ibctesting.GetChainID(2)) + + queryHelper := 
baseapp.NewQueryServerTestHelper(suite.chainA.GetContext(), suite.chainA.App.InterfaceRegistry()) + types.RegisterQueryServer(queryHelper, suite.chainA.App.TransferKeeper) + suite.queryClient = types.NewQueryClient(queryHelper) +} + +func (suite *KeeperTestSuite) TestGetTransferAccount() { + expectedMaccAddr := sdk.AccAddress(crypto.AddressHash([]byte(types.ModuleName))) + + macc := suite.chainA.App.TransferKeeper.GetTransferAccount(suite.chainA.GetContext()) + + suite.Require().NotNil(macc) + suite.Require().Equal(types.ModuleName, macc.GetName()) + suite.Require().Equal(expectedMaccAddr, macc.GetAddress()) +} + +func TestKeeperTestSuite(t *testing.T) { + suite.Run(t, new(KeeperTestSuite)) +} diff --git a/applications/transfer/keeper/mbt_relay_test.go b/applications/transfer/keeper/mbt_relay_test.go new file mode 100644 index 0000000000..cd64fbabc2 --- /dev/null +++ b/applications/transfer/keeper/mbt_relay_test.go @@ -0,0 +1,378 @@ +package keeper_test + +/// This file is a test driver for model-based tests generated from the TLA+ model of token transfer +/// Written by Andrey Kuprianov within the scope of IBC Audit performed by Informal Systems. +/// In case of any questions please don't hesitate to contact andrey@informal.systems. + +import ( + "encoding/json" + "fmt" + "io/ioutil" + "strconv" + "strings" + + "github.com/tendermint/tendermint/crypto" + + sdk "github.com/cosmos/cosmos-sdk/types" + sdkerrors "github.com/cosmos/cosmos-sdk/types/errors" + "github.com/cosmos/cosmos-sdk/x/ibc/applications/transfer/types" + clienttypes "github.com/cosmos/cosmos-sdk/x/ibc/core/02-client/types" + channeltypes "github.com/cosmos/cosmos-sdk/x/ibc/core/04-channel/types" + "github.com/cosmos/cosmos-sdk/x/ibc/core/exported" + ibctesting "github.com/cosmos/cosmos-sdk/x/ibc/testing" +) + +type TlaBalance struct { + Address []string `json:"address"` + Denom []string `json:"denom"` + Amount int64 `json:"amount"` +} + +type TlaFungibleTokenPacketData struct { + Sender string `json:"sender"` + Receiver string `json:"receiver"` + Amount int `json:"amount"` + Denom []string `json:"denom"` +} + +type TlaFungibleTokenPacket struct { + SourceChannel string `json:"sourceChannel"` + SourcePort string `json:"sourcePort"` + DestChannel string `json:"destChannel"` + DestPort string `json:"destPort"` + Data TlaFungibleTokenPacketData `json:"data"` +} + +type TlaOnRecvPacketTestCase = struct { + // The required subset of bank balances + BankBefore []TlaBalance `json:"bankBefore"` + // The packet to process + Packet TlaFungibleTokenPacket `json:"packet"` + // The handler to call + Handler string `json:"handler"` + // The expected changes in the bank + BankAfter []TlaBalance `json:"bankAfter"` + // Whether OnRecvPacket should fail or not + Error bool `json:"error"` +} + +type FungibleTokenPacket struct { + SourceChannel string + SourcePort string + DestChannel string + DestPort string + Data types.FungibleTokenPacketData +} + +type OnRecvPacketTestCase = struct { + description string + // The required subset of bank balances + bankBefore []Balance + // The packet to process + packet FungibleTokenPacket + // The handler to call + handler string + // The expected bank state after processing (wrt. 
bankBefore) + bankAfter []Balance + // Whether OnRecvPacket should pass or fail + pass bool +} + +type OwnedCoin struct { + Address string + Denom string +} + +type Balance struct { + Id string + Address string + Denom string + Amount sdk.Int +} + +func AddressFromString(address string) string { + return sdk.AccAddress(crypto.AddressHash([]byte(address))).String() +} + +func AddressFromTla(addr []string) string { + if len(addr) != 3 { + panic("failed to convert from TLA+ address: wrong number of address components") + } + s := "" + if len(addr[0]) == 0 && len(addr[1]) == 0 { + // simple address: id + s = addr[2] + } else if len(addr[2]) == 0 { + // escrow address: ics20-1\x00port/channel + s = fmt.Sprintf("%s\x00%s/%s", types.Version, addr[0], addr[1]) + } else { + panic("failed to convert from TLA+ address: neither simple nor escrow address") + } + return s +} + +func DenomFromTla(denom []string) string { + var i int + for i = 0; i+1 < len(denom) && len(denom[i]) == 0 && len(denom[i+1]) == 0; i += 2 { + // skip empty prefixes + } + return strings.Join(denom[i:], "/") +} + +func BalanceFromTla(balance TlaBalance) Balance { + return Balance{ + Id: AddressFromTla(balance.Address), + Address: AddressFromString(AddressFromTla(balance.Address)), + Denom: DenomFromTla(balance.Denom), + Amount: sdk.NewInt(balance.Amount), + } +} + +func BalancesFromTla(tla []TlaBalance) []Balance { + balances := make([]Balance, 0) + for _, b := range tla { + balances = append(balances, BalanceFromTla(b)) + } + return balances +} + +func FungibleTokenPacketFromTla(packet TlaFungibleTokenPacket) FungibleTokenPacket { + return FungibleTokenPacket{ + SourceChannel: packet.SourceChannel, + SourcePort: packet.SourcePort, + DestChannel: packet.DestChannel, + DestPort: packet.DestPort, + Data: types.NewFungibleTokenPacketData( + DenomFromTla(packet.Data.Denom), + uint64(packet.Data.Amount), + AddressFromString(packet.Data.Sender), + AddressFromString(packet.Data.Receiver)), + } +} + +func OnRecvPacketTestCaseFromTla(tc TlaOnRecvPacketTestCase) OnRecvPacketTestCase { + return OnRecvPacketTestCase{ + description: "auto-generated", + bankBefore: BalancesFromTla(tc.BankBefore), + packet: FungibleTokenPacketFromTla(tc.Packet), + handler: tc.Handler, + bankAfter: BalancesFromTla(tc.BankAfter), // TODO different semantics + pass: !tc.Error, + } +} + +var addressMap = make(map[string]string) + +type Bank struct { + balances map[OwnedCoin]sdk.Int +} + +// Make an empty bank +func MakeBank() Bank { + return Bank{balances: make(map[OwnedCoin]sdk.Int)} +} + +// Subtract other bank from this bank +func (bank *Bank) Sub(other *Bank) Bank { + diff := MakeBank() + for coin, amount := range bank.balances { + otherAmount, exists := other.balances[coin] + if exists { + diff.balances[coin] = amount.Sub(otherAmount) + } else { + diff.balances[coin] = amount + } + } + for coin, amount := range other.balances { + if _, exists := bank.balances[coin]; !exists { + diff.balances[coin] = amount.Neg() + } + } + return diff +} + +// Set specific bank balance +func (bank *Bank) SetBalance(address string, denom string, amount sdk.Int) { + bank.balances[OwnedCoin{address, denom}] = amount +} + +// Set several balances at once +func (bank *Bank) SetBalances(balances []Balance) { + for _, balance := range balances { + bank.balances[OwnedCoin{balance.Address, balance.Denom}] = balance.Amount + addressMap[balance.Address] = balance.Id + } +} + +func NullCoin() OwnedCoin { + return OwnedCoin{ + Address: AddressFromString(""), + Denom: "", + } +} + +// Set 
several balances at once +func BankFromBalances(balances []Balance) Bank { + bank := MakeBank() + for _, balance := range balances { + coin := OwnedCoin{balance.Address, balance.Denom} + if coin != NullCoin() { // ignore null coin + bank.balances[coin] = balance.Amount + addressMap[balance.Address] = balance.Id + } + } + return bank +} + +// String representation of all bank balances +func (bank *Bank) String() string { + str := "" + for coin, amount := range bank.balances { + str += coin.Address + if addressMap[coin.Address] != "" { + str += "(" + addressMap[coin.Address] + ")" + } + str += " : " + coin.Denom + " = " + amount.String() + "\n" + } + return str +} + +// String representation of non-zero bank balances +func (bank *Bank) NonZeroString() string { + str := "" + for coin, amount := range bank.balances { + if !amount.IsZero() { + str += coin.Address + " : " + coin.Denom + " = " + amount.String() + "\n" + } + } + return str +} + +// Construct a bank out of the chain bank +func BankOfChain(chain *ibctesting.TestChain) Bank { + bank := MakeBank() + chain.App.BankKeeper.IterateAllBalances(chain.GetContext(), func(address sdk.AccAddress, coin sdk.Coin) (stop bool) { + fullDenom := coin.Denom + if strings.HasPrefix(coin.Denom, "ibc/") { + fullDenom, _ = chain.App.TransferKeeper.DenomPathFromHash(chain.GetContext(), coin.Denom) + } + bank.SetBalance(address.String(), fullDenom, coin.Amount) + return false + }) + return bank +} + +// Check that the state of the bank is the bankBefore + expectedBankChange +func (suite *KeeperTestSuite) CheckBankBalances(chain *ibctesting.TestChain, bankBefore *Bank, expectedBankChange *Bank) error { + bankAfter := BankOfChain(chain) + bankChange := bankAfter.Sub(bankBefore) + diff := bankChange.Sub(expectedBankChange) + NonZeroString := diff.NonZeroString() + if len(NonZeroString) != 0 { + return sdkerrors.Wrap(sdkerrors.ErrInvalidCoins, "Unexpected changes in the bank: \n"+NonZeroString) + } + return nil +} + +func (suite *KeeperTestSuite) TestModelBasedRelay() { + dirname := "model_based_tests/" + files, err := ioutil.ReadDir(dirname) + if err != nil { + panic(fmt.Errorf("Failed to read model-based test files: %w", err)) + } + for _, file_info := range files { + var tlaTestCases = []TlaOnRecvPacketTestCase{} + if !strings.HasSuffix(file_info.Name(), ".json") { + continue + } + jsonBlob, err := ioutil.ReadFile(dirname + file_info.Name()) + if err != nil { + panic(fmt.Errorf("Failed to read JSON test fixture: %w", err)) + } + err = json.Unmarshal([]byte(jsonBlob), &tlaTestCases) + if err != nil { + panic(fmt.Errorf("Failed to parse JSON test fixture: %w", err)) + } + + suite.SetupTest() + _, _, connAB, connBA := suite.coordinator.SetupClientConnections(suite.chainA, suite.chainB, exported.Tendermint) + _, _, connBC, connCB := suite.coordinator.SetupClientConnections(suite.chainB, suite.chainC, exported.Tendermint) + suite.coordinator.CreateTransferChannels(suite.chainA, suite.chainB, connAB, connBA, channeltypes.UNORDERED) + suite.coordinator.CreateTransferChannels(suite.chainB, suite.chainC, connBC, connCB, channeltypes.UNORDERED) + + for i, tlaTc := range tlaTestCases { + tc := OnRecvPacketTestCaseFromTla(tlaTc) + registerDenom := func() { + denomTrace := types.ParseDenomTrace(tc.packet.Data.Denom) + traceHash := denomTrace.Hash() + if !suite.chainB.App.TransferKeeper.HasDenomTrace(suite.chainB.GetContext(), traceHash) { + suite.chainB.App.TransferKeeper.SetDenomTrace(suite.chainB.GetContext(), denomTrace) + } + } + + description := file_info.Name() + " 
# " + strconv.Itoa(i+1) + suite.Run(fmt.Sprintf("Case %s", description), func() { + seq := uint64(1) + packet := channeltypes.NewPacket(tc.packet.Data.GetBytes(), seq, tc.packet.SourcePort, tc.packet.SourceChannel, tc.packet.DestPort, tc.packet.DestChannel, clienttypes.NewHeight(0, 100), 0) + bankBefore := BankFromBalances(tc.bankBefore) + realBankBefore := BankOfChain(suite.chainB) + // First validate the packet itself (mimics what happens when the packet is being sent and/or received) + err := packet.ValidateBasic() + if err != nil { + suite.Require().False(tc.pass, err.Error()) + return + } + switch tc.handler { + case "SendTransfer": + var sender sdk.AccAddress + sender, err = sdk.AccAddressFromBech32(tc.packet.Data.Sender) + if err != nil { + panic("MBT failed to convert sender address") + } + registerDenom() + denomTrace := types.ParseDenomTrace(tc.packet.Data.Denom) + denom := denomTrace.IBCDenom() + err = sdk.ValidateDenom(denom) + if err == nil { + err = suite.chainB.App.TransferKeeper.SendTransfer( + suite.chainB.GetContext(), + tc.packet.SourcePort, + tc.packet.SourceChannel, + sdk.NewCoin(denom, sdk.NewIntFromUint64(tc.packet.Data.Amount)), + sender, + tc.packet.Data.Receiver, + clienttypes.NewHeight(0, 110), + 0) + } + case "OnRecvPacket": + err = suite.chainB.App.TransferKeeper.OnRecvPacket(suite.chainB.GetContext(), packet, tc.packet.Data) + case "OnTimeoutPacket": + registerDenom() + err = suite.chainB.App.TransferKeeper.OnTimeoutPacket(suite.chainB.GetContext(), packet, tc.packet.Data) + case "OnRecvAcknowledgementResult": + err = suite.chainB.App.TransferKeeper.OnAcknowledgementPacket( + suite.chainB.GetContext(), packet, tc.packet.Data, + channeltypes.NewResultAcknowledgement(nil)) + case "OnRecvAcknowledgementError": + registerDenom() + err = suite.chainB.App.TransferKeeper.OnAcknowledgementPacket( + suite.chainB.GetContext(), packet, tc.packet.Data, + channeltypes.NewErrorAcknowledgement("MBT Error Acknowledgement")) + default: + err = fmt.Errorf("Unknown handler: %s", tc.handler) + } + if err != nil { + suite.Require().False(tc.pass, err.Error()) + return + } + bankAfter := BankFromBalances(tc.bankAfter) + expectedBankChange := bankAfter.Sub(&bankBefore) + if err := suite.CheckBankBalances(suite.chainB, &realBankBefore, &expectedBankChange); err != nil { + suite.Require().False(tc.pass, err.Error()) + return + } + suite.Require().True(tc.pass) + }) + } + } +} diff --git a/applications/transfer/keeper/model_based_tests/Test5Packets.json b/applications/transfer/keeper/model_based_tests/Test5Packets.json new file mode 100644 index 0000000000..6ccdccc8ae --- /dev/null +++ b/applications/transfer/keeper/model_based_tests/Test5Packets.json @@ -0,0 +1,492 @@ +[ + { + "packet": { + "sourceChannel": "channel-0", + "sourcePort": "transfer", + "destChannel": "channel-1", + "destPort": "transfer", + "data": { + "sender": "a3", + "receiver": "a3", + "amount": 2, + "denom": [ + "", + "", + "", + "", + "btc" + ] + } + }, + "handler": "OnRecvPacket", + "bankBefore": [ + { + "address": [ + "", + "", + "" + ], + "denom": [ + "", + "", + "", + "", + "" + ], + "amount": 0 + } + ], + "bankAfter": [ + { + "address": [ + "", + "", + "" + ], + "denom": [ + "", + "", + "", + "", + "" + ], + "amount": 0 + }, + { + "address": [ + "", + "", + "a3" + ], + "denom": [ + "", + "", + "transfer", + "channel-1", + "btc" + ], + "amount": 2 + } + ], + "error": false + }, + { + "packet": { + "sourceChannel": "ethereum-hub", + "sourcePort": "channel-0", + "destChannel": "channel-1", + "destPort": 
"transfer", + "data": { + "sender": "a1", + "receiver": "a3", + "amount": 1, + "denom": [ + "cosmos-hub", + "", + "", + "", + "btc" + ] + } + }, + "handler": "SendTransfer", + "bankBefore": [ + { + "address": [ + "", + "", + "" + ], + "denom": [ + "", + "", + "", + "", + "" + ], + "amount": 0 + }, + { + "address": [ + "", + "", + "a3" + ], + "denom": [ + "", + "", + "transfer", + "channel-1", + "btc" + ], + "amount": 2 + } + ], + "bankAfter": [ + { + "address": [ + "", + "", + "" + ], + "denom": [ + "", + "", + "", + "", + "" + ], + "amount": 0 + }, + { + "address": [ + "", + "", + "a3" + ], + "denom": [ + "", + "", + "transfer", + "channel-1", + "btc" + ], + "amount": 2 + } + ], + "error": true + }, + { + "packet": { + "sourceChannel": "channel-0", + "sourcePort": "transfer", + "destChannel": "channel-1", + "destPort": "transfer", + "data": { + "sender": "a2", + "receiver": "a2", + "amount": 4, + "denom": [ + "", + "", + "ethereum-hub", + "cosmos-hub", + "atom" + ] + } + }, + "handler": "OnRecvPacket", + "bankBefore": [ + { + "address": [ + "", + "", + "" + ], + "denom": [ + "", + "", + "", + "", + "" + ], + "amount": 0 + }, + { + "address": [ + "", + "", + "a3" + ], + "denom": [ + "", + "", + "transfer", + "channel-1", + "btc" + ], + "amount": 2 + } + ], + "bankAfter": [ + { + "address": [ + "", + "", + "" + ], + "denom": [ + "", + "", + "", + "", + "" + ], + "amount": 0 + }, + { + "address": [ + "", + "", + "a2" + ], + "denom": [ + "transfer", + "channel-1", + "ethereum-hub", + "cosmos-hub", + "atom" + ], + "amount": 4 + }, + { + "address": [ + "", + "", + "a3" + ], + "denom": [ + "", + "", + "transfer", + "channel-1", + "btc" + ], + "amount": 2 + } + ], + "error": false + }, + { + "packet": { + "sourceChannel": "channel-0", + "sourcePort": "transfer", + "destChannel": "channel-1", + "destPort": "transfer", + "data": { + "sender": "", + "receiver": "a2", + "amount": 4, + "denom": [ + "", + "", + "ethereum-hub", + "cosmos-hub", + "atom" + ] + } + }, + "handler": "OnRecvPacket", + "bankBefore": [ + { + "address": [ + "", + "", + "" + ], + "denom": [ + "", + "", + "", + "", + "" + ], + "amount": 0 + }, + { + "address": [ + "", + "", + "a2" + ], + "denom": [ + "transfer", + "channel-1", + "ethereum-hub", + "cosmos-hub", + "atom" + ], + "amount": 4 + }, + { + "address": [ + "", + "", + "a3" + ], + "denom": [ + "", + "", + "transfer", + "channel-1", + "btc" + ], + "amount": 2 + } + ], + "bankAfter": [ + { + "address": [ + "", + "", + "" + ], + "denom": [ + "", + "", + "", + "", + "" + ], + "amount": 0 + }, + { + "address": [ + "", + "", + "a2" + ], + "denom": [ + "transfer", + "channel-1", + "ethereum-hub", + "cosmos-hub", + "atom" + ], + "amount": 8 + }, + { + "address": [ + "", + "", + "a3" + ], + "denom": [ + "", + "", + "transfer", + "channel-1", + "btc" + ], + "amount": 2 + } + ], + "error": false + }, + { + "packet": { + "sourceChannel": "cosmos-hub", + "sourcePort": "bitcoin-hub", + "destChannel": "channel-0", + "destPort": "channel-1", + "data": { + "sender": "a1", + "receiver": "", + "amount": 1, + "denom": [ + "transfer", + "channel-0", + "transfer", + "channel-0", + "atom" + ] + } + }, + "handler": "SendTransfer", + "bankBefore": [ + { + "address": [ + "", + "", + "" + ], + "denom": [ + "", + "", + "", + "", + "" + ], + "amount": 0 + }, + { + "address": [ + "", + "", + "a2" + ], + "denom": [ + "transfer", + "channel-1", + "ethereum-hub", + "cosmos-hub", + "atom" + ], + "amount": 8 + }, + { + "address": [ + "", + "", + "a3" + ], + "denom": [ + "", + "", + "transfer", + "channel-1", 
+ "btc" + ], + "amount": 2 + } + ], + "bankAfter": [ + { + "address": [ + "", + "", + "" + ], + "denom": [ + "", + "", + "", + "", + "" + ], + "amount": 0 + }, + { + "address": [ + "", + "", + "a2" + ], + "denom": [ + "transfer", + "channel-1", + "ethereum-hub", + "cosmos-hub", + "atom" + ], + "amount": 8 + }, + { + "address": [ + "", + "", + "a3" + ], + "denom": [ + "", + "", + "transfer", + "channel-1", + "btc" + ], + "amount": 2 + } + ], + "error": true + } +] \ No newline at end of file diff --git a/applications/transfer/keeper/model_based_tests/Test5Packets.tla b/applications/transfer/keeper/model_based_tests/Test5Packets.tla new file mode 100644 index 0000000000..9691eec2f2 --- /dev/null +++ b/applications/transfer/keeper/model_based_tests/Test5Packets.tla @@ -0,0 +1,1056 @@ +------------------------- MODULE counterexample ------------------------- + +EXTENDS relay_tests + +(* Initial state *) + +State1 == +TRUE +(* Transition 0 to State2 *) + +State2 == +/\ bank = << + [channel |-> "", id |-> "", port |-> ""], [denom |-> "", + prefix0 |-> [channel |-> "", port |-> ""], + prefix1 |-> [channel |-> "", port |-> ""]] + >> + :> 0 +/\ count = 0 +/\ error = FALSE +/\ handler = "" +/\ history = 0 + :> [bankAfter |-> + << + [channel |-> "", id |-> "", port |-> ""], [denom |-> "", + prefix0 |-> [channel |-> "", port |-> ""], + prefix1 |-> [channel |-> "", port |-> ""]] + >> + :> 0, + bankBefore |-> + << + [channel |-> "", id |-> "", port |-> ""], [denom |-> "", + prefix0 |-> [channel |-> "", port |-> ""], + prefix1 |-> [channel |-> "", port |-> ""]] + >> + :> 0, + error |-> FALSE, + handler |-> "", + packet |-> + [data |-> + [amount |-> 2, + denomTrace |-> + [denom |-> "btc", + prefix0 |-> [channel |-> "", port |-> ""], + prefix1 |-> [channel |-> "", port |-> ""]], + receiver |-> "a3", + sender |-> "a3"], + destChannel |-> "channel-1", + destPort |-> "transfer", + sourceChannel |-> "channel-0", + sourcePort |-> "transfer"]] +/\ p = [data |-> + [amount |-> 2, + denomTrace |-> + [denom |-> "btc", + prefix0 |-> [channel |-> "", port |-> ""], + prefix1 |-> [channel |-> "", port |-> ""]], + receiver |-> "a3", + sender |-> "a3"], + destChannel |-> "channel-1", + destPort |-> "transfer", + sourceChannel |-> "channel-0", + sourcePort |-> "transfer"] + +(* Transition 3 to State3 *) + +State3 == +/\ bank = << + [channel |-> "", id |-> "", port |-> ""], [denom |-> "", + prefix0 |-> [channel |-> "", port |-> ""], + prefix1 |-> [channel |-> "", port |-> ""]] + >> + :> 0 + @@ << + [channel |-> "", id |-> "a3", port |-> ""], [denom |-> "btc", + prefix0 |-> [channel |-> "channel-1", port |-> "transfer"], + prefix1 |-> [channel |-> "", port |-> ""]] + >> + :> 2 +/\ count = 1 +/\ error = FALSE +/\ handler = "OnRecvPacket" +/\ history = 0 + :> [bankAfter |-> + << + [channel |-> "", id |-> "", port |-> ""], [denom |-> "", + prefix0 |-> [channel |-> "", port |-> ""], + prefix1 |-> [channel |-> "", port |-> ""]] + >> + :> 0, + bankBefore |-> + << + [channel |-> "", id |-> "", port |-> ""], [denom |-> "", + prefix0 |-> [channel |-> "", port |-> ""], + prefix1 |-> [channel |-> "", port |-> ""]] + >> + :> 0, + error |-> FALSE, + handler |-> "", + packet |-> + [data |-> + [amount |-> 2, + denomTrace |-> + [denom |-> "btc", + prefix0 |-> [channel |-> "", port |-> ""], + prefix1 |-> [channel |-> "", port |-> ""]], + receiver |-> "a3", + sender |-> "a3"], + destChannel |-> "channel-1", + destPort |-> "transfer", + sourceChannel |-> "channel-0", + sourcePort |-> "transfer"]] + @@ 1 + :> [bankAfter |-> + << + [channel |-> 
"", id |-> "", port |-> ""], [denom |-> "", + prefix0 |-> [channel |-> "", port |-> ""], + prefix1 |-> [channel |-> "", port |-> ""]] + >> + :> 0 + @@ << + [channel |-> "", id |-> "a3", port |-> ""], [denom |-> "btc", + prefix0 |-> [channel |-> "channel-1", port |-> "transfer"], + prefix1 |-> [channel |-> "", port |-> ""]] + >> + :> 2, + bankBefore |-> + << + [channel |-> "", id |-> "", port |-> ""], [denom |-> "", + prefix0 |-> [channel |-> "", port |-> ""], + prefix1 |-> [channel |-> "", port |-> ""]] + >> + :> 0, + error |-> FALSE, + handler |-> "OnRecvPacket", + packet |-> + [data |-> + [amount |-> 2, + denomTrace |-> + [denom |-> "btc", + prefix0 |-> [channel |-> "", port |-> ""], + prefix1 |-> [channel |-> "", port |-> ""]], + receiver |-> "a3", + sender |-> "a3"], + destChannel |-> "channel-1", + destPort |-> "transfer", + sourceChannel |-> "channel-0", + sourcePort |-> "transfer"]] +/\ p = [data |-> + [amount |-> 1, + denomTrace |-> + [denom |-> "btc", + prefix0 |-> [channel |-> "", port |-> ""], + prefix1 |-> [channel |-> "", port |-> "cosmos-hub"]], + receiver |-> "a3", + sender |-> "a1"], + destChannel |-> "channel-1", + destPort |-> "transfer", + sourceChannel |-> "ethereum-hub", + sourcePort |-> "channel-0"] + +(* Transition 0 to State4 *) + +State4 == +/\ bank = << + [channel |-> "", id |-> "", port |-> ""], [denom |-> "", + prefix0 |-> [channel |-> "", port |-> ""], + prefix1 |-> [channel |-> "", port |-> ""]] + >> + :> 0 + @@ << + [channel |-> "", id |-> "a3", port |-> ""], [denom |-> "btc", + prefix0 |-> [channel |-> "channel-1", port |-> "transfer"], + prefix1 |-> [channel |-> "", port |-> ""]] + >> + :> 2 +/\ count = 2 +/\ error = TRUE +/\ handler = "SendTransfer" +/\ history = 0 + :> [bankAfter |-> + << + [channel |-> "", id |-> "", port |-> ""], [denom |-> "", + prefix0 |-> [channel |-> "", port |-> ""], + prefix1 |-> [channel |-> "", port |-> ""]] + >> + :> 0, + bankBefore |-> + << + [channel |-> "", id |-> "", port |-> ""], [denom |-> "", + prefix0 |-> [channel |-> "", port |-> ""], + prefix1 |-> [channel |-> "", port |-> ""]] + >> + :> 0, + error |-> FALSE, + handler |-> "", + packet |-> + [data |-> + [amount |-> 2, + denomTrace |-> + [denom |-> "btc", + prefix0 |-> [channel |-> "", port |-> ""], + prefix1 |-> [channel |-> "", port |-> ""]], + receiver |-> "a3", + sender |-> "a3"], + destChannel |-> "channel-1", + destPort |-> "transfer", + sourceChannel |-> "channel-0", + sourcePort |-> "transfer"]] + @@ 1 + :> [bankAfter |-> + << + [channel |-> "", id |-> "", port |-> ""], [denom |-> "", + prefix0 |-> [channel |-> "", port |-> ""], + prefix1 |-> [channel |-> "", port |-> ""]] + >> + :> 0 + @@ << + [channel |-> "", id |-> "a3", port |-> ""], [denom |-> "btc", + prefix0 |-> [channel |-> "channel-1", port |-> "transfer"], + prefix1 |-> [channel |-> "", port |-> ""]] + >> + :> 2, + bankBefore |-> + << + [channel |-> "", id |-> "", port |-> ""], [denom |-> "", + prefix0 |-> [channel |-> "", port |-> ""], + prefix1 |-> [channel |-> "", port |-> ""]] + >> + :> 0, + error |-> FALSE, + handler |-> "OnRecvPacket", + packet |-> + [data |-> + [amount |-> 2, + denomTrace |-> + [denom |-> "btc", + prefix0 |-> [channel |-> "", port |-> ""], + prefix1 |-> [channel |-> "", port |-> ""]], + receiver |-> "a3", + sender |-> "a3"], + destChannel |-> "channel-1", + destPort |-> "transfer", + sourceChannel |-> "channel-0", + sourcePort |-> "transfer"]] + @@ 2 + :> [bankAfter |-> + << + [channel |-> "", id |-> "", port |-> ""], [denom |-> "", + prefix0 |-> [channel |-> "", port |-> ""], 
+ prefix1 |-> [channel |-> "", port |-> ""]] + >> + :> 0 + @@ << + [channel |-> "", id |-> "a3", port |-> ""], [denom |-> "btc", + prefix0 |-> [channel |-> "channel-1", port |-> "transfer"], + prefix1 |-> [channel |-> "", port |-> ""]] + >> + :> 2, + bankBefore |-> + << + [channel |-> "", id |-> "", port |-> ""], [denom |-> "", + prefix0 |-> [channel |-> "", port |-> ""], + prefix1 |-> [channel |-> "", port |-> ""]] + >> + :> 0 + @@ << + [channel |-> "", id |-> "a3", port |-> ""], [denom |-> "btc", + prefix0 |-> [channel |-> "channel-1", port |-> "transfer"], + prefix1 |-> [channel |-> "", port |-> ""]] + >> + :> 2, + error |-> TRUE, + handler |-> "SendTransfer", + packet |-> + [data |-> + [amount |-> 1, + denomTrace |-> + [denom |-> "btc", + prefix0 |-> [channel |-> "", port |-> ""], + prefix1 |-> [channel |-> "", port |-> "cosmos-hub"]], + receiver |-> "a3", + sender |-> "a1"], + destChannel |-> "channel-1", + destPort |-> "transfer", + sourceChannel |-> "ethereum-hub", + sourcePort |-> "channel-0"]] +/\ p = [data |-> + [amount |-> 4, + denomTrace |-> + [denom |-> "atom", + prefix0 |-> [channel |-> "cosmos-hub", port |-> "ethereum-hub"], + prefix1 |-> [channel |-> "", port |-> ""]], + receiver |-> "a2", + sender |-> "a2"], + destChannel |-> "channel-1", + destPort |-> "transfer", + sourceChannel |-> "channel-0", + sourcePort |-> "transfer"] + +(* Transition 5 to State5 *) + +State5 == +/\ bank = << + [channel |-> "", id |-> "", port |-> ""], [denom |-> "", + prefix0 |-> [channel |-> "", port |-> ""], + prefix1 |-> [channel |-> "", port |-> ""]] + >> + :> 0 + @@ << + [channel |-> "", id |-> "a2", port |-> ""], [denom |-> "atom", + prefix0 |-> [channel |-> "cosmos-hub", port |-> "ethereum-hub"], + prefix1 |-> [channel |-> "channel-1", port |-> "transfer"]] + >> + :> 4 + @@ << + [channel |-> "", id |-> "a3", port |-> ""], [denom |-> "btc", + prefix0 |-> [channel |-> "channel-1", port |-> "transfer"], + prefix1 |-> [channel |-> "", port |-> ""]] + >> + :> 2 +/\ count = 3 +/\ error = FALSE +/\ handler = "OnRecvPacket" +/\ history = 0 + :> [bankAfter |-> + << + [channel |-> "", id |-> "", port |-> ""], [denom |-> "", + prefix0 |-> [channel |-> "", port |-> ""], + prefix1 |-> [channel |-> "", port |-> ""]] + >> + :> 0, + bankBefore |-> + << + [channel |-> "", id |-> "", port |-> ""], [denom |-> "", + prefix0 |-> [channel |-> "", port |-> ""], + prefix1 |-> [channel |-> "", port |-> ""]] + >> + :> 0, + error |-> FALSE, + handler |-> "", + packet |-> + [data |-> + [amount |-> 2, + denomTrace |-> + [denom |-> "btc", + prefix0 |-> [channel |-> "", port |-> ""], + prefix1 |-> [channel |-> "", port |-> ""]], + receiver |-> "a3", + sender |-> "a3"], + destChannel |-> "channel-1", + destPort |-> "transfer", + sourceChannel |-> "channel-0", + sourcePort |-> "transfer"]] + @@ 1 + :> [bankAfter |-> + << + [channel |-> "", id |-> "", port |-> ""], [denom |-> "", + prefix0 |-> [channel |-> "", port |-> ""], + prefix1 |-> [channel |-> "", port |-> ""]] + >> + :> 0 + @@ << + [channel |-> "", id |-> "a3", port |-> ""], [denom |-> "btc", + prefix0 |-> [channel |-> "channel-1", port |-> "transfer"], + prefix1 |-> [channel |-> "", port |-> ""]] + >> + :> 2, + bankBefore |-> + << + [channel |-> "", id |-> "", port |-> ""], [denom |-> "", + prefix0 |-> [channel |-> "", port |-> ""], + prefix1 |-> [channel |-> "", port |-> ""]] + >> + :> 0, + error |-> FALSE, + handler |-> "OnRecvPacket", + packet |-> + [data |-> + [amount |-> 2, + denomTrace |-> + [denom |-> "btc", + prefix0 |-> [channel |-> "", port |-> ""], + 
prefix1 |-> [channel |-> "", port |-> ""]], + receiver |-> "a3", + sender |-> "a3"], + destChannel |-> "channel-1", + destPort |-> "transfer", + sourceChannel |-> "channel-0", + sourcePort |-> "transfer"]] + @@ 2 + :> [bankAfter |-> + << + [channel |-> "", id |-> "", port |-> ""], [denom |-> "", + prefix0 |-> [channel |-> "", port |-> ""], + prefix1 |-> [channel |-> "", port |-> ""]] + >> + :> 0 + @@ << + [channel |-> "", id |-> "a3", port |-> ""], [denom |-> "btc", + prefix0 |-> [channel |-> "channel-1", port |-> "transfer"], + prefix1 |-> [channel |-> "", port |-> ""]] + >> + :> 2, + bankBefore |-> + << + [channel |-> "", id |-> "", port |-> ""], [denom |-> "", + prefix0 |-> [channel |-> "", port |-> ""], + prefix1 |-> [channel |-> "", port |-> ""]] + >> + :> 0 + @@ << + [channel |-> "", id |-> "a3", port |-> ""], [denom |-> "btc", + prefix0 |-> [channel |-> "channel-1", port |-> "transfer"], + prefix1 |-> [channel |-> "", port |-> ""]] + >> + :> 2, + error |-> TRUE, + handler |-> "SendTransfer", + packet |-> + [data |-> + [amount |-> 1, + denomTrace |-> + [denom |-> "btc", + prefix0 |-> [channel |-> "", port |-> ""], + prefix1 |-> [channel |-> "", port |-> "cosmos-hub"]], + receiver |-> "a3", + sender |-> "a1"], + destChannel |-> "channel-1", + destPort |-> "transfer", + sourceChannel |-> "ethereum-hub", + sourcePort |-> "channel-0"]] + @@ 3 + :> [bankAfter |-> + << + [channel |-> "", id |-> "", port |-> ""], [denom |-> "", + prefix0 |-> [channel |-> "", port |-> ""], + prefix1 |-> [channel |-> "", port |-> ""]] + >> + :> 0 + @@ << + [channel |-> "", id |-> "a2", port |-> ""], [denom |-> "atom", + prefix0 |-> [channel |-> "cosmos-hub", port |-> "ethereum-hub"], + prefix1 |-> [channel |-> "channel-1", port |-> "transfer"]] + >> + :> 4 + @@ << + [channel |-> "", id |-> "a3", port |-> ""], [denom |-> "btc", + prefix0 |-> [channel |-> "channel-1", port |-> "transfer"], + prefix1 |-> [channel |-> "", port |-> ""]] + >> + :> 2, + bankBefore |-> + << + [channel |-> "", id |-> "", port |-> ""], [denom |-> "", + prefix0 |-> [channel |-> "", port |-> ""], + prefix1 |-> [channel |-> "", port |-> ""]] + >> + :> 0 + @@ << + [channel |-> "", id |-> "a3", port |-> ""], [denom |-> "btc", + prefix0 |-> [channel |-> "channel-1", port |-> "transfer"], + prefix1 |-> [channel |-> "", port |-> ""]] + >> + :> 2, + error |-> FALSE, + handler |-> "OnRecvPacket", + packet |-> + [data |-> + [amount |-> 4, + denomTrace |-> + [denom |-> "atom", + prefix0 |-> + [channel |-> "cosmos-hub", port |-> "ethereum-hub"], + prefix1 |-> [channel |-> "", port |-> ""]], + receiver |-> "a2", + sender |-> "a2"], + destChannel |-> "channel-1", + destPort |-> "transfer", + sourceChannel |-> "channel-0", + sourcePort |-> "transfer"]] +/\ p = [data |-> + [amount |-> 4, + denomTrace |-> + [denom |-> "atom", + prefix0 |-> [channel |-> "cosmos-hub", port |-> "ethereum-hub"], + prefix1 |-> [channel |-> "", port |-> ""]], + receiver |-> "a2", + sender |-> ""], + destChannel |-> "channel-1", + destPort |-> "transfer", + sourceChannel |-> "channel-0", + sourcePort |-> "transfer"] + +(* Transition 5 to State6 *) + +State6 == +/\ bank = << + [channel |-> "", id |-> "", port |-> ""], [denom |-> "", + prefix0 |-> [channel |-> "", port |-> ""], + prefix1 |-> [channel |-> "", port |-> ""]] + >> + :> 0 + @@ << + [channel |-> "", id |-> "a2", port |-> ""], [denom |-> "atom", + prefix0 |-> [channel |-> "cosmos-hub", port |-> "ethereum-hub"], + prefix1 |-> [channel |-> "channel-1", port |-> "transfer"]] + >> + :> 8 + @@ << + [channel |-> "", id |-> 
"a3", port |-> ""], [denom |-> "btc", + prefix0 |-> [channel |-> "channel-1", port |-> "transfer"], + prefix1 |-> [channel |-> "", port |-> ""]] + >> + :> 2 +/\ count = 4 +/\ error = FALSE +/\ handler = "OnRecvPacket" +/\ history = 0 + :> [bankAfter |-> + << + [channel |-> "", id |-> "", port |-> ""], [denom |-> "", + prefix0 |-> [channel |-> "", port |-> ""], + prefix1 |-> [channel |-> "", port |-> ""]] + >> + :> 0, + bankBefore |-> + << + [channel |-> "", id |-> "", port |-> ""], [denom |-> "", + prefix0 |-> [channel |-> "", port |-> ""], + prefix1 |-> [channel |-> "", port |-> ""]] + >> + :> 0, + error |-> FALSE, + handler |-> "", + packet |-> + [data |-> + [amount |-> 2, + denomTrace |-> + [denom |-> "btc", + prefix0 |-> [channel |-> "", port |-> ""], + prefix1 |-> [channel |-> "", port |-> ""]], + receiver |-> "a3", + sender |-> "a3"], + destChannel |-> "channel-1", + destPort |-> "transfer", + sourceChannel |-> "channel-0", + sourcePort |-> "transfer"]] + @@ 1 + :> [bankAfter |-> + << + [channel |-> "", id |-> "", port |-> ""], [denom |-> "", + prefix0 |-> [channel |-> "", port |-> ""], + prefix1 |-> [channel |-> "", port |-> ""]] + >> + :> 0 + @@ << + [channel |-> "", id |-> "a3", port |-> ""], [denom |-> "btc", + prefix0 |-> [channel |-> "channel-1", port |-> "transfer"], + prefix1 |-> [channel |-> "", port |-> ""]] + >> + :> 2, + bankBefore |-> + << + [channel |-> "", id |-> "", port |-> ""], [denom |-> "", + prefix0 |-> [channel |-> "", port |-> ""], + prefix1 |-> [channel |-> "", port |-> ""]] + >> + :> 0, + error |-> FALSE, + handler |-> "OnRecvPacket", + packet |-> + [data |-> + [amount |-> 2, + denomTrace |-> + [denom |-> "btc", + prefix0 |-> [channel |-> "", port |-> ""], + prefix1 |-> [channel |-> "", port |-> ""]], + receiver |-> "a3", + sender |-> "a3"], + destChannel |-> "channel-1", + destPort |-> "transfer", + sourceChannel |-> "channel-0", + sourcePort |-> "transfer"]] + @@ 2 + :> [bankAfter |-> + << + [channel |-> "", id |-> "", port |-> ""], [denom |-> "", + prefix0 |-> [channel |-> "", port |-> ""], + prefix1 |-> [channel |-> "", port |-> ""]] + >> + :> 0 + @@ << + [channel |-> "", id |-> "a3", port |-> ""], [denom |-> "btc", + prefix0 |-> [channel |-> "channel-1", port |-> "transfer"], + prefix1 |-> [channel |-> "", port |-> ""]] + >> + :> 2, + bankBefore |-> + << + [channel |-> "", id |-> "", port |-> ""], [denom |-> "", + prefix0 |-> [channel |-> "", port |-> ""], + prefix1 |-> [channel |-> "", port |-> ""]] + >> + :> 0 + @@ << + [channel |-> "", id |-> "a3", port |-> ""], [denom |-> "btc", + prefix0 |-> [channel |-> "channel-1", port |-> "transfer"], + prefix1 |-> [channel |-> "", port |-> ""]] + >> + :> 2, + error |-> TRUE, + handler |-> "SendTransfer", + packet |-> + [data |-> + [amount |-> 1, + denomTrace |-> + [denom |-> "btc", + prefix0 |-> [channel |-> "", port |-> ""], + prefix1 |-> [channel |-> "", port |-> "cosmos-hub"]], + receiver |-> "a3", + sender |-> "a1"], + destChannel |-> "channel-1", + destPort |-> "transfer", + sourceChannel |-> "ethereum-hub", + sourcePort |-> "channel-0"]] + @@ 3 + :> [bankAfter |-> + << + [channel |-> "", id |-> "", port |-> ""], [denom |-> "", + prefix0 |-> [channel |-> "", port |-> ""], + prefix1 |-> [channel |-> "", port |-> ""]] + >> + :> 0 + @@ << + [channel |-> "", id |-> "a2", port |-> ""], [denom |-> "atom", + prefix0 |-> [channel |-> "cosmos-hub", port |-> "ethereum-hub"], + prefix1 |-> [channel |-> "channel-1", port |-> "transfer"]] + >> + :> 4 + @@ << + [channel |-> "", id |-> "a3", port |-> ""], [denom |-> 
"btc", + prefix0 |-> [channel |-> "channel-1", port |-> "transfer"], + prefix1 |-> [channel |-> "", port |-> ""]] + >> + :> 2, + bankBefore |-> + << + [channel |-> "", id |-> "", port |-> ""], [denom |-> "", + prefix0 |-> [channel |-> "", port |-> ""], + prefix1 |-> [channel |-> "", port |-> ""]] + >> + :> 0 + @@ << + [channel |-> "", id |-> "a3", port |-> ""], [denom |-> "btc", + prefix0 |-> [channel |-> "channel-1", port |-> "transfer"], + prefix1 |-> [channel |-> "", port |-> ""]] + >> + :> 2, + error |-> FALSE, + handler |-> "OnRecvPacket", + packet |-> + [data |-> + [amount |-> 4, + denomTrace |-> + [denom |-> "atom", + prefix0 |-> + [channel |-> "cosmos-hub", port |-> "ethereum-hub"], + prefix1 |-> [channel |-> "", port |-> ""]], + receiver |-> "a2", + sender |-> "a2"], + destChannel |-> "channel-1", + destPort |-> "transfer", + sourceChannel |-> "channel-0", + sourcePort |-> "transfer"]] + @@ 4 + :> [bankAfter |-> + << + [channel |-> "", id |-> "", port |-> ""], [denom |-> "", + prefix0 |-> [channel |-> "", port |-> ""], + prefix1 |-> [channel |-> "", port |-> ""]] + >> + :> 0 + @@ << + [channel |-> "", id |-> "a2", port |-> ""], [denom |-> "atom", + prefix0 |-> [channel |-> "cosmos-hub", port |-> "ethereum-hub"], + prefix1 |-> [channel |-> "channel-1", port |-> "transfer"]] + >> + :> 8 + @@ << + [channel |-> "", id |-> "a3", port |-> ""], [denom |-> "btc", + prefix0 |-> [channel |-> "channel-1", port |-> "transfer"], + prefix1 |-> [channel |-> "", port |-> ""]] + >> + :> 2, + bankBefore |-> + << + [channel |-> "", id |-> "", port |-> ""], [denom |-> "", + prefix0 |-> [channel |-> "", port |-> ""], + prefix1 |-> [channel |-> "", port |-> ""]] + >> + :> 0 + @@ << + [channel |-> "", id |-> "a2", port |-> ""], [denom |-> "atom", + prefix0 |-> [channel |-> "cosmos-hub", port |-> "ethereum-hub"], + prefix1 |-> [channel |-> "channel-1", port |-> "transfer"]] + >> + :> 4 + @@ << + [channel |-> "", id |-> "a3", port |-> ""], [denom |-> "btc", + prefix0 |-> [channel |-> "channel-1", port |-> "transfer"], + prefix1 |-> [channel |-> "", port |-> ""]] + >> + :> 2, + error |-> FALSE, + handler |-> "OnRecvPacket", + packet |-> + [data |-> + [amount |-> 4, + denomTrace |-> + [denom |-> "atom", + prefix0 |-> + [channel |-> "cosmos-hub", port |-> "ethereum-hub"], + prefix1 |-> [channel |-> "", port |-> ""]], + receiver |-> "a2", + sender |-> ""], + destChannel |-> "channel-1", + destPort |-> "transfer", + sourceChannel |-> "channel-0", + sourcePort |-> "transfer"]] +/\ p = [data |-> + [amount |-> 1, + denomTrace |-> + [denom |-> "atom", + prefix0 |-> [channel |-> "channel-0", port |-> "transfer"], + prefix1 |-> [channel |-> "channel-0", port |-> "transfer"]], + receiver |-> "", + sender |-> "a1"], + destChannel |-> "channel-0", + destPort |-> "channel-1", + sourceChannel |-> "cosmos-hub", + sourcePort |-> "bitcoin-hub"] + +(* Transition 0 to State7 *) + +State7 == +/\ bank = << + [channel |-> "", id |-> "", port |-> ""], [denom |-> "", + prefix0 |-> [channel |-> "", port |-> ""], + prefix1 |-> [channel |-> "", port |-> ""]] + >> + :> 0 + @@ << + [channel |-> "", id |-> "a2", port |-> ""], [denom |-> "atom", + prefix0 |-> [channel |-> "cosmos-hub", port |-> "ethereum-hub"], + prefix1 |-> [channel |-> "channel-1", port |-> "transfer"]] + >> + :> 8 + @@ << + [channel |-> "", id |-> "a3", port |-> ""], [denom |-> "btc", + prefix0 |-> [channel |-> "channel-1", port |-> "transfer"], + prefix1 |-> [channel |-> "", port |-> ""]] + >> + :> 2 +/\ count = 5 +/\ error = TRUE +/\ handler = "SendTransfer" +/\ 
history = 0 + :> [bankAfter |-> + << + [channel |-> "", id |-> "", port |-> ""], [denom |-> "", + prefix0 |-> [channel |-> "", port |-> ""], + prefix1 |-> [channel |-> "", port |-> ""]] + >> + :> 0, + bankBefore |-> + << + [channel |-> "", id |-> "", port |-> ""], [denom |-> "", + prefix0 |-> [channel |-> "", port |-> ""], + prefix1 |-> [channel |-> "", port |-> ""]] + >> + :> 0, + error |-> FALSE, + handler |-> "", + packet |-> + [data |-> + [amount |-> 2, + denomTrace |-> + [denom |-> "btc", + prefix0 |-> [channel |-> "", port |-> ""], + prefix1 |-> [channel |-> "", port |-> ""]], + receiver |-> "a3", + sender |-> "a3"], + destChannel |-> "channel-1", + destPort |-> "transfer", + sourceChannel |-> "channel-0", + sourcePort |-> "transfer"]] + @@ 1 + :> [bankAfter |-> + << + [channel |-> "", id |-> "", port |-> ""], [denom |-> "", + prefix0 |-> [channel |-> "", port |-> ""], + prefix1 |-> [channel |-> "", port |-> ""]] + >> + :> 0 + @@ << + [channel |-> "", id |-> "a3", port |-> ""], [denom |-> "btc", + prefix0 |-> [channel |-> "channel-1", port |-> "transfer"], + prefix1 |-> [channel |-> "", port |-> ""]] + >> + :> 2, + bankBefore |-> + << + [channel |-> "", id |-> "", port |-> ""], [denom |-> "", + prefix0 |-> [channel |-> "", port |-> ""], + prefix1 |-> [channel |-> "", port |-> ""]] + >> + :> 0, + error |-> FALSE, + handler |-> "OnRecvPacket", + packet |-> + [data |-> + [amount |-> 2, + denomTrace |-> + [denom |-> "btc", + prefix0 |-> [channel |-> "", port |-> ""], + prefix1 |-> [channel |-> "", port |-> ""]], + receiver |-> "a3", + sender |-> "a3"], + destChannel |-> "channel-1", + destPort |-> "transfer", + sourceChannel |-> "channel-0", + sourcePort |-> "transfer"]] + @@ 2 + :> [bankAfter |-> + << + [channel |-> "", id |-> "", port |-> ""], [denom |-> "", + prefix0 |-> [channel |-> "", port |-> ""], + prefix1 |-> [channel |-> "", port |-> ""]] + >> + :> 0 + @@ << + [channel |-> "", id |-> "a3", port |-> ""], [denom |-> "btc", + prefix0 |-> [channel |-> "channel-1", port |-> "transfer"], + prefix1 |-> [channel |-> "", port |-> ""]] + >> + :> 2, + bankBefore |-> + << + [channel |-> "", id |-> "", port |-> ""], [denom |-> "", + prefix0 |-> [channel |-> "", port |-> ""], + prefix1 |-> [channel |-> "", port |-> ""]] + >> + :> 0 + @@ << + [channel |-> "", id |-> "a3", port |-> ""], [denom |-> "btc", + prefix0 |-> [channel |-> "channel-1", port |-> "transfer"], + prefix1 |-> [channel |-> "", port |-> ""]] + >> + :> 2, + error |-> TRUE, + handler |-> "SendTransfer", + packet |-> + [data |-> + [amount |-> 1, + denomTrace |-> + [denom |-> "btc", + prefix0 |-> [channel |-> "", port |-> ""], + prefix1 |-> [channel |-> "", port |-> "cosmos-hub"]], + receiver |-> "a3", + sender |-> "a1"], + destChannel |-> "channel-1", + destPort |-> "transfer", + sourceChannel |-> "ethereum-hub", + sourcePort |-> "channel-0"]] + @@ 3 + :> [bankAfter |-> + << + [channel |-> "", id |-> "", port |-> ""], [denom |-> "", + prefix0 |-> [channel |-> "", port |-> ""], + prefix1 |-> [channel |-> "", port |-> ""]] + >> + :> 0 + @@ << + [channel |-> "", id |-> "a2", port |-> ""], [denom |-> "atom", + prefix0 |-> [channel |-> "cosmos-hub", port |-> "ethereum-hub"], + prefix1 |-> [channel |-> "channel-1", port |-> "transfer"]] + >> + :> 4 + @@ << + [channel |-> "", id |-> "a3", port |-> ""], [denom |-> "btc", + prefix0 |-> [channel |-> "channel-1", port |-> "transfer"], + prefix1 |-> [channel |-> "", port |-> ""]] + >> + :> 2, + bankBefore |-> + << + [channel |-> "", id |-> "", port |-> ""], [denom |-> "", + prefix0 |-> 
[channel |-> "", port |-> ""], + prefix1 |-> [channel |-> "", port |-> ""]] + >> + :> 0 + @@ << + [channel |-> "", id |-> "a3", port |-> ""], [denom |-> "btc", + prefix0 |-> [channel |-> "channel-1", port |-> "transfer"], + prefix1 |-> [channel |-> "", port |-> ""]] + >> + :> 2, + error |-> FALSE, + handler |-> "OnRecvPacket", + packet |-> + [data |-> + [amount |-> 4, + denomTrace |-> + [denom |-> "atom", + prefix0 |-> + [channel |-> "cosmos-hub", port |-> "ethereum-hub"], + prefix1 |-> [channel |-> "", port |-> ""]], + receiver |-> "a2", + sender |-> "a2"], + destChannel |-> "channel-1", + destPort |-> "transfer", + sourceChannel |-> "channel-0", + sourcePort |-> "transfer"]] + @@ 4 + :> [bankAfter |-> + << + [channel |-> "", id |-> "", port |-> ""], [denom |-> "", + prefix0 |-> [channel |-> "", port |-> ""], + prefix1 |-> [channel |-> "", port |-> ""]] + >> + :> 0 + @@ << + [channel |-> "", id |-> "a2", port |-> ""], [denom |-> "atom", + prefix0 |-> [channel |-> "cosmos-hub", port |-> "ethereum-hub"], + prefix1 |-> [channel |-> "channel-1", port |-> "transfer"]] + >> + :> 8 + @@ << + [channel |-> "", id |-> "a3", port |-> ""], [denom |-> "btc", + prefix0 |-> [channel |-> "channel-1", port |-> "transfer"], + prefix1 |-> [channel |-> "", port |-> ""]] + >> + :> 2, + bankBefore |-> + << + [channel |-> "", id |-> "", port |-> ""], [denom |-> "", + prefix0 |-> [channel |-> "", port |-> ""], + prefix1 |-> [channel |-> "", port |-> ""]] + >> + :> 0 + @@ << + [channel |-> "", id |-> "a2", port |-> ""], [denom |-> "atom", + prefix0 |-> [channel |-> "cosmos-hub", port |-> "ethereum-hub"], + prefix1 |-> [channel |-> "channel-1", port |-> "transfer"]] + >> + :> 4 + @@ << + [channel |-> "", id |-> "a3", port |-> ""], [denom |-> "btc", + prefix0 |-> [channel |-> "channel-1", port |-> "transfer"], + prefix1 |-> [channel |-> "", port |-> ""]] + >> + :> 2, + error |-> FALSE, + handler |-> "OnRecvPacket", + packet |-> + [data |-> + [amount |-> 4, + denomTrace |-> + [denom |-> "atom", + prefix0 |-> + [channel |-> "cosmos-hub", port |-> "ethereum-hub"], + prefix1 |-> [channel |-> "", port |-> ""]], + receiver |-> "a2", + sender |-> ""], + destChannel |-> "channel-1", + destPort |-> "transfer", + sourceChannel |-> "channel-0", + sourcePort |-> "transfer"]] + @@ 5 + :> [bankAfter |-> + << + [channel |-> "", id |-> "", port |-> ""], [denom |-> "", + prefix0 |-> [channel |-> "", port |-> ""], + prefix1 |-> [channel |-> "", port |-> ""]] + >> + :> 0 + @@ << + [channel |-> "", id |-> "a2", port |-> ""], [denom |-> "atom", + prefix0 |-> [channel |-> "cosmos-hub", port |-> "ethereum-hub"], + prefix1 |-> [channel |-> "channel-1", port |-> "transfer"]] + >> + :> 8 + @@ << + [channel |-> "", id |-> "a3", port |-> ""], [denom |-> "btc", + prefix0 |-> [channel |-> "channel-1", port |-> "transfer"], + prefix1 |-> [channel |-> "", port |-> ""]] + >> + :> 2, + bankBefore |-> + << + [channel |-> "", id |-> "", port |-> ""], [denom |-> "", + prefix0 |-> [channel |-> "", port |-> ""], + prefix1 |-> [channel |-> "", port |-> ""]] + >> + :> 0 + @@ << + [channel |-> "", id |-> "a2", port |-> ""], [denom |-> "atom", + prefix0 |-> [channel |-> "cosmos-hub", port |-> "ethereum-hub"], + prefix1 |-> [channel |-> "channel-1", port |-> "transfer"]] + >> + :> 8 + @@ << + [channel |-> "", id |-> "a3", port |-> ""], [denom |-> "btc", + prefix0 |-> [channel |-> "channel-1", port |-> "transfer"], + prefix1 |-> [channel |-> "", port |-> ""]] + >> + :> 2, + error |-> TRUE, + handler |-> "SendTransfer", + packet |-> + [data |-> + [amount |-> 
1, + denomTrace |-> + [denom |-> "atom", + prefix0 |-> [channel |-> "channel-0", port |-> "transfer"], + prefix1 |-> [channel |-> "channel-0", port |-> "transfer"]], + receiver |-> "", + sender |-> "a1"], + destChannel |-> "channel-0", + destPort |-> "channel-1", + sourceChannel |-> "cosmos-hub", + sourcePort |-> "bitcoin-hub"]] +/\ p = [data |-> + [amount |-> 0, + denomTrace |-> + [denom |-> "", + prefix0 |-> [channel |-> "", port |-> ""], + prefix1 |-> [channel |-> "", port |-> ""]], + receiver |-> "", + sender |-> ""], + destChannel |-> "", + destPort |-> "", + sourceChannel |-> "", + sourcePort |-> ""] + +(* The following formula holds true in the last state and violates the invariant *) + +InvariantViolation == + count >= 5 + /\ BMC!Skolem((\E s1$2 \in DOMAIN history: + BMC!Skolem((\E s2$2 \in DOMAIN history: + ~(history[s1$2]["handler"] = history[s2$2]["handler"]))))) + +================================================================================ +\* Created by Apalache on Thu Dec 10 11:52:41 CET 2020 +\* https://github.com/informalsystems/apalache diff --git a/applications/transfer/keeper/model_based_tests/Test5PacketsAllDifferentPass.json b/applications/transfer/keeper/model_based_tests/Test5PacketsAllDifferentPass.json new file mode 100644 index 0000000000..6a039f3eca --- /dev/null +++ b/applications/transfer/keeper/model_based_tests/Test5PacketsAllDifferentPass.json @@ -0,0 +1,612 @@ +[ + { + "packet": { + "sourceChannel": "channel-0", + "sourcePort": "transfer", + "destChannel": "channel-0", + "destPort": "transfer", + "data": { + "sender": "a3", + "receiver": "a2", + "amount": 3, + "denom": [ + "", + "", + "transfer", + "channel-0", + "eth" + ] + } + }, + "handler": "OnTimeoutPacket", + "bankBefore": [ + { + "address": [ + "", + "", + "" + ], + "denom": [ + "", + "", + "", + "", + "" + ], + "amount": 0 + } + ], + "bankAfter": [ + { + "address": [ + "", + "", + "" + ], + "denom": [ + "", + "", + "", + "", + "" + ], + "amount": 0 + }, + { + "address": [ + "", + "", + "a3" + ], + "denom": [ + "", + "", + "transfer", + "channel-0", + "eth" + ], + "amount": 3 + } + ], + "error": false + }, + { + "packet": { + "sourceChannel": "channel-1", + "sourcePort": "transfer", + "destChannel": "channel-0", + "destPort": "transfer", + "data": { + "sender": "a2", + "receiver": "a1", + "amount": 3, + "denom": [ + "transfer", + "channel-1", + "cosmos-hub", + "cosmos-hub", + "btc" + ] + } + }, + "handler": "OnRecvAcknowledgementError", + "bankBefore": [ + { + "address": [ + "", + "", + "" + ], + "denom": [ + "", + "", + "", + "", + "" + ], + "amount": 0 + }, + { + "address": [ + "", + "", + "a3" + ], + "denom": [ + "", + "", + "transfer", + "channel-0", + "eth" + ], + "amount": 3 + } + ], + "bankAfter": [ + { + "address": [ + "", + "", + "" + ], + "denom": [ + "", + "", + "", + "", + "" + ], + "amount": 0 + }, + { + "address": [ + "", + "", + "a2" + ], + "denom": [ + "transfer", + "channel-1", + "cosmos-hub", + "cosmos-hub", + "btc" + ], + "amount": 3 + }, + { + "address": [ + "", + "", + "a3" + ], + "denom": [ + "", + "", + "transfer", + "channel-0", + "eth" + ], + "amount": 3 + } + ], + "error": false + }, + { + "packet": { + "sourceChannel": "channel-0", + "sourcePort": "transfer", + "destChannel": "channel-1", + "destPort": "transfer", + "data": { + "sender": "a1", + "receiver": "a2", + "amount": 3, + "denom": [ + "", + "", + "cosmos-hub", + "cosmos-hub", + "atom" + ] + } + }, + "handler": "OnRecvPacket", + "bankBefore": [ + { + "address": [ + "", + "", + "" + ], + "denom": [ + "", + "", + 
"", + "", + "" + ], + "amount": 0 + }, + { + "address": [ + "", + "", + "a2" + ], + "denom": [ + "transfer", + "channel-1", + "cosmos-hub", + "cosmos-hub", + "btc" + ], + "amount": 3 + }, + { + "address": [ + "", + "", + "a3" + ], + "denom": [ + "", + "", + "transfer", + "channel-0", + "eth" + ], + "amount": 3 + } + ], + "bankAfter": [ + { + "address": [ + "", + "", + "" + ], + "denom": [ + "", + "", + "", + "", + "" + ], + "amount": 0 + }, + { + "address": [ + "", + "", + "a2" + ], + "denom": [ + "transfer", + "channel-1", + "cosmos-hub", + "cosmos-hub", + "atom" + ], + "amount": 3 + }, + { + "address": [ + "", + "", + "a2" + ], + "denom": [ + "transfer", + "channel-1", + "cosmos-hub", + "cosmos-hub", + "btc" + ], + "amount": 3 + }, + { + "address": [ + "", + "", + "a3" + ], + "denom": [ + "", + "", + "transfer", + "channel-0", + "eth" + ], + "amount": 3 + } + ], + "error": false + }, + { + "packet": { + "sourceChannel": "cosmos-hub", + "sourcePort": "bitcoin-hub", + "destChannel": "transfer", + "destPort": "cosmos-hub", + "data": { + "sender": "a1", + "receiver": "", + "amount": 2, + "denom": [ + "", + "channel-0", + "channel-1", + "channel-1", + "" + ] + } + }, + "handler": "OnRecvAcknowledgementResult", + "bankBefore": [ + { + "address": [ + "", + "", + "" + ], + "denom": [ + "", + "", + "", + "", + "" + ], + "amount": 0 + }, + { + "address": [ + "", + "", + "a2" + ], + "denom": [ + "transfer", + "channel-1", + "cosmos-hub", + "cosmos-hub", + "atom" + ], + "amount": 3 + }, + { + "address": [ + "", + "", + "a2" + ], + "denom": [ + "transfer", + "channel-1", + "cosmos-hub", + "cosmos-hub", + "btc" + ], + "amount": 3 + }, + { + "address": [ + "", + "", + "a3" + ], + "denom": [ + "", + "", + "transfer", + "channel-0", + "eth" + ], + "amount": 3 + } + ], + "bankAfter": [ + { + "address": [ + "", + "", + "" + ], + "denom": [ + "", + "", + "", + "", + "" + ], + "amount": 0 + }, + { + "address": [ + "", + "", + "a2" + ], + "denom": [ + "transfer", + "channel-1", + "cosmos-hub", + "cosmos-hub", + "atom" + ], + "amount": 3 + }, + { + "address": [ + "", + "", + "a2" + ], + "denom": [ + "transfer", + "channel-1", + "cosmos-hub", + "cosmos-hub", + "btc" + ], + "amount": 3 + }, + { + "address": [ + "", + "", + "a3" + ], + "denom": [ + "", + "", + "transfer", + "channel-0", + "eth" + ], + "amount": 3 + } + ], + "error": false + }, + { + "packet": { + "sourceChannel": "channel-1", + "sourcePort": "transfer", + "destChannel": "channel-0", + "destPort": "transfer", + "data": { + "sender": "a3", + "receiver": "a3", + "amount": 1, + "denom": [ + "", + "", + "transfer", + "channel-0", + "eth" + ] + } + }, + "handler": "SendTransfer", + "bankBefore": [ + { + "address": [ + "", + "", + "" + ], + "denom": [ + "", + "", + "", + "", + "" + ], + "amount": 0 + }, + { + "address": [ + "", + "", + "a2" + ], + "denom": [ + "transfer", + "channel-1", + "cosmos-hub", + "cosmos-hub", + "atom" + ], + "amount": 3 + }, + { + "address": [ + "", + "", + "a2" + ], + "denom": [ + "transfer", + "channel-1", + "cosmos-hub", + "cosmos-hub", + "btc" + ], + "amount": 3 + }, + { + "address": [ + "", + "", + "a3" + ], + "denom": [ + "", + "", + "transfer", + "channel-0", + "eth" + ], + "amount": 3 + } + ], + "bankAfter": [ + { + "address": [ + "", + "", + "" + ], + "denom": [ + "", + "", + "", + "", + "" + ], + "amount": 0 + }, + { + "address": [ + "", + "", + "a2" + ], + "denom": [ + "transfer", + "channel-1", + "cosmos-hub", + "cosmos-hub", + "atom" + ], + "amount": 3 + }, + { + "address": [ + "", + "", + "a2" + ], + "denom": [ + 
"transfer", + "channel-1", + "cosmos-hub", + "cosmos-hub", + "btc" + ], + "amount": 3 + }, + { + "address": [ + "", + "", + "a3" + ], + "denom": [ + "", + "", + "transfer", + "channel-0", + "eth" + ], + "amount": 2 + }, + { + "address": [ + "transfer", + "channel-1", + "" + ], + "denom": [ + "", + "", + "transfer", + "channel-0", + "eth" + ], + "amount": 1 + } + ], + "error": false + } +] \ No newline at end of file diff --git a/applications/transfer/keeper/model_based_tests/Test5PacketsAllDifferentPass.tla b/applications/transfer/keeper/model_based_tests/Test5PacketsAllDifferentPass.tla new file mode 100644 index 0000000000..89e6d87be5 --- /dev/null +++ b/applications/transfer/keeper/model_based_tests/Test5PacketsAllDifferentPass.tla @@ -0,0 +1,1188 @@ +------------------------- MODULE counterexample ------------------------- + +EXTENDS relay_tests + +(* Initial state *) + +State1 == +TRUE +(* Transition 0 to State2 *) + +State2 == +/\ bank = << + [channel |-> "", id |-> "", port |-> ""], [denom |-> "", + prefix0 |-> [channel |-> "", port |-> ""], + prefix1 |-> [channel |-> "", port |-> ""]] + >> + :> 0 +/\ count = 0 +/\ error = FALSE +/\ handler = "" +/\ history = 0 + :> [bankAfter |-> + << + [channel |-> "", id |-> "", port |-> ""], [denom |-> "", + prefix0 |-> [channel |-> "", port |-> ""], + prefix1 |-> [channel |-> "", port |-> ""]] + >> + :> 0, + bankBefore |-> + << + [channel |-> "", id |-> "", port |-> ""], [denom |-> "", + prefix0 |-> [channel |-> "", port |-> ""], + prefix1 |-> [channel |-> "", port |-> ""]] + >> + :> 0, + error |-> FALSE, + handler |-> "", + packet |-> + [data |-> + [amount |-> 3, + denomTrace |-> + [denom |-> "eth", + prefix0 |-> [channel |-> "channel-0", port |-> "transfer"], + prefix1 |-> [channel |-> "", port |-> ""]], + receiver |-> "a2", + sender |-> "a3"], + destChannel |-> "channel-0", + destPort |-> "transfer", + sourceChannel |-> "channel-0", + sourcePort |-> "transfer"]] +/\ p = [data |-> + [amount |-> 3, + denomTrace |-> + [denom |-> "eth", + prefix0 |-> [channel |-> "channel-0", port |-> "transfer"], + prefix1 |-> [channel |-> "", port |-> ""]], + receiver |-> "a2", + sender |-> "a3"], + destChannel |-> "channel-0", + destPort |-> "transfer", + sourceChannel |-> "channel-0", + sourcePort |-> "transfer"] + +(* Transition 6 to State3 *) + +State3 == +/\ bank = << + [channel |-> "", id |-> "", port |-> ""], [denom |-> "", + prefix0 |-> [channel |-> "", port |-> ""], + prefix1 |-> [channel |-> "", port |-> ""]] + >> + :> 0 + @@ << + [channel |-> "", id |-> "a3", port |-> ""], [denom |-> "eth", + prefix0 |-> [channel |-> "channel-0", port |-> "transfer"], + prefix1 |-> [channel |-> "", port |-> ""]] + >> + :> 3 +/\ count = 1 +/\ error = FALSE +/\ handler = "OnTimeoutPacket" +/\ history = 0 + :> [bankAfter |-> + << + [channel |-> "", id |-> "", port |-> ""], [denom |-> "", + prefix0 |-> [channel |-> "", port |-> ""], + prefix1 |-> [channel |-> "", port |-> ""]] + >> + :> 0, + bankBefore |-> + << + [channel |-> "", id |-> "", port |-> ""], [denom |-> "", + prefix0 |-> [channel |-> "", port |-> ""], + prefix1 |-> [channel |-> "", port |-> ""]] + >> + :> 0, + error |-> FALSE, + handler |-> "", + packet |-> + [data |-> + [amount |-> 3, + denomTrace |-> + [denom |-> "eth", + prefix0 |-> [channel |-> "channel-0", port |-> "transfer"], + prefix1 |-> [channel |-> "", port |-> ""]], + receiver |-> "a2", + sender |-> "a3"], + destChannel |-> "channel-0", + destPort |-> "transfer", + sourceChannel |-> "channel-0", + sourcePort |-> "transfer"]] + @@ 1 + :> 
[bankAfter |-> + << + [channel |-> "", id |-> "", port |-> ""], [denom |-> "", + prefix0 |-> [channel |-> "", port |-> ""], + prefix1 |-> [channel |-> "", port |-> ""]] + >> + :> 0 + @@ << + [channel |-> "", id |-> "a3", port |-> ""], [denom |-> "eth", + prefix0 |-> [channel |-> "channel-0", port |-> "transfer"], + prefix1 |-> [channel |-> "", port |-> ""]] + >> + :> 3, + bankBefore |-> + << + [channel |-> "", id |-> "", port |-> ""], [denom |-> "", + prefix0 |-> [channel |-> "", port |-> ""], + prefix1 |-> [channel |-> "", port |-> ""]] + >> + :> 0, + error |-> FALSE, + handler |-> "OnTimeoutPacket", + packet |-> + [data |-> + [amount |-> 3, + denomTrace |-> + [denom |-> "eth", + prefix0 |-> [channel |-> "channel-0", port |-> "transfer"], + prefix1 |-> [channel |-> "", port |-> ""]], + receiver |-> "a2", + sender |-> "a3"], + destChannel |-> "channel-0", + destPort |-> "transfer", + sourceChannel |-> "channel-0", + sourcePort |-> "transfer"]] +/\ p = [data |-> + [amount |-> 3, + denomTrace |-> + [denom |-> "btc", + prefix0 |-> [channel |-> "cosmos-hub", port |-> "cosmos-hub"], + prefix1 |-> [channel |-> "channel-1", port |-> "transfer"]], + receiver |-> "a1", + sender |-> "a2"], + destChannel |-> "channel-0", + destPort |-> "transfer", + sourceChannel |-> "channel-1", + sourcePort |-> "transfer"] + +(* Transition 10 to State4 *) + +State4 == +/\ bank = << + [channel |-> "", id |-> "", port |-> ""], [denom |-> "", + prefix0 |-> [channel |-> "", port |-> ""], + prefix1 |-> [channel |-> "", port |-> ""]] + >> + :> 0 + @@ << + [channel |-> "", id |-> "a2", port |-> ""], [denom |-> "btc", + prefix0 |-> [channel |-> "cosmos-hub", port |-> "cosmos-hub"], + prefix1 |-> [channel |-> "channel-1", port |-> "transfer"]] + >> + :> 3 + @@ << + [channel |-> "", id |-> "a3", port |-> ""], [denom |-> "eth", + prefix0 |-> [channel |-> "channel-0", port |-> "transfer"], + prefix1 |-> [channel |-> "", port |-> ""]] + >> + :> 3 +/\ count = 2 +/\ error = FALSE +/\ handler = "OnRecvAcknowledgementError" +/\ history = 0 + :> [bankAfter |-> + << + [channel |-> "", id |-> "", port |-> ""], [denom |-> "", + prefix0 |-> [channel |-> "", port |-> ""], + prefix1 |-> [channel |-> "", port |-> ""]] + >> + :> 0, + bankBefore |-> + << + [channel |-> "", id |-> "", port |-> ""], [denom |-> "", + prefix0 |-> [channel |-> "", port |-> ""], + prefix1 |-> [channel |-> "", port |-> ""]] + >> + :> 0, + error |-> FALSE, + handler |-> "", + packet |-> + [data |-> + [amount |-> 3, + denomTrace |-> + [denom |-> "eth", + prefix0 |-> [channel |-> "channel-0", port |-> "transfer"], + prefix1 |-> [channel |-> "", port |-> ""]], + receiver |-> "a2", + sender |-> "a3"], + destChannel |-> "channel-0", + destPort |-> "transfer", + sourceChannel |-> "channel-0", + sourcePort |-> "transfer"]] + @@ 1 + :> [bankAfter |-> + << + [channel |-> "", id |-> "", port |-> ""], [denom |-> "", + prefix0 |-> [channel |-> "", port |-> ""], + prefix1 |-> [channel |-> "", port |-> ""]] + >> + :> 0 + @@ << + [channel |-> "", id |-> "a3", port |-> ""], [denom |-> "eth", + prefix0 |-> [channel |-> "channel-0", port |-> "transfer"], + prefix1 |-> [channel |-> "", port |-> ""]] + >> + :> 3, + bankBefore |-> + << + [channel |-> "", id |-> "", port |-> ""], [denom |-> "", + prefix0 |-> [channel |-> "", port |-> ""], + prefix1 |-> [channel |-> "", port |-> ""]] + >> + :> 0, + error |-> FALSE, + handler |-> "OnTimeoutPacket", + packet |-> + [data |-> + [amount |-> 3, + denomTrace |-> + [denom |-> "eth", + prefix0 |-> [channel |-> "channel-0", port |-> "transfer"], + 
prefix1 |-> [channel |-> "", port |-> ""]], + receiver |-> "a2", + sender |-> "a3"], + destChannel |-> "channel-0", + destPort |-> "transfer", + sourceChannel |-> "channel-0", + sourcePort |-> "transfer"]] + @@ 2 + :> [bankAfter |-> + << + [channel |-> "", id |-> "", port |-> ""], [denom |-> "", + prefix0 |-> [channel |-> "", port |-> ""], + prefix1 |-> [channel |-> "", port |-> ""]] + >> + :> 0 + @@ << + [channel |-> "", id |-> "a2", port |-> ""], [denom |-> "btc", + prefix0 |-> [channel |-> "cosmos-hub", port |-> "cosmos-hub"], + prefix1 |-> [channel |-> "channel-1", port |-> "transfer"]] + >> + :> 3 + @@ << + [channel |-> "", id |-> "a3", port |-> ""], [denom |-> "eth", + prefix0 |-> [channel |-> "channel-0", port |-> "transfer"], + prefix1 |-> [channel |-> "", port |-> ""]] + >> + :> 3, + bankBefore |-> + << + [channel |-> "", id |-> "", port |-> ""], [denom |-> "", + prefix0 |-> [channel |-> "", port |-> ""], + prefix1 |-> [channel |-> "", port |-> ""]] + >> + :> 0 + @@ << + [channel |-> "", id |-> "a3", port |-> ""], [denom |-> "eth", + prefix0 |-> [channel |-> "channel-0", port |-> "transfer"], + prefix1 |-> [channel |-> "", port |-> ""]] + >> + :> 3, + error |-> FALSE, + handler |-> "OnRecvAcknowledgementError", + packet |-> + [data |-> + [amount |-> 3, + denomTrace |-> + [denom |-> "btc", + prefix0 |-> [channel |-> "cosmos-hub", port |-> "cosmos-hub"], + prefix1 |-> [channel |-> "channel-1", port |-> "transfer"]], + receiver |-> "a1", + sender |-> "a2"], + destChannel |-> "channel-0", + destPort |-> "transfer", + sourceChannel |-> "channel-1", + sourcePort |-> "transfer"]] +/\ p = [data |-> + [amount |-> 3, + denomTrace |-> + [denom |-> "atom", + prefix0 |-> [channel |-> "cosmos-hub", port |-> "cosmos-hub"], + prefix1 |-> [channel |-> "", port |-> ""]], + receiver |-> "a2", + sender |-> "a1"], + destChannel |-> "channel-1", + destPort |-> "transfer", + sourceChannel |-> "channel-0", + sourcePort |-> "transfer"] + +(* Transition 5 to State5 *) + +State5 == +/\ bank = << + [channel |-> "", id |-> "", port |-> ""], [denom |-> "", + prefix0 |-> [channel |-> "", port |-> ""], + prefix1 |-> [channel |-> "", port |-> ""]] + >> + :> 0 + @@ << + [channel |-> "", id |-> "a2", port |-> ""], [denom |-> "atom", + prefix0 |-> [channel |-> "cosmos-hub", port |-> "cosmos-hub"], + prefix1 |-> [channel |-> "channel-1", port |-> "transfer"]] + >> + :> 3 + @@ << + [channel |-> "", id |-> "a2", port |-> ""], [denom |-> "btc", + prefix0 |-> [channel |-> "cosmos-hub", port |-> "cosmos-hub"], + prefix1 |-> [channel |-> "channel-1", port |-> "transfer"]] + >> + :> 3 + @@ << + [channel |-> "", id |-> "a3", port |-> ""], [denom |-> "eth", + prefix0 |-> [channel |-> "channel-0", port |-> "transfer"], + prefix1 |-> [channel |-> "", port |-> ""]] + >> + :> 3 +/\ count = 3 +/\ error = FALSE +/\ handler = "OnRecvPacket" +/\ history = 0 + :> [bankAfter |-> + << + [channel |-> "", id |-> "", port |-> ""], [denom |-> "", + prefix0 |-> [channel |-> "", port |-> ""], + prefix1 |-> [channel |-> "", port |-> ""]] + >> + :> 0, + bankBefore |-> + << + [channel |-> "", id |-> "", port |-> ""], [denom |-> "", + prefix0 |-> [channel |-> "", port |-> ""], + prefix1 |-> [channel |-> "", port |-> ""]] + >> + :> 0, + error |-> FALSE, + handler |-> "", + packet |-> + [data |-> + [amount |-> 3, + denomTrace |-> + [denom |-> "eth", + prefix0 |-> [channel |-> "channel-0", port |-> "transfer"], + prefix1 |-> [channel |-> "", port |-> ""]], + receiver |-> "a2", + sender |-> "a3"], + destChannel |-> "channel-0", + destPort |-> 
"transfer", + sourceChannel |-> "channel-0", + sourcePort |-> "transfer"]] + @@ 1 + :> [bankAfter |-> + << + [channel |-> "", id |-> "", port |-> ""], [denom |-> "", + prefix0 |-> [channel |-> "", port |-> ""], + prefix1 |-> [channel |-> "", port |-> ""]] + >> + :> 0 + @@ << + [channel |-> "", id |-> "a3", port |-> ""], [denom |-> "eth", + prefix0 |-> [channel |-> "channel-0", port |-> "transfer"], + prefix1 |-> [channel |-> "", port |-> ""]] + >> + :> 3, + bankBefore |-> + << + [channel |-> "", id |-> "", port |-> ""], [denom |-> "", + prefix0 |-> [channel |-> "", port |-> ""], + prefix1 |-> [channel |-> "", port |-> ""]] + >> + :> 0, + error |-> FALSE, + handler |-> "OnTimeoutPacket", + packet |-> + [data |-> + [amount |-> 3, + denomTrace |-> + [denom |-> "eth", + prefix0 |-> [channel |-> "channel-0", port |-> "transfer"], + prefix1 |-> [channel |-> "", port |-> ""]], + receiver |-> "a2", + sender |-> "a3"], + destChannel |-> "channel-0", + destPort |-> "transfer", + sourceChannel |-> "channel-0", + sourcePort |-> "transfer"]] + @@ 2 + :> [bankAfter |-> + << + [channel |-> "", id |-> "", port |-> ""], [denom |-> "", + prefix0 |-> [channel |-> "", port |-> ""], + prefix1 |-> [channel |-> "", port |-> ""]] + >> + :> 0 + @@ << + [channel |-> "", id |-> "a2", port |-> ""], [denom |-> "btc", + prefix0 |-> [channel |-> "cosmos-hub", port |-> "cosmos-hub"], + prefix1 |-> [channel |-> "channel-1", port |-> "transfer"]] + >> + :> 3 + @@ << + [channel |-> "", id |-> "a3", port |-> ""], [denom |-> "eth", + prefix0 |-> [channel |-> "channel-0", port |-> "transfer"], + prefix1 |-> [channel |-> "", port |-> ""]] + >> + :> 3, + bankBefore |-> + << + [channel |-> "", id |-> "", port |-> ""], [denom |-> "", + prefix0 |-> [channel |-> "", port |-> ""], + prefix1 |-> [channel |-> "", port |-> ""]] + >> + :> 0 + @@ << + [channel |-> "", id |-> "a3", port |-> ""], [denom |-> "eth", + prefix0 |-> [channel |-> "channel-0", port |-> "transfer"], + prefix1 |-> [channel |-> "", port |-> ""]] + >> + :> 3, + error |-> FALSE, + handler |-> "OnRecvAcknowledgementError", + packet |-> + [data |-> + [amount |-> 3, + denomTrace |-> + [denom |-> "btc", + prefix0 |-> [channel |-> "cosmos-hub", port |-> "cosmos-hub"], + prefix1 |-> [channel |-> "channel-1", port |-> "transfer"]], + receiver |-> "a1", + sender |-> "a2"], + destChannel |-> "channel-0", + destPort |-> "transfer", + sourceChannel |-> "channel-1", + sourcePort |-> "transfer"]] + @@ 3 + :> [bankAfter |-> + << + [channel |-> "", id |-> "", port |-> ""], [denom |-> "", + prefix0 |-> [channel |-> "", port |-> ""], + prefix1 |-> [channel |-> "", port |-> ""]] + >> + :> 0 + @@ << + [channel |-> "", id |-> "a2", port |-> ""], [denom |-> "atom", + prefix0 |-> [channel |-> "cosmos-hub", port |-> "cosmos-hub"], + prefix1 |-> [channel |-> "channel-1", port |-> "transfer"]] + >> + :> 3 + @@ << + [channel |-> "", id |-> "a2", port |-> ""], [denom |-> "btc", + prefix0 |-> [channel |-> "cosmos-hub", port |-> "cosmos-hub"], + prefix1 |-> [channel |-> "channel-1", port |-> "transfer"]] + >> + :> 3 + @@ << + [channel |-> "", id |-> "a3", port |-> ""], [denom |-> "eth", + prefix0 |-> [channel |-> "channel-0", port |-> "transfer"], + prefix1 |-> [channel |-> "", port |-> ""]] + >> + :> 3, + bankBefore |-> + << + [channel |-> "", id |-> "", port |-> ""], [denom |-> "", + prefix0 |-> [channel |-> "", port |-> ""], + prefix1 |-> [channel |-> "", port |-> ""]] + >> + :> 0 + @@ << + [channel |-> "", id |-> "a2", port |-> ""], [denom |-> "btc", + prefix0 |-> [channel |-> "cosmos-hub", 
port |-> "cosmos-hub"], + prefix1 |-> [channel |-> "channel-1", port |-> "transfer"]] + >> + :> 3 + @@ << + [channel |-> "", id |-> "a3", port |-> ""], [denom |-> "eth", + prefix0 |-> [channel |-> "channel-0", port |-> "transfer"], + prefix1 |-> [channel |-> "", port |-> ""]] + >> + :> 3, + error |-> FALSE, + handler |-> "OnRecvPacket", + packet |-> + [data |-> + [amount |-> 3, + denomTrace |-> + [denom |-> "atom", + prefix0 |-> [channel |-> "cosmos-hub", port |-> "cosmos-hub"], + prefix1 |-> [channel |-> "", port |-> ""]], + receiver |-> "a2", + sender |-> "a1"], + destChannel |-> "channel-1", + destPort |-> "transfer", + sourceChannel |-> "channel-0", + sourcePort |-> "transfer"]] +/\ p = [data |-> + [amount |-> 2, + denomTrace |-> + [denom |-> "", + prefix0 |-> [channel |-> "channel-1", port |-> "channel-1"], + prefix1 |-> [channel |-> "channel-0", port |-> ""]], + receiver |-> "", + sender |-> "a1"], + destChannel |-> "transfer", + destPort |-> "cosmos-hub", + sourceChannel |-> "cosmos-hub", + sourcePort |-> "bitcoin-hub"] + +(* Transition 12 to State6 *) + +State6 == +/\ bank = << + [channel |-> "", id |-> "", port |-> ""], [denom |-> "", + prefix0 |-> [channel |-> "", port |-> ""], + prefix1 |-> [channel |-> "", port |-> ""]] + >> + :> 0 + @@ << + [channel |-> "", id |-> "a2", port |-> ""], [denom |-> "atom", + prefix0 |-> [channel |-> "cosmos-hub", port |-> "cosmos-hub"], + prefix1 |-> [channel |-> "channel-1", port |-> "transfer"]] + >> + :> 3 + @@ << + [channel |-> "", id |-> "a2", port |-> ""], [denom |-> "btc", + prefix0 |-> [channel |-> "cosmos-hub", port |-> "cosmos-hub"], + prefix1 |-> [channel |-> "channel-1", port |-> "transfer"]] + >> + :> 3 + @@ << + [channel |-> "", id |-> "a3", port |-> ""], [denom |-> "eth", + prefix0 |-> [channel |-> "channel-0", port |-> "transfer"], + prefix1 |-> [channel |-> "", port |-> ""]] + >> + :> 3 +/\ count = 4 +/\ error = FALSE +/\ handler = "OnRecvAcknowledgementResult" +/\ history = 0 + :> [bankAfter |-> + << + [channel |-> "", id |-> "", port |-> ""], [denom |-> "", + prefix0 |-> [channel |-> "", port |-> ""], + prefix1 |-> [channel |-> "", port |-> ""]] + >> + :> 0, + bankBefore |-> + << + [channel |-> "", id |-> "", port |-> ""], [denom |-> "", + prefix0 |-> [channel |-> "", port |-> ""], + prefix1 |-> [channel |-> "", port |-> ""]] + >> + :> 0, + error |-> FALSE, + handler |-> "", + packet |-> + [data |-> + [amount |-> 3, + denomTrace |-> + [denom |-> "eth", + prefix0 |-> [channel |-> "channel-0", port |-> "transfer"], + prefix1 |-> [channel |-> "", port |-> ""]], + receiver |-> "a2", + sender |-> "a3"], + destChannel |-> "channel-0", + destPort |-> "transfer", + sourceChannel |-> "channel-0", + sourcePort |-> "transfer"]] + @@ 1 + :> [bankAfter |-> + << + [channel |-> "", id |-> "", port |-> ""], [denom |-> "", + prefix0 |-> [channel |-> "", port |-> ""], + prefix1 |-> [channel |-> "", port |-> ""]] + >> + :> 0 + @@ << + [channel |-> "", id |-> "a3", port |-> ""], [denom |-> "eth", + prefix0 |-> [channel |-> "channel-0", port |-> "transfer"], + prefix1 |-> [channel |-> "", port |-> ""]] + >> + :> 3, + bankBefore |-> + << + [channel |-> "", id |-> "", port |-> ""], [denom |-> "", + prefix0 |-> [channel |-> "", port |-> ""], + prefix1 |-> [channel |-> "", port |-> ""]] + >> + :> 0, + error |-> FALSE, + handler |-> "OnTimeoutPacket", + packet |-> + [data |-> + [amount |-> 3, + denomTrace |-> + [denom |-> "eth", + prefix0 |-> [channel |-> "channel-0", port |-> "transfer"], + prefix1 |-> [channel |-> "", port |-> ""]], + receiver |-> 
"a2", + sender |-> "a3"], + destChannel |-> "channel-0", + destPort |-> "transfer", + sourceChannel |-> "channel-0", + sourcePort |-> "transfer"]] + @@ 2 + :> [bankAfter |-> + << + [channel |-> "", id |-> "", port |-> ""], [denom |-> "", + prefix0 |-> [channel |-> "", port |-> ""], + prefix1 |-> [channel |-> "", port |-> ""]] + >> + :> 0 + @@ << + [channel |-> "", id |-> "a2", port |-> ""], [denom |-> "btc", + prefix0 |-> [channel |-> "cosmos-hub", port |-> "cosmos-hub"], + prefix1 |-> [channel |-> "channel-1", port |-> "transfer"]] + >> + :> 3 + @@ << + [channel |-> "", id |-> "a3", port |-> ""], [denom |-> "eth", + prefix0 |-> [channel |-> "channel-0", port |-> "transfer"], + prefix1 |-> [channel |-> "", port |-> ""]] + >> + :> 3, + bankBefore |-> + << + [channel |-> "", id |-> "", port |-> ""], [denom |-> "", + prefix0 |-> [channel |-> "", port |-> ""], + prefix1 |-> [channel |-> "", port |-> ""]] + >> + :> 0 + @@ << + [channel |-> "", id |-> "a3", port |-> ""], [denom |-> "eth", + prefix0 |-> [channel |-> "channel-0", port |-> "transfer"], + prefix1 |-> [channel |-> "", port |-> ""]] + >> + :> 3, + error |-> FALSE, + handler |-> "OnRecvAcknowledgementError", + packet |-> + [data |-> + [amount |-> 3, + denomTrace |-> + [denom |-> "btc", + prefix0 |-> [channel |-> "cosmos-hub", port |-> "cosmos-hub"], + prefix1 |-> [channel |-> "channel-1", port |-> "transfer"]], + receiver |-> "a1", + sender |-> "a2"], + destChannel |-> "channel-0", + destPort |-> "transfer", + sourceChannel |-> "channel-1", + sourcePort |-> "transfer"]] + @@ 3 + :> [bankAfter |-> + << + [channel |-> "", id |-> "", port |-> ""], [denom |-> "", + prefix0 |-> [channel |-> "", port |-> ""], + prefix1 |-> [channel |-> "", port |-> ""]] + >> + :> 0 + @@ << + [channel |-> "", id |-> "a2", port |-> ""], [denom |-> "atom", + prefix0 |-> [channel |-> "cosmos-hub", port |-> "cosmos-hub"], + prefix1 |-> [channel |-> "channel-1", port |-> "transfer"]] + >> + :> 3 + @@ << + [channel |-> "", id |-> "a2", port |-> ""], [denom |-> "btc", + prefix0 |-> [channel |-> "cosmos-hub", port |-> "cosmos-hub"], + prefix1 |-> [channel |-> "channel-1", port |-> "transfer"]] + >> + :> 3 + @@ << + [channel |-> "", id |-> "a3", port |-> ""], [denom |-> "eth", + prefix0 |-> [channel |-> "channel-0", port |-> "transfer"], + prefix1 |-> [channel |-> "", port |-> ""]] + >> + :> 3, + bankBefore |-> + << + [channel |-> "", id |-> "", port |-> ""], [denom |-> "", + prefix0 |-> [channel |-> "", port |-> ""], + prefix1 |-> [channel |-> "", port |-> ""]] + >> + :> 0 + @@ << + [channel |-> "", id |-> "a2", port |-> ""], [denom |-> "btc", + prefix0 |-> [channel |-> "cosmos-hub", port |-> "cosmos-hub"], + prefix1 |-> [channel |-> "channel-1", port |-> "transfer"]] + >> + :> 3 + @@ << + [channel |-> "", id |-> "a3", port |-> ""], [denom |-> "eth", + prefix0 |-> [channel |-> "channel-0", port |-> "transfer"], + prefix1 |-> [channel |-> "", port |-> ""]] + >> + :> 3, + error |-> FALSE, + handler |-> "OnRecvPacket", + packet |-> + [data |-> + [amount |-> 3, + denomTrace |-> + [denom |-> "atom", + prefix0 |-> [channel |-> "cosmos-hub", port |-> "cosmos-hub"], + prefix1 |-> [channel |-> "", port |-> ""]], + receiver |-> "a2", + sender |-> "a1"], + destChannel |-> "channel-1", + destPort |-> "transfer", + sourceChannel |-> "channel-0", + sourcePort |-> "transfer"]] + @@ 4 + :> [bankAfter |-> + << + [channel |-> "", id |-> "", port |-> ""], [denom |-> "", + prefix0 |-> [channel |-> "", port |-> ""], + prefix1 |-> [channel |-> "", port |-> ""]] + >> + :> 0 + @@ << + 
[channel |-> "", id |-> "a2", port |-> ""], [denom |-> "atom", + prefix0 |-> [channel |-> "cosmos-hub", port |-> "cosmos-hub"], + prefix1 |-> [channel |-> "channel-1", port |-> "transfer"]] + >> + :> 3 + @@ << + [channel |-> "", id |-> "a2", port |-> ""], [denom |-> "btc", + prefix0 |-> [channel |-> "cosmos-hub", port |-> "cosmos-hub"], + prefix1 |-> [channel |-> "channel-1", port |-> "transfer"]] + >> + :> 3 + @@ << + [channel |-> "", id |-> "a3", port |-> ""], [denom |-> "eth", + prefix0 |-> [channel |-> "channel-0", port |-> "transfer"], + prefix1 |-> [channel |-> "", port |-> ""]] + >> + :> 3, + bankBefore |-> + << + [channel |-> "", id |-> "", port |-> ""], [denom |-> "", + prefix0 |-> [channel |-> "", port |-> ""], + prefix1 |-> [channel |-> "", port |-> ""]] + >> + :> 0 + @@ << + [channel |-> "", id |-> "a2", port |-> ""], [denom |-> "atom", + prefix0 |-> [channel |-> "cosmos-hub", port |-> "cosmos-hub"], + prefix1 |-> [channel |-> "channel-1", port |-> "transfer"]] + >> + :> 3 + @@ << + [channel |-> "", id |-> "a2", port |-> ""], [denom |-> "btc", + prefix0 |-> [channel |-> "cosmos-hub", port |-> "cosmos-hub"], + prefix1 |-> [channel |-> "channel-1", port |-> "transfer"]] + >> + :> 3 + @@ << + [channel |-> "", id |-> "a3", port |-> ""], [denom |-> "eth", + prefix0 |-> [channel |-> "channel-0", port |-> "transfer"], + prefix1 |-> [channel |-> "", port |-> ""]] + >> + :> 3, + error |-> FALSE, + handler |-> "OnRecvAcknowledgementResult", + packet |-> + [data |-> + [amount |-> 2, + denomTrace |-> + [denom |-> "", + prefix0 |-> [channel |-> "channel-1", port |-> "channel-1"], + prefix1 |-> [channel |-> "channel-0", port |-> ""]], + receiver |-> "", + sender |-> "a1"], + destChannel |-> "transfer", + destPort |-> "cosmos-hub", + sourceChannel |-> "cosmos-hub", + sourcePort |-> "bitcoin-hub"]] +/\ p = [data |-> + [amount |-> 1, + denomTrace |-> + [denom |-> "eth", + prefix0 |-> [channel |-> "channel-0", port |-> "transfer"], + prefix1 |-> [channel |-> "", port |-> ""]], + receiver |-> "a3", + sender |-> "a3"], + destChannel |-> "channel-0", + destPort |-> "transfer", + sourceChannel |-> "channel-1", + sourcePort |-> "transfer"] + +(* Transition 1 to State7 *) + +State7 == +/\ bank = << + [channel |-> "", id |-> "", port |-> ""], [denom |-> "", + prefix0 |-> [channel |-> "", port |-> ""], + prefix1 |-> [channel |-> "", port |-> ""]] + >> + :> 0 + @@ << + [channel |-> "", id |-> "a2", port |-> ""], [denom |-> "atom", + prefix0 |-> [channel |-> "cosmos-hub", port |-> "cosmos-hub"], + prefix1 |-> [channel |-> "channel-1", port |-> "transfer"]] + >> + :> 3 + @@ << + [channel |-> "", id |-> "a2", port |-> ""], [denom |-> "btc", + prefix0 |-> [channel |-> "cosmos-hub", port |-> "cosmos-hub"], + prefix1 |-> [channel |-> "channel-1", port |-> "transfer"]] + >> + :> 3 + @@ << + [channel |-> "", id |-> "a3", port |-> ""], [denom |-> "eth", + prefix0 |-> [channel |-> "channel-0", port |-> "transfer"], + prefix1 |-> [channel |-> "", port |-> ""]] + >> + :> 2 + @@ << + [channel |-> "channel-1", id |-> "", port |-> "transfer"], [denom |-> "eth", + prefix0 |-> [channel |-> "channel-0", port |-> "transfer"], + prefix1 |-> [channel |-> "", port |-> ""]] + >> + :> 1 +/\ count = 5 +/\ error = FALSE +/\ handler = "SendTransfer" +/\ history = 0 + :> [bankAfter |-> + << + [channel |-> "", id |-> "", port |-> ""], [denom |-> "", + prefix0 |-> [channel |-> "", port |-> ""], + prefix1 |-> [channel |-> "", port |-> ""]] + >> + :> 0, + bankBefore |-> + << + [channel |-> "", id |-> "", port |-> ""], [denom |-> "", + 
prefix0 |-> [channel |-> "", port |-> ""], + prefix1 |-> [channel |-> "", port |-> ""]] + >> + :> 0, + error |-> FALSE, + handler |-> "", + packet |-> + [data |-> + [amount |-> 3, + denomTrace |-> + [denom |-> "eth", + prefix0 |-> [channel |-> "channel-0", port |-> "transfer"], + prefix1 |-> [channel |-> "", port |-> ""]], + receiver |-> "a2", + sender |-> "a3"], + destChannel |-> "channel-0", + destPort |-> "transfer", + sourceChannel |-> "channel-0", + sourcePort |-> "transfer"]] + @@ 1 + :> [bankAfter |-> + << + [channel |-> "", id |-> "", port |-> ""], [denom |-> "", + prefix0 |-> [channel |-> "", port |-> ""], + prefix1 |-> [channel |-> "", port |-> ""]] + >> + :> 0 + @@ << + [channel |-> "", id |-> "a3", port |-> ""], [denom |-> "eth", + prefix0 |-> [channel |-> "channel-0", port |-> "transfer"], + prefix1 |-> [channel |-> "", port |-> ""]] + >> + :> 3, + bankBefore |-> + << + [channel |-> "", id |-> "", port |-> ""], [denom |-> "", + prefix0 |-> [channel |-> "", port |-> ""], + prefix1 |-> [channel |-> "", port |-> ""]] + >> + :> 0, + error |-> FALSE, + handler |-> "OnTimeoutPacket", + packet |-> + [data |-> + [amount |-> 3, + denomTrace |-> + [denom |-> "eth", + prefix0 |-> [channel |-> "channel-0", port |-> "transfer"], + prefix1 |-> [channel |-> "", port |-> ""]], + receiver |-> "a2", + sender |-> "a3"], + destChannel |-> "channel-0", + destPort |-> "transfer", + sourceChannel |-> "channel-0", + sourcePort |-> "transfer"]] + @@ 2 + :> [bankAfter |-> + << + [channel |-> "", id |-> "", port |-> ""], [denom |-> "", + prefix0 |-> [channel |-> "", port |-> ""], + prefix1 |-> [channel |-> "", port |-> ""]] + >> + :> 0 + @@ << + [channel |-> "", id |-> "a2", port |-> ""], [denom |-> "btc", + prefix0 |-> [channel |-> "cosmos-hub", port |-> "cosmos-hub"], + prefix1 |-> [channel |-> "channel-1", port |-> "transfer"]] + >> + :> 3 + @@ << + [channel |-> "", id |-> "a3", port |-> ""], [denom |-> "eth", + prefix0 |-> [channel |-> "channel-0", port |-> "transfer"], + prefix1 |-> [channel |-> "", port |-> ""]] + >> + :> 3, + bankBefore |-> + << + [channel |-> "", id |-> "", port |-> ""], [denom |-> "", + prefix0 |-> [channel |-> "", port |-> ""], + prefix1 |-> [channel |-> "", port |-> ""]] + >> + :> 0 + @@ << + [channel |-> "", id |-> "a3", port |-> ""], [denom |-> "eth", + prefix0 |-> [channel |-> "channel-0", port |-> "transfer"], + prefix1 |-> [channel |-> "", port |-> ""]] + >> + :> 3, + error |-> FALSE, + handler |-> "OnRecvAcknowledgementError", + packet |-> + [data |-> + [amount |-> 3, + denomTrace |-> + [denom |-> "btc", + prefix0 |-> [channel |-> "cosmos-hub", port |-> "cosmos-hub"], + prefix1 |-> [channel |-> "channel-1", port |-> "transfer"]], + receiver |-> "a1", + sender |-> "a2"], + destChannel |-> "channel-0", + destPort |-> "transfer", + sourceChannel |-> "channel-1", + sourcePort |-> "transfer"]] + @@ 3 + :> [bankAfter |-> + << + [channel |-> "", id |-> "", port |-> ""], [denom |-> "", + prefix0 |-> [channel |-> "", port |-> ""], + prefix1 |-> [channel |-> "", port |-> ""]] + >> + :> 0 + @@ << + [channel |-> "", id |-> "a2", port |-> ""], [denom |-> "atom", + prefix0 |-> [channel |-> "cosmos-hub", port |-> "cosmos-hub"], + prefix1 |-> [channel |-> "channel-1", port |-> "transfer"]] + >> + :> 3 + @@ << + [channel |-> "", id |-> "a2", port |-> ""], [denom |-> "btc", + prefix0 |-> [channel |-> "cosmos-hub", port |-> "cosmos-hub"], + prefix1 |-> [channel |-> "channel-1", port |-> "transfer"]] + >> + :> 3 + @@ << + [channel |-> "", id |-> "a3", port |-> ""], [denom |-> "eth", + 
prefix0 |-> [channel |-> "channel-0", port |-> "transfer"], + prefix1 |-> [channel |-> "", port |-> ""]] + >> + :> 3, + bankBefore |-> + << + [channel |-> "", id |-> "", port |-> ""], [denom |-> "", + prefix0 |-> [channel |-> "", port |-> ""], + prefix1 |-> [channel |-> "", port |-> ""]] + >> + :> 0 + @@ << + [channel |-> "", id |-> "a2", port |-> ""], [denom |-> "btc", + prefix0 |-> [channel |-> "cosmos-hub", port |-> "cosmos-hub"], + prefix1 |-> [channel |-> "channel-1", port |-> "transfer"]] + >> + :> 3 + @@ << + [channel |-> "", id |-> "a3", port |-> ""], [denom |-> "eth", + prefix0 |-> [channel |-> "channel-0", port |-> "transfer"], + prefix1 |-> [channel |-> "", port |-> ""]] + >> + :> 3, + error |-> FALSE, + handler |-> "OnRecvPacket", + packet |-> + [data |-> + [amount |-> 3, + denomTrace |-> + [denom |-> "atom", + prefix0 |-> [channel |-> "cosmos-hub", port |-> "cosmos-hub"], + prefix1 |-> [channel |-> "", port |-> ""]], + receiver |-> "a2", + sender |-> "a1"], + destChannel |-> "channel-1", + destPort |-> "transfer", + sourceChannel |-> "channel-0", + sourcePort |-> "transfer"]] + @@ 4 + :> [bankAfter |-> + << + [channel |-> "", id |-> "", port |-> ""], [denom |-> "", + prefix0 |-> [channel |-> "", port |-> ""], + prefix1 |-> [channel |-> "", port |-> ""]] + >> + :> 0 + @@ << + [channel |-> "", id |-> "a2", port |-> ""], [denom |-> "atom", + prefix0 |-> [channel |-> "cosmos-hub", port |-> "cosmos-hub"], + prefix1 |-> [channel |-> "channel-1", port |-> "transfer"]] + >> + :> 3 + @@ << + [channel |-> "", id |-> "a2", port |-> ""], [denom |-> "btc", + prefix0 |-> [channel |-> "cosmos-hub", port |-> "cosmos-hub"], + prefix1 |-> [channel |-> "channel-1", port |-> "transfer"]] + >> + :> 3 + @@ << + [channel |-> "", id |-> "a3", port |-> ""], [denom |-> "eth", + prefix0 |-> [channel |-> "channel-0", port |-> "transfer"], + prefix1 |-> [channel |-> "", port |-> ""]] + >> + :> 3, + bankBefore |-> + << + [channel |-> "", id |-> "", port |-> ""], [denom |-> "", + prefix0 |-> [channel |-> "", port |-> ""], + prefix1 |-> [channel |-> "", port |-> ""]] + >> + :> 0 + @@ << + [channel |-> "", id |-> "a2", port |-> ""], [denom |-> "atom", + prefix0 |-> [channel |-> "cosmos-hub", port |-> "cosmos-hub"], + prefix1 |-> [channel |-> "channel-1", port |-> "transfer"]] + >> + :> 3 + @@ << + [channel |-> "", id |-> "a2", port |-> ""], [denom |-> "btc", + prefix0 |-> [channel |-> "cosmos-hub", port |-> "cosmos-hub"], + prefix1 |-> [channel |-> "channel-1", port |-> "transfer"]] + >> + :> 3 + @@ << + [channel |-> "", id |-> "a3", port |-> ""], [denom |-> "eth", + prefix0 |-> [channel |-> "channel-0", port |-> "transfer"], + prefix1 |-> [channel |-> "", port |-> ""]] + >> + :> 3, + error |-> FALSE, + handler |-> "OnRecvAcknowledgementResult", + packet |-> + [data |-> + [amount |-> 2, + denomTrace |-> + [denom |-> "", + prefix0 |-> [channel |-> "channel-1", port |-> "channel-1"], + prefix1 |-> [channel |-> "channel-0", port |-> ""]], + receiver |-> "", + sender |-> "a1"], + destChannel |-> "transfer", + destPort |-> "cosmos-hub", + sourceChannel |-> "cosmos-hub", + sourcePort |-> "bitcoin-hub"]] + @@ 5 + :> [bankAfter |-> + << + [channel |-> "", id |-> "", port |-> ""], [denom |-> "", + prefix0 |-> [channel |-> "", port |-> ""], + prefix1 |-> [channel |-> "", port |-> ""]] + >> + :> 0 + @@ << + [channel |-> "", id |-> "a2", port |-> ""], [denom |-> "atom", + prefix0 |-> [channel |-> "cosmos-hub", port |-> "cosmos-hub"], + prefix1 |-> [channel |-> "channel-1", port |-> "transfer"]] + >> + :> 3 + @@ << + 
[channel |-> "", id |-> "a2", port |-> ""], [denom |-> "btc", + prefix0 |-> [channel |-> "cosmos-hub", port |-> "cosmos-hub"], + prefix1 |-> [channel |-> "channel-1", port |-> "transfer"]] + >> + :> 3 + @@ << + [channel |-> "", id |-> "a3", port |-> ""], [denom |-> "eth", + prefix0 |-> [channel |-> "channel-0", port |-> "transfer"], + prefix1 |-> [channel |-> "", port |-> ""]] + >> + :> 2 + @@ << + [channel |-> "channel-1", id |-> "", port |-> "transfer"], [denom |-> + "eth", + prefix0 |-> [channel |-> "channel-0", port |-> "transfer"], + prefix1 |-> [channel |-> "", port |-> ""]] + >> + :> 1, + bankBefore |-> + << + [channel |-> "", id |-> "", port |-> ""], [denom |-> "", + prefix0 |-> [channel |-> "", port |-> ""], + prefix1 |-> [channel |-> "", port |-> ""]] + >> + :> 0 + @@ << + [channel |-> "", id |-> "a2", port |-> ""], [denom |-> "atom", + prefix0 |-> [channel |-> "cosmos-hub", port |-> "cosmos-hub"], + prefix1 |-> [channel |-> "channel-1", port |-> "transfer"]] + >> + :> 3 + @@ << + [channel |-> "", id |-> "a2", port |-> ""], [denom |-> "btc", + prefix0 |-> [channel |-> "cosmos-hub", port |-> "cosmos-hub"], + prefix1 |-> [channel |-> "channel-1", port |-> "transfer"]] + >> + :> 3 + @@ << + [channel |-> "", id |-> "a3", port |-> ""], [denom |-> "eth", + prefix0 |-> [channel |-> "channel-0", port |-> "transfer"], + prefix1 |-> [channel |-> "", port |-> ""]] + >> + :> 3, + error |-> FALSE, + handler |-> "SendTransfer", + packet |-> + [data |-> + [amount |-> 1, + denomTrace |-> + [denom |-> "eth", + prefix0 |-> [channel |-> "channel-0", port |-> "transfer"], + prefix1 |-> [channel |-> "", port |-> ""]], + receiver |-> "a3", + sender |-> "a3"], + destChannel |-> "channel-0", + destPort |-> "transfer", + sourceChannel |-> "channel-1", + sourcePort |-> "transfer"]] +/\ p = [data |-> + [amount |-> 0, + denomTrace |-> + [denom |-> "btc", + prefix0 |-> [channel |-> "transfer", port |-> "cosmos-hub"], + prefix1 |-> [channel |-> "cosmos-hub", port |-> "transfer"]], + receiver |-> "", + sender |-> ""], + destChannel |-> "bitcoin-hub", + destPort |-> "ethereum-hub", + sourceChannel |-> "transfer", + sourcePort |-> "channel-1"] + +(* The following formula holds true in the last state and violates the invariant *) + +InvariantViolation == + (count >= 5 + /\ (\A s1$2 \in DOMAIN history: + \A s2$2 \in DOMAIN history: + s1$2 = s2$2 \/ ~(history[s1$2]["handler"] = history[s2$2]["handler"]))) + /\ (\A s$2 \in DOMAIN history: + s$2 <= 0 + \/ (history[s$2]["error"] = FALSE + /\ history[s$2]["packet"]["data"]["amount"] > 0)) + +================================================================================ +\* Created by Apalache on Thu Dec 10 12:49:42 CET 2020 +\* https://github.com/informalsystems/apalache diff --git a/applications/transfer/keeper/model_based_tests/TestOnRecvAcknowledgementErrorFail.json b/applications/transfer/keeper/model_based_tests/TestOnRecvAcknowledgementErrorFail.json new file mode 100644 index 0000000000..f1f553210b --- /dev/null +++ b/applications/transfer/keeper/model_based_tests/TestOnRecvAcknowledgementErrorFail.json @@ -0,0 +1,58 @@ +[ + { + "packet": { + "sourceChannel": "", + "sourcePort": "", + "destChannel": "", + "destPort": "", + "data": { + "sender": "a1", + "receiver": "a2", + "amount": 1, + "denom": [ + "cosmos-hub", + "transfer", + "channel-0", + "cosmos-hub", + "btc" + ] + } + }, + "handler": "OnRecvAcknowledgementError", + "bankBefore": [ + { + "address": [ + "", + "", + "" + ], + "denom": [ + "", + "", + "", + "", + "" + ], + "amount": 0 + } + ], + 
"bankAfter": [ + { + "address": [ + "", + "", + "" + ], + "denom": [ + "", + "", + "", + "", + "" + ], + "amount": 0 + } + ], + "error": true + } +] \ No newline at end of file diff --git a/applications/transfer/keeper/model_based_tests/TestOnRecvAcknowledgementErrorFail.tla b/applications/transfer/keeper/model_based_tests/TestOnRecvAcknowledgementErrorFail.tla new file mode 100644 index 0000000000..583b3211dc --- /dev/null +++ b/applications/transfer/keeper/model_based_tests/TestOnRecvAcknowledgementErrorFail.tla @@ -0,0 +1,159 @@ +------------------------- MODULE counterexample ------------------------- + +EXTENDS relay_tests + +(* Initial state *) + +State1 == +TRUE +(* Transition 0 to State2 *) + +State2 == +/\ bank = << + [channel |-> "", id |-> "", port |-> ""], [denom |-> "", + prefix0 |-> [channel |-> "", port |-> ""], + prefix1 |-> [channel |-> "", port |-> ""]] + >> + :> 0 +/\ count = 0 +/\ error = FALSE +/\ handler = "" +/\ history = 0 + :> [bankAfter |-> + << + [channel |-> "", id |-> "", port |-> ""], [denom |-> "", + prefix0 |-> [channel |-> "", port |-> ""], + prefix1 |-> [channel |-> "", port |-> ""]] + >> + :> 0, + bankBefore |-> + << + [channel |-> "", id |-> "", port |-> ""], [denom |-> "", + prefix0 |-> [channel |-> "", port |-> ""], + prefix1 |-> [channel |-> "", port |-> ""]] + >> + :> 0, + error |-> FALSE, + handler |-> "", + packet |-> + [data |-> + [amount |-> 1, + denomTrace |-> + [denom |-> "btc", + prefix0 |-> [channel |-> "cosmos-hub", port |-> "channel-0"], + prefix1 |-> [channel |-> "transfer", port |-> "cosmos-hub"]], + receiver |-> "a2", + sender |-> "a1"], + destChannel |-> "", + destPort |-> "", + sourceChannel |-> "", + sourcePort |-> ""]] +/\ p = [data |-> + [amount |-> 1, + denomTrace |-> + [denom |-> "btc", + prefix0 |-> [channel |-> "cosmos-hub", port |-> "channel-0"], + prefix1 |-> [channel |-> "transfer", port |-> "cosmos-hub"]], + receiver |-> "a2", + sender |-> "a1"], + destChannel |-> "", + destPort |-> "", + sourceChannel |-> "", + sourcePort |-> ""] + +(* Transition 7 to State3 *) + +State3 == +/\ bank = << + [channel |-> "", id |-> "", port |-> ""], [denom |-> "", + prefix0 |-> [channel |-> "", port |-> ""], + prefix1 |-> [channel |-> "", port |-> ""]] + >> + :> 0 +/\ count = 1 +/\ error = TRUE +/\ handler = "OnRecvAcknowledgementError" +/\ history = 0 + :> [bankAfter |-> + << + [channel |-> "", id |-> "", port |-> ""], [denom |-> "", + prefix0 |-> [channel |-> "", port |-> ""], + prefix1 |-> [channel |-> "", port |-> ""]] + >> + :> 0, + bankBefore |-> + << + [channel |-> "", id |-> "", port |-> ""], [denom |-> "", + prefix0 |-> [channel |-> "", port |-> ""], + prefix1 |-> [channel |-> "", port |-> ""]] + >> + :> 0, + error |-> FALSE, + handler |-> "", + packet |-> + [data |-> + [amount |-> 1, + denomTrace |-> + [denom |-> "btc", + prefix0 |-> [channel |-> "cosmos-hub", port |-> "channel-0"], + prefix1 |-> [channel |-> "transfer", port |-> "cosmos-hub"]], + receiver |-> "a2", + sender |-> "a1"], + destChannel |-> "", + destPort |-> "", + sourceChannel |-> "", + sourcePort |-> ""]] + @@ 1 + :> [bankAfter |-> + << + [channel |-> "", id |-> "", port |-> ""], [denom |-> "", + prefix0 |-> [channel |-> "", port |-> ""], + prefix1 |-> [channel |-> "", port |-> ""]] + >> + :> 0, + bankBefore |-> + << + [channel |-> "", id |-> "", port |-> ""], [denom |-> "", + prefix0 |-> [channel |-> "", port |-> ""], + prefix1 |-> [channel |-> "", port |-> ""]] + >> + :> 0, + error |-> TRUE, + handler |-> "OnRecvAcknowledgementError", + packet |-> + [data |-> + 
[amount |-> 1, + denomTrace |-> + [denom |-> "btc", + prefix0 |-> [channel |-> "cosmos-hub", port |-> "channel-0"], + prefix1 |-> [channel |-> "transfer", port |-> "cosmos-hub"]], + receiver |-> "a2", + sender |-> "a1"], + destChannel |-> "", + destPort |-> "", + sourceChannel |-> "", + sourcePort |-> ""]] +/\ p = [data |-> + [amount |-> 0, + denomTrace |-> + [denom |-> "", + prefix0 |-> [channel |-> "", port |-> ""], + prefix1 |-> [channel |-> "", port |-> ""]], + receiver |-> "", + sender |-> ""], + destChannel |-> "", + destPort |-> "", + sourceChannel |-> "", + sourcePort |-> ""] + +(* The following formula holds true in the last state and violates the invariant *) + +InvariantViolation == + BMC!Skolem((\E s$2 \in DOMAIN history: + history[s$2]["handler"] = "OnRecvAcknowledgementError" + /\ history[s$2]["error"] = TRUE + /\ history[s$2]["packet"]["data"]["amount"] > 0)) + +================================================================================ +\* Created by Apalache on Thu Dec 10 11:15:18 CET 2020 +\* https://github.com/informalsystems/apalache diff --git a/applications/transfer/keeper/model_based_tests/TestOnRecvAcknowledgementErrorPass.json b/applications/transfer/keeper/model_based_tests/TestOnRecvAcknowledgementErrorPass.json new file mode 100644 index 0000000000..3fbfe7fdf0 --- /dev/null +++ b/applications/transfer/keeper/model_based_tests/TestOnRecvAcknowledgementErrorPass.json @@ -0,0 +1,159 @@ +[ + { + "packet": { + "sourceChannel": "channel-0", + "sourcePort": "transfer", + "destChannel": "channel-1", + "destPort": "transfer", + "data": { + "sender": "", + "receiver": "a1", + "amount": 1, + "denom": [ + "", + "", + "channel-0", + "ethereum-hub", + "btc" + ] + } + }, + "handler": "OnRecvPacket", + "bankBefore": [ + { + "address": [ + "", + "", + "" + ], + "denom": [ + "", + "", + "", + "", + "" + ], + "amount": 0 + } + ], + "bankAfter": [ + { + "address": [ + "", + "", + "" + ], + "denom": [ + "", + "", + "", + "", + "" + ], + "amount": 0 + }, + { + "address": [ + "", + "", + "a1" + ], + "denom": [ + "transfer", + "channel-1", + "channel-0", + "ethereum-hub", + "btc" + ], + "amount": 1 + } + ], + "error": false + }, + { + "packet": { + "sourceChannel": "channel-1", + "sourcePort": "transfer", + "destChannel": "channel-0", + "destPort": "transfer", + "data": { + "sender": "a1", + "receiver": "a2", + "amount": 1, + "denom": [ + "transfer", + "channel-1", + "channel-0", + "ethereum-hub", + "btc" + ] + } + }, + "handler": "OnRecvAcknowledgementError", + "bankBefore": [ + { + "address": [ + "", + "", + "" + ], + "denom": [ + "", + "", + "", + "", + "" + ], + "amount": 0 + }, + { + "address": [ + "", + "", + "a1" + ], + "denom": [ + "transfer", + "channel-1", + "channel-0", + "ethereum-hub", + "btc" + ], + "amount": 1 + } + ], + "bankAfter": [ + { + "address": [ + "", + "", + "" + ], + "denom": [ + "", + "", + "", + "", + "" + ], + "amount": 0 + }, + { + "address": [ + "", + "", + "a1" + ], + "denom": [ + "transfer", + "channel-1", + "channel-0", + "ethereum-hub", + "btc" + ], + "amount": 2 + } + ], + "error": false + } +] \ No newline at end of file diff --git a/applications/transfer/keeper/model_based_tests/TestOnRecvAcknowledgementErrorPass.tla b/applications/transfer/keeper/model_based_tests/TestOnRecvAcknowledgementErrorPass.tla new file mode 100644 index 0000000000..cd43eb2647 --- /dev/null +++ b/applications/transfer/keeper/model_based_tests/TestOnRecvAcknowledgementErrorPass.tla @@ -0,0 +1,310 @@ +------------------------- MODULE counterexample 
------------------------- + +EXTENDS relay_tests + +(* Initial state *) + +State1 == +TRUE +(* Transition 0 to State2 *) + +State2 == +/\ bank = << + [channel |-> "", id |-> "", port |-> ""], [denom |-> "", + prefix0 |-> [channel |-> "", port |-> ""], + prefix1 |-> [channel |-> "", port |-> ""]] + >> + :> 0 +/\ count = 0 +/\ error = FALSE +/\ handler = "" +/\ history = 0 + :> [bankAfter |-> + << + [channel |-> "", id |-> "", port |-> ""], [denom |-> "", + prefix0 |-> [channel |-> "", port |-> ""], + prefix1 |-> [channel |-> "", port |-> ""]] + >> + :> 0, + bankBefore |-> + << + [channel |-> "", id |-> "", port |-> ""], [denom |-> "", + prefix0 |-> [channel |-> "", port |-> ""], + prefix1 |-> [channel |-> "", port |-> ""]] + >> + :> 0, + error |-> FALSE, + handler |-> "", + packet |-> + [data |-> + [amount |-> 1, + denomTrace |-> + [denom |-> "btc", + prefix0 |-> [channel |-> "ethereum-hub", port |-> "channel-0"], + prefix1 |-> [channel |-> "", port |-> ""]], + receiver |-> "a1", + sender |-> ""], + destChannel |-> "channel-1", + destPort |-> "transfer", + sourceChannel |-> "channel-0", + sourcePort |-> "transfer"]] +/\ p = [data |-> + [amount |-> 1, + denomTrace |-> + [denom |-> "btc", + prefix0 |-> [channel |-> "ethereum-hub", port |-> "channel-0"], + prefix1 |-> [channel |-> "", port |-> ""]], + receiver |-> "a1", + sender |-> ""], + destChannel |-> "channel-1", + destPort |-> "transfer", + sourceChannel |-> "channel-0", + sourcePort |-> "transfer"] + +(* Transition 2 to State3 *) + +State3 == +/\ bank = << + [channel |-> "", id |-> "", port |-> ""], [denom |-> "", + prefix0 |-> [channel |-> "", port |-> ""], + prefix1 |-> [channel |-> "", port |-> ""]] + >> + :> 0 + @@ << + [channel |-> "", id |-> "a1", port |-> ""], [denom |-> "btc", + prefix0 |-> [channel |-> "ethereum-hub", port |-> "channel-0"], + prefix1 |-> [channel |-> "channel-1", port |-> "transfer"]] + >> + :> 1 +/\ count = 1 +/\ error = FALSE +/\ handler = "OnRecvPacket" +/\ history = 0 + :> [bankAfter |-> + << + [channel |-> "", id |-> "", port |-> ""], [denom |-> "", + prefix0 |-> [channel |-> "", port |-> ""], + prefix1 |-> [channel |-> "", port |-> ""]] + >> + :> 0, + bankBefore |-> + << + [channel |-> "", id |-> "", port |-> ""], [denom |-> "", + prefix0 |-> [channel |-> "", port |-> ""], + prefix1 |-> [channel |-> "", port |-> ""]] + >> + :> 0, + error |-> FALSE, + handler |-> "", + packet |-> + [data |-> + [amount |-> 1, + denomTrace |-> + [denom |-> "btc", + prefix0 |-> [channel |-> "ethereum-hub", port |-> "channel-0"], + prefix1 |-> [channel |-> "", port |-> ""]], + receiver |-> "a1", + sender |-> ""], + destChannel |-> "channel-1", + destPort |-> "transfer", + sourceChannel |-> "channel-0", + sourcePort |-> "transfer"]] + @@ 1 + :> [bankAfter |-> + << + [channel |-> "", id |-> "", port |-> ""], [denom |-> "", + prefix0 |-> [channel |-> "", port |-> ""], + prefix1 |-> [channel |-> "", port |-> ""]] + >> + :> 0 + @@ << + [channel |-> "", id |-> "a1", port |-> ""], [denom |-> "btc", + prefix0 |-> [channel |-> "ethereum-hub", port |-> "channel-0"], + prefix1 |-> [channel |-> "channel-1", port |-> "transfer"]] + >> + :> 1, + bankBefore |-> + << + [channel |-> "", id |-> "", port |-> ""], [denom |-> "", + prefix0 |-> [channel |-> "", port |-> ""], + prefix1 |-> [channel |-> "", port |-> ""]] + >> + :> 0, + error |-> FALSE, + handler |-> "OnRecvPacket", + packet |-> + [data |-> + [amount |-> 1, + denomTrace |-> + [denom |-> "btc", + prefix0 |-> [channel |-> "ethereum-hub", port |-> "channel-0"], + prefix1 |-> [channel |-> 
"", port |-> ""]], + receiver |-> "a1", + sender |-> ""], + destChannel |-> "channel-1", + destPort |-> "transfer", + sourceChannel |-> "channel-0", + sourcePort |-> "transfer"]] +/\ p = [data |-> + [amount |-> 1, + denomTrace |-> + [denom |-> "btc", + prefix0 |-> [channel |-> "ethereum-hub", port |-> "channel-0"], + prefix1 |-> [channel |-> "channel-1", port |-> "transfer"]], + receiver |-> "a2", + sender |-> "a1"], + destChannel |-> "channel-0", + destPort |-> "transfer", + sourceChannel |-> "channel-1", + sourcePort |-> "transfer"] + +(* Transition 11 to State4 *) + +State4 == +/\ bank = << + [channel |-> "", id |-> "", port |-> ""], [denom |-> "", + prefix0 |-> [channel |-> "", port |-> ""], + prefix1 |-> [channel |-> "", port |-> ""]] + >> + :> 0 + @@ << + [channel |-> "", id |-> "a1", port |-> ""], [denom |-> "btc", + prefix0 |-> [channel |-> "ethereum-hub", port |-> "channel-0"], + prefix1 |-> [channel |-> "channel-1", port |-> "transfer"]] + >> + :> 2 +/\ count = 2 +/\ error = FALSE +/\ handler = "OnRecvAcknowledgementError" +/\ history = 0 + :> [bankAfter |-> + << + [channel |-> "", id |-> "", port |-> ""], [denom |-> "", + prefix0 |-> [channel |-> "", port |-> ""], + prefix1 |-> [channel |-> "", port |-> ""]] + >> + :> 0, + bankBefore |-> + << + [channel |-> "", id |-> "", port |-> ""], [denom |-> "", + prefix0 |-> [channel |-> "", port |-> ""], + prefix1 |-> [channel |-> "", port |-> ""]] + >> + :> 0, + error |-> FALSE, + handler |-> "", + packet |-> + [data |-> + [amount |-> 1, + denomTrace |-> + [denom |-> "btc", + prefix0 |-> [channel |-> "ethereum-hub", port |-> "channel-0"], + prefix1 |-> [channel |-> "", port |-> ""]], + receiver |-> "a1", + sender |-> ""], + destChannel |-> "channel-1", + destPort |-> "transfer", + sourceChannel |-> "channel-0", + sourcePort |-> "transfer"]] + @@ 1 + :> [bankAfter |-> + << + [channel |-> "", id |-> "", port |-> ""], [denom |-> "", + prefix0 |-> [channel |-> "", port |-> ""], + prefix1 |-> [channel |-> "", port |-> ""]] + >> + :> 0 + @@ << + [channel |-> "", id |-> "a1", port |-> ""], [denom |-> "btc", + prefix0 |-> [channel |-> "ethereum-hub", port |-> "channel-0"], + prefix1 |-> [channel |-> "channel-1", port |-> "transfer"]] + >> + :> 1, + bankBefore |-> + << + [channel |-> "", id |-> "", port |-> ""], [denom |-> "", + prefix0 |-> [channel |-> "", port |-> ""], + prefix1 |-> [channel |-> "", port |-> ""]] + >> + :> 0, + error |-> FALSE, + handler |-> "OnRecvPacket", + packet |-> + [data |-> + [amount |-> 1, + denomTrace |-> + [denom |-> "btc", + prefix0 |-> [channel |-> "ethereum-hub", port |-> "channel-0"], + prefix1 |-> [channel |-> "", port |-> ""]], + receiver |-> "a1", + sender |-> ""], + destChannel |-> "channel-1", + destPort |-> "transfer", + sourceChannel |-> "channel-0", + sourcePort |-> "transfer"]] + @@ 2 + :> [bankAfter |-> + << + [channel |-> "", id |-> "", port |-> ""], [denom |-> "", + prefix0 |-> [channel |-> "", port |-> ""], + prefix1 |-> [channel |-> "", port |-> ""]] + >> + :> 0 + @@ << + [channel |-> "", id |-> "a1", port |-> ""], [denom |-> "btc", + prefix0 |-> [channel |-> "ethereum-hub", port |-> "channel-0"], + prefix1 |-> [channel |-> "channel-1", port |-> "transfer"]] + >> + :> 2, + bankBefore |-> + << + [channel |-> "", id |-> "", port |-> ""], [denom |-> "", + prefix0 |-> [channel |-> "", port |-> ""], + prefix1 |-> [channel |-> "", port |-> ""]] + >> + :> 0 + @@ << + [channel |-> "", id |-> "a1", port |-> ""], [denom |-> "btc", + prefix0 |-> [channel |-> "ethereum-hub", port |-> "channel-0"], + prefix1 |-> 
[channel |-> "channel-1", port |-> "transfer"]] + >> + :> 1, + error |-> FALSE, + handler |-> "OnRecvAcknowledgementError", + packet |-> + [data |-> + [amount |-> 1, + denomTrace |-> + [denom |-> "btc", + prefix0 |-> [channel |-> "ethereum-hub", port |-> "channel-0"], + prefix1 |-> [channel |-> "channel-1", port |-> "transfer"]], + receiver |-> "a2", + sender |-> "a1"], + destChannel |-> "channel-0", + destPort |-> "transfer", + sourceChannel |-> "channel-1", + sourcePort |-> "transfer"]] +/\ p = [data |-> + [amount |-> 0, + denomTrace |-> + [denom |-> "", + prefix0 |-> [channel |-> "", port |-> ""], + prefix1 |-> [channel |-> "", port |-> ""]], + receiver |-> "", + sender |-> ""], + destChannel |-> "", + destPort |-> "", + sourceChannel |-> "", + sourcePort |-> ""] + +(* The following formula holds true in the last state and violates the invariant *) + +InvariantViolation == + BMC!Skolem((\E s$2 \in DOMAIN history: + history[s$2]["handler"] = "OnRecvAcknowledgementError" + /\ history[s$2]["error"] = FALSE + /\ history[s$2]["packet"]["data"]["amount"] > 0)) + +================================================================================ +\* Created by Apalache on Thu Dec 10 11:14:33 CET 2020 +\* https://github.com/informalsystems/apalache diff --git a/applications/transfer/keeper/model_based_tests/TestOnRecvAcknowledgementResultFail.json b/applications/transfer/keeper/model_based_tests/TestOnRecvAcknowledgementResultFail.json new file mode 100644 index 0000000000..9110a38ab6 --- /dev/null +++ b/applications/transfer/keeper/model_based_tests/TestOnRecvAcknowledgementResultFail.json @@ -0,0 +1,58 @@ +[ + { + "packet": { + "sourceChannel": "", + "sourcePort": "", + "destChannel": "", + "destPort": "", + "data": { + "sender": "a1", + "receiver": "a2", + "amount": 1, + "denom": [ + "cosmos-hub", + "transfer", + "channel-0", + "cosmos-hub", + "btc" + ] + } + }, + "handler": "OnRecvAcknowledgementResult", + "bankBefore": [ + { + "address": [ + "", + "", + "" + ], + "denom": [ + "", + "", + "", + "", + "" + ], + "amount": 0 + } + ], + "bankAfter": [ + { + "address": [ + "", + "", + "" + ], + "denom": [ + "", + "", + "", + "", + "" + ], + "amount": 0 + } + ], + "error": true + } +] \ No newline at end of file diff --git a/applications/transfer/keeper/model_based_tests/TestOnRecvAcknowledgementResultFail.tla b/applications/transfer/keeper/model_based_tests/TestOnRecvAcknowledgementResultFail.tla new file mode 100644 index 0000000000..b97ec73a3d --- /dev/null +++ b/applications/transfer/keeper/model_based_tests/TestOnRecvAcknowledgementResultFail.tla @@ -0,0 +1,159 @@ +------------------------- MODULE counterexample ------------------------- + +EXTENDS relay_tests + +(* Initial state *) + +State1 == +TRUE +(* Transition 0 to State2 *) + +State2 == +/\ bank = << + [channel |-> "", id |-> "", port |-> ""], [denom |-> "", + prefix0 |-> [channel |-> "", port |-> ""], + prefix1 |-> [channel |-> "", port |-> ""]] + >> + :> 0 +/\ count = 0 +/\ error = FALSE +/\ handler = "" +/\ history = 0 + :> [bankAfter |-> + << + [channel |-> "", id |-> "", port |-> ""], [denom |-> "", + prefix0 |-> [channel |-> "", port |-> ""], + prefix1 |-> [channel |-> "", port |-> ""]] + >> + :> 0, + bankBefore |-> + << + [channel |-> "", id |-> "", port |-> ""], [denom |-> "", + prefix0 |-> [channel |-> "", port |-> ""], + prefix1 |-> [channel |-> "", port |-> ""]] + >> + :> 0, + error |-> FALSE, + handler |-> "", + packet |-> + [data |-> + [amount |-> 1, + denomTrace |-> + [denom |-> "btc", + prefix0 |-> [channel |-> 
"cosmos-hub", port |-> "channel-0"], + prefix1 |-> [channel |-> "transfer", port |-> "cosmos-hub"]], + receiver |-> "a2", + sender |-> "a1"], + destChannel |-> "", + destPort |-> "", + sourceChannel |-> "", + sourcePort |-> ""]] +/\ p = [data |-> + [amount |-> 1, + denomTrace |-> + [denom |-> "btc", + prefix0 |-> [channel |-> "cosmos-hub", port |-> "channel-0"], + prefix1 |-> [channel |-> "transfer", port |-> "cosmos-hub"]], + receiver |-> "a2", + sender |-> "a1"], + destChannel |-> "", + destPort |-> "", + sourceChannel |-> "", + sourcePort |-> ""] + +(* Transition 13 to State3 *) + +State3 == +/\ bank = << + [channel |-> "", id |-> "", port |-> ""], [denom |-> "", + prefix0 |-> [channel |-> "", port |-> ""], + prefix1 |-> [channel |-> "", port |-> ""]] + >> + :> 0 +/\ count = 1 +/\ error = TRUE +/\ handler = "OnRecvAcknowledgementResult" +/\ history = 0 + :> [bankAfter |-> + << + [channel |-> "", id |-> "", port |-> ""], [denom |-> "", + prefix0 |-> [channel |-> "", port |-> ""], + prefix1 |-> [channel |-> "", port |-> ""]] + >> + :> 0, + bankBefore |-> + << + [channel |-> "", id |-> "", port |-> ""], [denom |-> "", + prefix0 |-> [channel |-> "", port |-> ""], + prefix1 |-> [channel |-> "", port |-> ""]] + >> + :> 0, + error |-> FALSE, + handler |-> "", + packet |-> + [data |-> + [amount |-> 1, + denomTrace |-> + [denom |-> "btc", + prefix0 |-> [channel |-> "cosmos-hub", port |-> "channel-0"], + prefix1 |-> [channel |-> "transfer", port |-> "cosmos-hub"]], + receiver |-> "a2", + sender |-> "a1"], + destChannel |-> "", + destPort |-> "", + sourceChannel |-> "", + sourcePort |-> ""]] + @@ 1 + :> [bankAfter |-> + << + [channel |-> "", id |-> "", port |-> ""], [denom |-> "", + prefix0 |-> [channel |-> "", port |-> ""], + prefix1 |-> [channel |-> "", port |-> ""]] + >> + :> 0, + bankBefore |-> + << + [channel |-> "", id |-> "", port |-> ""], [denom |-> "", + prefix0 |-> [channel |-> "", port |-> ""], + prefix1 |-> [channel |-> "", port |-> ""]] + >> + :> 0, + error |-> TRUE, + handler |-> "OnRecvAcknowledgementResult", + packet |-> + [data |-> + [amount |-> 1, + denomTrace |-> + [denom |-> "btc", + prefix0 |-> [channel |-> "cosmos-hub", port |-> "channel-0"], + prefix1 |-> [channel |-> "transfer", port |-> "cosmos-hub"]], + receiver |-> "a2", + sender |-> "a1"], + destChannel |-> "", + destPort |-> "", + sourceChannel |-> "", + sourcePort |-> ""]] +/\ p = [data |-> + [amount |-> 0, + denomTrace |-> + [denom |-> "", + prefix0 |-> [channel |-> "", port |-> ""], + prefix1 |-> [channel |-> "", port |-> ""]], + receiver |-> "", + sender |-> ""], + destChannel |-> "", + destPort |-> "", + sourceChannel |-> "", + sourcePort |-> ""] + +(* The following formula holds true in the last state and violates the invariant *) + +InvariantViolation == + BMC!Skolem((\E s$2 \in DOMAIN history: + history[s$2]["handler"] = "OnRecvAcknowledgementResult" + /\ history[s$2]["error"] = TRUE + /\ history[s$2]["packet"]["data"]["amount"] > 0)) + +================================================================================ +\* Created by Apalache on Thu Dec 10 11:13:42 CET 2020 +\* https://github.com/informalsystems/apalache diff --git a/applications/transfer/keeper/model_based_tests/TestOnRecvAcknowledgementResultPass.json b/applications/transfer/keeper/model_based_tests/TestOnRecvAcknowledgementResultPass.json new file mode 100644 index 0000000000..5215df7da3 --- /dev/null +++ b/applications/transfer/keeper/model_based_tests/TestOnRecvAcknowledgementResultPass.json @@ -0,0 +1,58 @@ +[ + { + "packet": { + 
"sourceChannel": "ethereum-hub", + "sourcePort": "transfer", + "destChannel": "channel-0", + "destPort": "ethereum-hub", + "data": { + "sender": "a1", + "receiver": "a2", + "amount": 1, + "denom": [ + "cosmos-hub", + "transfer", + "channel-0", + "cosmos-hub", + "btc" + ] + } + }, + "handler": "OnRecvAcknowledgementResult", + "bankBefore": [ + { + "address": [ + "", + "", + "" + ], + "denom": [ + "", + "", + "", + "", + "" + ], + "amount": 0 + } + ], + "bankAfter": [ + { + "address": [ + "", + "", + "" + ], + "denom": [ + "", + "", + "", + "", + "" + ], + "amount": 0 + } + ], + "error": false + } +] \ No newline at end of file diff --git a/applications/transfer/keeper/model_based_tests/TestOnRecvAcknowledgementResultPass.tla b/applications/transfer/keeper/model_based_tests/TestOnRecvAcknowledgementResultPass.tla new file mode 100644 index 0000000000..f9d049c546 --- /dev/null +++ b/applications/transfer/keeper/model_based_tests/TestOnRecvAcknowledgementResultPass.tla @@ -0,0 +1,159 @@ +------------------------- MODULE counterexample ------------------------- + +EXTENDS relay_tests + +(* Initial state *) + +State1 == +TRUE +(* Transition 0 to State2 *) + +State2 == +/\ bank = << + [channel |-> "", id |-> "", port |-> ""], [denom |-> "", + prefix0 |-> [channel |-> "", port |-> ""], + prefix1 |-> [channel |-> "", port |-> ""]] + >> + :> 0 +/\ count = 0 +/\ error = FALSE +/\ handler = "" +/\ history = 0 + :> [bankAfter |-> + << + [channel |-> "", id |-> "", port |-> ""], [denom |-> "", + prefix0 |-> [channel |-> "", port |-> ""], + prefix1 |-> [channel |-> "", port |-> ""]] + >> + :> 0, + bankBefore |-> + << + [channel |-> "", id |-> "", port |-> ""], [denom |-> "", + prefix0 |-> [channel |-> "", port |-> ""], + prefix1 |-> [channel |-> "", port |-> ""]] + >> + :> 0, + error |-> FALSE, + handler |-> "", + packet |-> + [data |-> + [amount |-> 1, + denomTrace |-> + [denom |-> "btc", + prefix0 |-> [channel |-> "cosmos-hub", port |-> "channel-0"], + prefix1 |-> [channel |-> "transfer", port |-> "cosmos-hub"]], + receiver |-> "a2", + sender |-> "a1"], + destChannel |-> "channel-0", + destPort |-> "ethereum-hub", + sourceChannel |-> "ethereum-hub", + sourcePort |-> "transfer"]] +/\ p = [data |-> + [amount |-> 1, + denomTrace |-> + [denom |-> "btc", + prefix0 |-> [channel |-> "cosmos-hub", port |-> "channel-0"], + prefix1 |-> [channel |-> "transfer", port |-> "cosmos-hub"]], + receiver |-> "a2", + sender |-> "a1"], + destChannel |-> "channel-0", + destPort |-> "ethereum-hub", + sourceChannel |-> "ethereum-hub", + sourcePort |-> "transfer"] + +(* Transition 12 to State3 *) + +State3 == +/\ bank = << + [channel |-> "", id |-> "", port |-> ""], [denom |-> "", + prefix0 |-> [channel |-> "", port |-> ""], + prefix1 |-> [channel |-> "", port |-> ""]] + >> + :> 0 +/\ count = 1 +/\ error = FALSE +/\ handler = "OnRecvAcknowledgementResult" +/\ history = 0 + :> [bankAfter |-> + << + [channel |-> "", id |-> "", port |-> ""], [denom |-> "", + prefix0 |-> [channel |-> "", port |-> ""], + prefix1 |-> [channel |-> "", port |-> ""]] + >> + :> 0, + bankBefore |-> + << + [channel |-> "", id |-> "", port |-> ""], [denom |-> "", + prefix0 |-> [channel |-> "", port |-> ""], + prefix1 |-> [channel |-> "", port |-> ""]] + >> + :> 0, + error |-> FALSE, + handler |-> "", + packet |-> + [data |-> + [amount |-> 1, + denomTrace |-> + [denom |-> "btc", + prefix0 |-> [channel |-> "cosmos-hub", port |-> "channel-0"], + prefix1 |-> [channel |-> "transfer", port |-> "cosmos-hub"]], + receiver |-> "a2", + sender |-> "a1"], + 
destChannel |-> "channel-0", + destPort |-> "ethereum-hub", + sourceChannel |-> "ethereum-hub", + sourcePort |-> "transfer"]] + @@ 1 + :> [bankAfter |-> + << + [channel |-> "", id |-> "", port |-> ""], [denom |-> "", + prefix0 |-> [channel |-> "", port |-> ""], + prefix1 |-> [channel |-> "", port |-> ""]] + >> + :> 0, + bankBefore |-> + << + [channel |-> "", id |-> "", port |-> ""], [denom |-> "", + prefix0 |-> [channel |-> "", port |-> ""], + prefix1 |-> [channel |-> "", port |-> ""]] + >> + :> 0, + error |-> FALSE, + handler |-> "OnRecvAcknowledgementResult", + packet |-> + [data |-> + [amount |-> 1, + denomTrace |-> + [denom |-> "btc", + prefix0 |-> [channel |-> "cosmos-hub", port |-> "channel-0"], + prefix1 |-> [channel |-> "transfer", port |-> "cosmos-hub"]], + receiver |-> "a2", + sender |-> "a1"], + destChannel |-> "channel-0", + destPort |-> "ethereum-hub", + sourceChannel |-> "ethereum-hub", + sourcePort |-> "transfer"]] +/\ p = [data |-> + [amount |-> 0, + denomTrace |-> + [denom |-> "", + prefix0 |-> [channel |-> "", port |-> ""], + prefix1 |-> [channel |-> "", port |-> ""]], + receiver |-> "", + sender |-> ""], + destChannel |-> "", + destPort |-> "", + sourceChannel |-> "", + sourcePort |-> ""] + +(* The following formula holds true in the last state and violates the invariant *) + +InvariantViolation == + BMC!Skolem((\E s$2 \in DOMAIN history: + history[s$2]["handler"] = "OnRecvAcknowledgementResult" + /\ history[s$2]["error"] = FALSE + /\ history[s$2]["packet"]["data"]["amount"] > 0)) + +================================================================================ +\* Created by Apalache on Thu Dec 10 11:12:59 CET 2020 +\* https://github.com/informalsystems/apalache diff --git a/applications/transfer/keeper/model_based_tests/TestOnRecvPacketFail.json b/applications/transfer/keeper/model_based_tests/TestOnRecvPacketFail.json new file mode 100644 index 0000000000..9a7e8c406e --- /dev/null +++ b/applications/transfer/keeper/model_based_tests/TestOnRecvPacketFail.json @@ -0,0 +1,58 @@ +[ + { + "packet": { + "sourceChannel": "channel-0", + "sourcePort": "transfer", + "destChannel": "channel-0", + "destPort": "transfer", + "data": { + "sender": "", + "receiver": "", + "amount": 1, + "denom": [ + "", + "", + "transfer", + "channel-0", + "" + ] + } + }, + "handler": "OnRecvPacket", + "bankBefore": [ + { + "address": [ + "", + "", + "" + ], + "denom": [ + "", + "", + "", + "", + "" + ], + "amount": 0 + } + ], + "bankAfter": [ + { + "address": [ + "", + "", + "" + ], + "denom": [ + "", + "", + "", + "", + "" + ], + "amount": 0 + } + ], + "error": true + } +] \ No newline at end of file diff --git a/applications/transfer/keeper/model_based_tests/TestOnRecvPacketFail.tla b/applications/transfer/keeper/model_based_tests/TestOnRecvPacketFail.tla new file mode 100644 index 0000000000..980be28ae2 --- /dev/null +++ b/applications/transfer/keeper/model_based_tests/TestOnRecvPacketFail.tla @@ -0,0 +1,159 @@ +------------------------- MODULE counterexample ------------------------- + +EXTENDS relay_tests + +(* Initial state *) + +State1 == +TRUE +(* Transition 0 to State2 *) + +State2 == +/\ bank = << + [channel |-> "", id |-> "", port |-> ""], [denom |-> "", + prefix0 |-> [channel |-> "", port |-> ""], + prefix1 |-> [channel |-> "", port |-> ""]] + >> + :> 0 +/\ count = 0 +/\ error = FALSE +/\ handler = "" +/\ history = 0 + :> [bankAfter |-> + << + [channel |-> "", id |-> "", port |-> ""], [denom |-> "", + prefix0 |-> [channel |-> "", port |-> ""], + prefix1 |-> [channel |-> "", port |-> 
""]] + >> + :> 0, + bankBefore |-> + << + [channel |-> "", id |-> "", port |-> ""], [denom |-> "", + prefix0 |-> [channel |-> "", port |-> ""], + prefix1 |-> [channel |-> "", port |-> ""]] + >> + :> 0, + error |-> FALSE, + handler |-> "", + packet |-> + [data |-> + [amount |-> 1, + denomTrace |-> + [denom |-> "", + prefix0 |-> [channel |-> "channel-0", port |-> "transfer"], + prefix1 |-> [channel |-> "", port |-> ""]], + receiver |-> "", + sender |-> ""], + destChannel |-> "channel-0", + destPort |-> "transfer", + sourceChannel |-> "channel-0", + sourcePort |-> "transfer"]] +/\ p = [data |-> + [amount |-> 1, + denomTrace |-> + [denom |-> "", + prefix0 |-> [channel |-> "channel-0", port |-> "transfer"], + prefix1 |-> [channel |-> "", port |-> ""]], + receiver |-> "", + sender |-> ""], + destChannel |-> "channel-0", + destPort |-> "transfer", + sourceChannel |-> "channel-0", + sourcePort |-> "transfer"] + +(* Transition 3 to State3 *) + +State3 == +/\ bank = << + [channel |-> "", id |-> "", port |-> ""], [denom |-> "", + prefix0 |-> [channel |-> "", port |-> ""], + prefix1 |-> [channel |-> "", port |-> ""]] + >> + :> 0 +/\ count = 1 +/\ error = TRUE +/\ handler = "OnRecvPacket" +/\ history = 0 + :> [bankAfter |-> + << + [channel |-> "", id |-> "", port |-> ""], [denom |-> "", + prefix0 |-> [channel |-> "", port |-> ""], + prefix1 |-> [channel |-> "", port |-> ""]] + >> + :> 0, + bankBefore |-> + << + [channel |-> "", id |-> "", port |-> ""], [denom |-> "", + prefix0 |-> [channel |-> "", port |-> ""], + prefix1 |-> [channel |-> "", port |-> ""]] + >> + :> 0, + error |-> FALSE, + handler |-> "", + packet |-> + [data |-> + [amount |-> 1, + denomTrace |-> + [denom |-> "", + prefix0 |-> [channel |-> "channel-0", port |-> "transfer"], + prefix1 |-> [channel |-> "", port |-> ""]], + receiver |-> "", + sender |-> ""], + destChannel |-> "channel-0", + destPort |-> "transfer", + sourceChannel |-> "channel-0", + sourcePort |-> "transfer"]] + @@ 1 + :> [bankAfter |-> + << + [channel |-> "", id |-> "", port |-> ""], [denom |-> "", + prefix0 |-> [channel |-> "", port |-> ""], + prefix1 |-> [channel |-> "", port |-> ""]] + >> + :> 0, + bankBefore |-> + << + [channel |-> "", id |-> "", port |-> ""], [denom |-> "", + prefix0 |-> [channel |-> "", port |-> ""], + prefix1 |-> [channel |-> "", port |-> ""]] + >> + :> 0, + error |-> TRUE, + handler |-> "OnRecvPacket", + packet |-> + [data |-> + [amount |-> 1, + denomTrace |-> + [denom |-> "", + prefix0 |-> [channel |-> "channel-0", port |-> "transfer"], + prefix1 |-> [channel |-> "", port |-> ""]], + receiver |-> "", + sender |-> ""], + destChannel |-> "channel-0", + destPort |-> "transfer", + sourceChannel |-> "channel-0", + sourcePort |-> "transfer"]] +/\ p = [data |-> + [amount |-> 0, + denomTrace |-> + [denom |-> "", + prefix0 |-> [channel |-> "", port |-> ""], + prefix1 |-> [channel |-> "", port |-> ""]], + receiver |-> "", + sender |-> ""], + destChannel |-> "", + destPort |-> "", + sourceChannel |-> "", + sourcePort |-> ""] + +(* The following formula holds true in the last state and violates the invariant *) + +InvariantViolation == + BMC!Skolem((\E s$2 \in DOMAIN history: + history[s$2]["handler"] = "OnRecvPacket" + /\ history[s$2]["error"] = TRUE + /\ history[s$2]["packet"]["data"]["amount"] > 0)) + +================================================================================ +\* Created by Apalache on Thu Dec 10 11:02:31 CET 2020 +\* https://github.com/informalsystems/apalache diff --git 
a/applications/transfer/keeper/model_based_tests/TestOnRecvPacketPass.json b/applications/transfer/keeper/model_based_tests/TestOnRecvPacketPass.json new file mode 100644 index 0000000000..35f94c5720 --- /dev/null +++ b/applications/transfer/keeper/model_based_tests/TestOnRecvPacketPass.json @@ -0,0 +1,73 @@ +[ + { + "packet": { + "sourceChannel": "channel-0", + "sourcePort": "transfer", + "destChannel": "channel-0", + "destPort": "transfer", + "data": { + "sender": "", + "receiver": "a2", + "amount": 1, + "denom": [ + "", + "", + "ethereum-hub", + "cosmos-hub", + "btc" + ] + } + }, + "handler": "OnRecvPacket", + "bankBefore": [ + { + "address": [ + "", + "", + "" + ], + "denom": [ + "", + "", + "", + "", + "" + ], + "amount": 0 + } + ], + "bankAfter": [ + { + "address": [ + "", + "", + "" + ], + "denom": [ + "", + "", + "", + "", + "" + ], + "amount": 0 + }, + { + "address": [ + "", + "", + "a2" + ], + "denom": [ + "transfer", + "channel-0", + "ethereum-hub", + "cosmos-hub", + "btc" + ], + "amount": 1 + } + ], + "error": false + } +] \ No newline at end of file diff --git a/applications/transfer/keeper/model_based_tests/TestOnRecvPacketPass.tla b/applications/transfer/keeper/model_based_tests/TestOnRecvPacketPass.tla new file mode 100644 index 0000000000..342b097feb --- /dev/null +++ b/applications/transfer/keeper/model_based_tests/TestOnRecvPacketPass.tla @@ -0,0 +1,174 @@ +------------------------- MODULE counterexample ------------------------- + +EXTENDS relay_tests + +(* Initial state *) + +State1 == +TRUE +(* Transition 0 to State2 *) + +State2 == +/\ bank = << + [channel |-> "", id |-> "", port |-> ""], [denom |-> "", + prefix0 |-> [channel |-> "", port |-> ""], + prefix1 |-> [channel |-> "", port |-> ""]] + >> + :> 0 +/\ count = 0 +/\ error = FALSE +/\ handler = "" +/\ history = 0 + :> [bankAfter |-> + << + [channel |-> "", id |-> "", port |-> ""], [denom |-> "", + prefix0 |-> [channel |-> "", port |-> ""], + prefix1 |-> [channel |-> "", port |-> ""]] + >> + :> 0, + bankBefore |-> + << + [channel |-> "", id |-> "", port |-> ""], [denom |-> "", + prefix0 |-> [channel |-> "", port |-> ""], + prefix1 |-> [channel |-> "", port |-> ""]] + >> + :> 0, + error |-> FALSE, + handler |-> "", + packet |-> + [data |-> + [amount |-> 1, + denomTrace |-> + [denom |-> "btc", + prefix0 |-> + [channel |-> "cosmos-hub", port |-> "ethereum-hub"], + prefix1 |-> [channel |-> "", port |-> ""]], + receiver |-> "a2", + sender |-> ""], + destChannel |-> "channel-0", + destPort |-> "transfer", + sourceChannel |-> "channel-0", + sourcePort |-> "transfer"]] +/\ p = [data |-> + [amount |-> 1, + denomTrace |-> + [denom |-> "btc", + prefix0 |-> [channel |-> "cosmos-hub", port |-> "ethereum-hub"], + prefix1 |-> [channel |-> "", port |-> ""]], + receiver |-> "a2", + sender |-> ""], + destChannel |-> "channel-0", + destPort |-> "transfer", + sourceChannel |-> "channel-0", + sourcePort |-> "transfer"] + +(* Transition 5 to State3 *) + +State3 == +/\ bank = << + [channel |-> "", id |-> "", port |-> ""], [denom |-> "", + prefix0 |-> [channel |-> "", port |-> ""], + prefix1 |-> [channel |-> "", port |-> ""]] + >> + :> 0 + @@ << + [channel |-> "", id |-> "a2", port |-> ""], [denom |-> "btc", + prefix0 |-> [channel |-> "cosmos-hub", port |-> "ethereum-hub"], + prefix1 |-> [channel |-> "channel-0", port |-> "transfer"]] + >> + :> 1 +/\ count = 1 +/\ error = FALSE +/\ handler = "OnRecvPacket" +/\ history = 0 + :> [bankAfter |-> + << + [channel |-> "", id |-> "", port |-> ""], [denom |-> "", + prefix0 |-> [channel |-> "", 
port |-> ""], + prefix1 |-> [channel |-> "", port |-> ""]] + >> + :> 0, + bankBefore |-> + << + [channel |-> "", id |-> "", port |-> ""], [denom |-> "", + prefix0 |-> [channel |-> "", port |-> ""], + prefix1 |-> [channel |-> "", port |-> ""]] + >> + :> 0, + error |-> FALSE, + handler |-> "", + packet |-> + [data |-> + [amount |-> 1, + denomTrace |-> + [denom |-> "btc", + prefix0 |-> + [channel |-> "cosmos-hub", port |-> "ethereum-hub"], + prefix1 |-> [channel |-> "", port |-> ""]], + receiver |-> "a2", + sender |-> ""], + destChannel |-> "channel-0", + destPort |-> "transfer", + sourceChannel |-> "channel-0", + sourcePort |-> "transfer"]] + @@ 1 + :> [bankAfter |-> + << + [channel |-> "", id |-> "", port |-> ""], [denom |-> "", + prefix0 |-> [channel |-> "", port |-> ""], + prefix1 |-> [channel |-> "", port |-> ""]] + >> + :> 0 + @@ << + [channel |-> "", id |-> "a2", port |-> ""], [denom |-> "btc", + prefix0 |-> [channel |-> "cosmos-hub", port |-> "ethereum-hub"], + prefix1 |-> [channel |-> "channel-0", port |-> "transfer"]] + >> + :> 1, + bankBefore |-> + << + [channel |-> "", id |-> "", port |-> ""], [denom |-> "", + prefix0 |-> [channel |-> "", port |-> ""], + prefix1 |-> [channel |-> "", port |-> ""]] + >> + :> 0, + error |-> FALSE, + handler |-> "OnRecvPacket", + packet |-> + [data |-> + [amount |-> 1, + denomTrace |-> + [denom |-> "btc", + prefix0 |-> + [channel |-> "cosmos-hub", port |-> "ethereum-hub"], + prefix1 |-> [channel |-> "", port |-> ""]], + receiver |-> "a2", + sender |-> ""], + destChannel |-> "channel-0", + destPort |-> "transfer", + sourceChannel |-> "channel-0", + sourcePort |-> "transfer"]] +/\ p = [data |-> + [amount |-> 0, + denomTrace |-> + [denom |-> "", + prefix0 |-> [channel |-> "", port |-> ""], + prefix1 |-> [channel |-> "", port |-> ""]], + receiver |-> "", + sender |-> ""], + destChannel |-> "", + destPort |-> "", + sourceChannel |-> "", + sourcePort |-> ""] + +(* The following formula holds true in the last state and violates the invariant *) + +InvariantViolation == + BMC!Skolem((\E s$2 \in DOMAIN history: + history[s$2]["handler"] = "OnRecvPacket" + /\ history[s$2]["error"] = FALSE + /\ history[s$2]["packet"]["data"]["amount"] > 0)) + +================================================================================ +\* Created by Apalache on Thu Dec 10 11:01:28 CET 2020 +\* https://github.com/informalsystems/apalache diff --git a/applications/transfer/keeper/model_based_tests/TestOnTimeoutFail.json b/applications/transfer/keeper/model_based_tests/TestOnTimeoutFail.json new file mode 100644 index 0000000000..a78ed85ca5 --- /dev/null +++ b/applications/transfer/keeper/model_based_tests/TestOnTimeoutFail.json @@ -0,0 +1,58 @@ +[ + { + "packet": { + "sourceChannel": "", + "sourcePort": "", + "destChannel": "", + "destPort": "", + "data": { + "sender": "a1", + "receiver": "a2", + "amount": 1, + "denom": [ + "cosmos-hub", + "transfer", + "channel-0", + "cosmos-hub", + "btc" + ] + } + }, + "handler": "OnTimeoutPacket", + "bankBefore": [ + { + "address": [ + "", + "", + "" + ], + "denom": [ + "", + "", + "", + "", + "" + ], + "amount": 0 + } + ], + "bankAfter": [ + { + "address": [ + "", + "", + "" + ], + "denom": [ + "", + "", + "", + "", + "" + ], + "amount": 0 + } + ], + "error": true + } +] \ No newline at end of file diff --git a/applications/transfer/keeper/model_based_tests/TestOnTimeoutFail.tla b/applications/transfer/keeper/model_based_tests/TestOnTimeoutFail.tla new file mode 100644 index 0000000000..1bc209d9d5 --- /dev/null +++ 
b/applications/transfer/keeper/model_based_tests/TestOnTimeoutFail.tla @@ -0,0 +1,159 @@ +------------------------- MODULE counterexample ------------------------- + +EXTENDS relay_tests + +(* Initial state *) + +State1 == +TRUE +(* Transition 0 to State2 *) + +State2 == +/\ bank = << + [channel |-> "", id |-> "", port |-> ""], [denom |-> "", + prefix0 |-> [channel |-> "", port |-> ""], + prefix1 |-> [channel |-> "", port |-> ""]] + >> + :> 0 +/\ count = 0 +/\ error = FALSE +/\ handler = "" +/\ history = 0 + :> [bankAfter |-> + << + [channel |-> "", id |-> "", port |-> ""], [denom |-> "", + prefix0 |-> [channel |-> "", port |-> ""], + prefix1 |-> [channel |-> "", port |-> ""]] + >> + :> 0, + bankBefore |-> + << + [channel |-> "", id |-> "", port |-> ""], [denom |-> "", + prefix0 |-> [channel |-> "", port |-> ""], + prefix1 |-> [channel |-> "", port |-> ""]] + >> + :> 0, + error |-> FALSE, + handler |-> "", + packet |-> + [data |-> + [amount |-> 1, + denomTrace |-> + [denom |-> "btc", + prefix0 |-> [channel |-> "cosmos-hub", port |-> "channel-0"], + prefix1 |-> [channel |-> "transfer", port |-> "cosmos-hub"]], + receiver |-> "a2", + sender |-> "a1"], + destChannel |-> "", + destPort |-> "", + sourceChannel |-> "", + sourcePort |-> ""]] +/\ p = [data |-> + [amount |-> 1, + denomTrace |-> + [denom |-> "btc", + prefix0 |-> [channel |-> "cosmos-hub", port |-> "channel-0"], + prefix1 |-> [channel |-> "transfer", port |-> "cosmos-hub"]], + receiver |-> "a2", + sender |-> "a1"], + destChannel |-> "", + destPort |-> "", + sourceChannel |-> "", + sourcePort |-> ""] + +(* Transition 6 to State3 *) + +State3 == +/\ bank = << + [channel |-> "", id |-> "", port |-> ""], [denom |-> "", + prefix0 |-> [channel |-> "", port |-> ""], + prefix1 |-> [channel |-> "", port |-> ""]] + >> + :> 0 +/\ count = 1 +/\ error = TRUE +/\ handler = "OnTimeoutPacket" +/\ history = 0 + :> [bankAfter |-> + << + [channel |-> "", id |-> "", port |-> ""], [denom |-> "", + prefix0 |-> [channel |-> "", port |-> ""], + prefix1 |-> [channel |-> "", port |-> ""]] + >> + :> 0, + bankBefore |-> + << + [channel |-> "", id |-> "", port |-> ""], [denom |-> "", + prefix0 |-> [channel |-> "", port |-> ""], + prefix1 |-> [channel |-> "", port |-> ""]] + >> + :> 0, + error |-> FALSE, + handler |-> "", + packet |-> + [data |-> + [amount |-> 1, + denomTrace |-> + [denom |-> "btc", + prefix0 |-> [channel |-> "cosmos-hub", port |-> "channel-0"], + prefix1 |-> [channel |-> "transfer", port |-> "cosmos-hub"]], + receiver |-> "a2", + sender |-> "a1"], + destChannel |-> "", + destPort |-> "", + sourceChannel |-> "", + sourcePort |-> ""]] + @@ 1 + :> [bankAfter |-> + << + [channel |-> "", id |-> "", port |-> ""], [denom |-> "", + prefix0 |-> [channel |-> "", port |-> ""], + prefix1 |-> [channel |-> "", port |-> ""]] + >> + :> 0, + bankBefore |-> + << + [channel |-> "", id |-> "", port |-> ""], [denom |-> "", + prefix0 |-> [channel |-> "", port |-> ""], + prefix1 |-> [channel |-> "", port |-> ""]] + >> + :> 0, + error |-> TRUE, + handler |-> "OnTimeoutPacket", + packet |-> + [data |-> + [amount |-> 1, + denomTrace |-> + [denom |-> "btc", + prefix0 |-> [channel |-> "cosmos-hub", port |-> "channel-0"], + prefix1 |-> [channel |-> "transfer", port |-> "cosmos-hub"]], + receiver |-> "a2", + sender |-> "a1"], + destChannel |-> "", + destPort |-> "", + sourceChannel |-> "", + sourcePort |-> ""]] +/\ p = [data |-> + [amount |-> 0, + denomTrace |-> + [denom |-> "", + prefix0 |-> [channel |-> "", port |-> ""], + prefix1 |-> [channel |-> "", port |-> ""]], + 
receiver |-> "", + sender |-> ""], + destChannel |-> "", + destPort |-> "", + sourceChannel |-> "", + sourcePort |-> ""] + +(* The following formula holds true in the last state and violates the invariant *) + +InvariantViolation == + BMC!Skolem((\E s$2 \in DOMAIN history: + history[s$2]["handler"] = "OnTimeoutPacket" + /\ history[s$2]["error"] = TRUE + /\ history[s$2]["packet"]["data"]["amount"] > 0)) + +================================================================================ +\* Created by Apalache on Thu Dec 10 11:09:25 CET 2020 +\* https://github.com/informalsystems/apalache diff --git a/applications/transfer/keeper/model_based_tests/TestOnTimeoutPass.json b/applications/transfer/keeper/model_based_tests/TestOnTimeoutPass.json new file mode 100644 index 0000000000..3136aace65 --- /dev/null +++ b/applications/transfer/keeper/model_based_tests/TestOnTimeoutPass.json @@ -0,0 +1,159 @@ +[ + { + "packet": { + "sourceChannel": "channel-0", + "sourcePort": "transfer", + "destChannel": "channel-1", + "destPort": "transfer", + "data": { + "sender": "a3", + "receiver": "a1", + "amount": 1, + "denom": [ + "", + "", + "bitcoin-hub", + "transfer", + "btc" + ] + } + }, + "handler": "OnRecvPacket", + "bankBefore": [ + { + "address": [ + "", + "", + "" + ], + "denom": [ + "", + "", + "", + "", + "" + ], + "amount": 0 + } + ], + "bankAfter": [ + { + "address": [ + "", + "", + "" + ], + "denom": [ + "", + "", + "", + "", + "" + ], + "amount": 0 + }, + { + "address": [ + "", + "", + "a1" + ], + "denom": [ + "transfer", + "channel-1", + "bitcoin-hub", + "transfer", + "btc" + ], + "amount": 1 + } + ], + "error": false + }, + { + "packet": { + "sourceChannel": "channel-1", + "sourcePort": "transfer", + "destChannel": "channel-0", + "destPort": "transfer", + "data": { + "sender": "a1", + "receiver": "", + "amount": 1, + "denom": [ + "transfer", + "channel-1", + "bitcoin-hub", + "transfer", + "btc" + ] + } + }, + "handler": "OnTimeoutPacket", + "bankBefore": [ + { + "address": [ + "", + "", + "" + ], + "denom": [ + "", + "", + "", + "", + "" + ], + "amount": 0 + }, + { + "address": [ + "", + "", + "a1" + ], + "denom": [ + "transfer", + "channel-1", + "bitcoin-hub", + "transfer", + "btc" + ], + "amount": 1 + } + ], + "bankAfter": [ + { + "address": [ + "", + "", + "" + ], + "denom": [ + "", + "", + "", + "", + "" + ], + "amount": 0 + }, + { + "address": [ + "", + "", + "a1" + ], + "denom": [ + "transfer", + "channel-1", + "bitcoin-hub", + "transfer", + "btc" + ], + "amount": 2 + } + ], + "error": false + } +] \ No newline at end of file diff --git a/applications/transfer/keeper/model_based_tests/TestOnTimeoutPass.tla b/applications/transfer/keeper/model_based_tests/TestOnTimeoutPass.tla new file mode 100644 index 0000000000..5dc5a994ae --- /dev/null +++ b/applications/transfer/keeper/model_based_tests/TestOnTimeoutPass.tla @@ -0,0 +1,310 @@ +------------------------- MODULE counterexample ------------------------- + +EXTENDS relay_tests + +(* Initial state *) + +State1 == +TRUE +(* Transition 0 to State2 *) + +State2 == +/\ bank = << + [channel |-> "", id |-> "", port |-> ""], [denom |-> "", + prefix0 |-> [channel |-> "", port |-> ""], + prefix1 |-> [channel |-> "", port |-> ""]] + >> + :> 0 +/\ count = 0 +/\ error = FALSE +/\ handler = "" +/\ history = 0 + :> [bankAfter |-> + << + [channel |-> "", id |-> "", port |-> ""], [denom |-> "", + prefix0 |-> [channel |-> "", port |-> ""], + prefix1 |-> [channel |-> "", port |-> ""]] + >> + :> 0, + bankBefore |-> + << + [channel |-> "", id |-> "", port |-> 
""], [denom |-> "", + prefix0 |-> [channel |-> "", port |-> ""], + prefix1 |-> [channel |-> "", port |-> ""]] + >> + :> 0, + error |-> FALSE, + handler |-> "", + packet |-> + [data |-> + [amount |-> 1, + denomTrace |-> + [denom |-> "btc", + prefix0 |-> [channel |-> "transfer", port |-> "bitcoin-hub"], + prefix1 |-> [channel |-> "", port |-> ""]], + receiver |-> "a1", + sender |-> "a3"], + destChannel |-> "channel-1", + destPort |-> "transfer", + sourceChannel |-> "channel-0", + sourcePort |-> "transfer"]] +/\ p = [data |-> + [amount |-> 1, + denomTrace |-> + [denom |-> "btc", + prefix0 |-> [channel |-> "transfer", port |-> "bitcoin-hub"], + prefix1 |-> [channel |-> "", port |-> ""]], + receiver |-> "a1", + sender |-> "a3"], + destChannel |-> "channel-1", + destPort |-> "transfer", + sourceChannel |-> "channel-0", + sourcePort |-> "transfer"] + +(* Transition 2 to State3 *) + +State3 == +/\ bank = << + [channel |-> "", id |-> "", port |-> ""], [denom |-> "", + prefix0 |-> [channel |-> "", port |-> ""], + prefix1 |-> [channel |-> "", port |-> ""]] + >> + :> 0 + @@ << + [channel |-> "", id |-> "a1", port |-> ""], [denom |-> "btc", + prefix0 |-> [channel |-> "transfer", port |-> "bitcoin-hub"], + prefix1 |-> [channel |-> "channel-1", port |-> "transfer"]] + >> + :> 1 +/\ count = 1 +/\ error = FALSE +/\ handler = "OnRecvPacket" +/\ history = 0 + :> [bankAfter |-> + << + [channel |-> "", id |-> "", port |-> ""], [denom |-> "", + prefix0 |-> [channel |-> "", port |-> ""], + prefix1 |-> [channel |-> "", port |-> ""]] + >> + :> 0, + bankBefore |-> + << + [channel |-> "", id |-> "", port |-> ""], [denom |-> "", + prefix0 |-> [channel |-> "", port |-> ""], + prefix1 |-> [channel |-> "", port |-> ""]] + >> + :> 0, + error |-> FALSE, + handler |-> "", + packet |-> + [data |-> + [amount |-> 1, + denomTrace |-> + [denom |-> "btc", + prefix0 |-> [channel |-> "transfer", port |-> "bitcoin-hub"], + prefix1 |-> [channel |-> "", port |-> ""]], + receiver |-> "a1", + sender |-> "a3"], + destChannel |-> "channel-1", + destPort |-> "transfer", + sourceChannel |-> "channel-0", + sourcePort |-> "transfer"]] + @@ 1 + :> [bankAfter |-> + << + [channel |-> "", id |-> "", port |-> ""], [denom |-> "", + prefix0 |-> [channel |-> "", port |-> ""], + prefix1 |-> [channel |-> "", port |-> ""]] + >> + :> 0 + @@ << + [channel |-> "", id |-> "a1", port |-> ""], [denom |-> "btc", + prefix0 |-> [channel |-> "transfer", port |-> "bitcoin-hub"], + prefix1 |-> [channel |-> "channel-1", port |-> "transfer"]] + >> + :> 1, + bankBefore |-> + << + [channel |-> "", id |-> "", port |-> ""], [denom |-> "", + prefix0 |-> [channel |-> "", port |-> ""], + prefix1 |-> [channel |-> "", port |-> ""]] + >> + :> 0, + error |-> FALSE, + handler |-> "OnRecvPacket", + packet |-> + [data |-> + [amount |-> 1, + denomTrace |-> + [denom |-> "btc", + prefix0 |-> [channel |-> "transfer", port |-> "bitcoin-hub"], + prefix1 |-> [channel |-> "", port |-> ""]], + receiver |-> "a1", + sender |-> "a3"], + destChannel |-> "channel-1", + destPort |-> "transfer", + sourceChannel |-> "channel-0", + sourcePort |-> "transfer"]] +/\ p = [data |-> + [amount |-> 1, + denomTrace |-> + [denom |-> "btc", + prefix0 |-> [channel |-> "transfer", port |-> "bitcoin-hub"], + prefix1 |-> [channel |-> "channel-1", port |-> "transfer"]], + receiver |-> "", + sender |-> "a1"], + destChannel |-> "channel-0", + destPort |-> "transfer", + sourceChannel |-> "channel-1", + sourcePort |-> "transfer"] + +(* Transition 10 to State4 *) + +State4 == +/\ bank = << + [channel |-> "", id |-> 
"", port |-> ""], [denom |-> "", + prefix0 |-> [channel |-> "", port |-> ""], + prefix1 |-> [channel |-> "", port |-> ""]] + >> + :> 0 + @@ << + [channel |-> "", id |-> "a1", port |-> ""], [denom |-> "btc", + prefix0 |-> [channel |-> "transfer", port |-> "bitcoin-hub"], + prefix1 |-> [channel |-> "channel-1", port |-> "transfer"]] + >> + :> 2 +/\ count = 2 +/\ error = FALSE +/\ handler = "OnTimeoutPacket" +/\ history = 0 + :> [bankAfter |-> + << + [channel |-> "", id |-> "", port |-> ""], [denom |-> "", + prefix0 |-> [channel |-> "", port |-> ""], + prefix1 |-> [channel |-> "", port |-> ""]] + >> + :> 0, + bankBefore |-> + << + [channel |-> "", id |-> "", port |-> ""], [denom |-> "", + prefix0 |-> [channel |-> "", port |-> ""], + prefix1 |-> [channel |-> "", port |-> ""]] + >> + :> 0, + error |-> FALSE, + handler |-> "", + packet |-> + [data |-> + [amount |-> 1, + denomTrace |-> + [denom |-> "btc", + prefix0 |-> [channel |-> "transfer", port |-> "bitcoin-hub"], + prefix1 |-> [channel |-> "", port |-> ""]], + receiver |-> "a1", + sender |-> "a3"], + destChannel |-> "channel-1", + destPort |-> "transfer", + sourceChannel |-> "channel-0", + sourcePort |-> "transfer"]] + @@ 1 + :> [bankAfter |-> + << + [channel |-> "", id |-> "", port |-> ""], [denom |-> "", + prefix0 |-> [channel |-> "", port |-> ""], + prefix1 |-> [channel |-> "", port |-> ""]] + >> + :> 0 + @@ << + [channel |-> "", id |-> "a1", port |-> ""], [denom |-> "btc", + prefix0 |-> [channel |-> "transfer", port |-> "bitcoin-hub"], + prefix1 |-> [channel |-> "channel-1", port |-> "transfer"]] + >> + :> 1, + bankBefore |-> + << + [channel |-> "", id |-> "", port |-> ""], [denom |-> "", + prefix0 |-> [channel |-> "", port |-> ""], + prefix1 |-> [channel |-> "", port |-> ""]] + >> + :> 0, + error |-> FALSE, + handler |-> "OnRecvPacket", + packet |-> + [data |-> + [amount |-> 1, + denomTrace |-> + [denom |-> "btc", + prefix0 |-> [channel |-> "transfer", port |-> "bitcoin-hub"], + prefix1 |-> [channel |-> "", port |-> ""]], + receiver |-> "a1", + sender |-> "a3"], + destChannel |-> "channel-1", + destPort |-> "transfer", + sourceChannel |-> "channel-0", + sourcePort |-> "transfer"]] + @@ 2 + :> [bankAfter |-> + << + [channel |-> "", id |-> "", port |-> ""], [denom |-> "", + prefix0 |-> [channel |-> "", port |-> ""], + prefix1 |-> [channel |-> "", port |-> ""]] + >> + :> 0 + @@ << + [channel |-> "", id |-> "a1", port |-> ""], [denom |-> "btc", + prefix0 |-> [channel |-> "transfer", port |-> "bitcoin-hub"], + prefix1 |-> [channel |-> "channel-1", port |-> "transfer"]] + >> + :> 2, + bankBefore |-> + << + [channel |-> "", id |-> "", port |-> ""], [denom |-> "", + prefix0 |-> [channel |-> "", port |-> ""], + prefix1 |-> [channel |-> "", port |-> ""]] + >> + :> 0 + @@ << + [channel |-> "", id |-> "a1", port |-> ""], [denom |-> "btc", + prefix0 |-> [channel |-> "transfer", port |-> "bitcoin-hub"], + prefix1 |-> [channel |-> "channel-1", port |-> "transfer"]] + >> + :> 1, + error |-> FALSE, + handler |-> "OnTimeoutPacket", + packet |-> + [data |-> + [amount |-> 1, + denomTrace |-> + [denom |-> "btc", + prefix0 |-> [channel |-> "transfer", port |-> "bitcoin-hub"], + prefix1 |-> [channel |-> "channel-1", port |-> "transfer"]], + receiver |-> "", + sender |-> "a1"], + destChannel |-> "channel-0", + destPort |-> "transfer", + sourceChannel |-> "channel-1", + sourcePort |-> "transfer"]] +/\ p = [data |-> + [amount |-> 0, + denomTrace |-> + [denom |-> "", + prefix0 |-> [channel |-> "", port |-> ""], + prefix1 |-> [channel |-> "", port |-> ""]], + 
receiver |-> "", + sender |-> ""], + destChannel |-> "", + destPort |-> "", + sourceChannel |-> "", + sourcePort |-> ""] + +(* The following formula holds true in the last state and violates the invariant *) + +InvariantViolation == + BMC!Skolem((\E s$2 \in DOMAIN history: + history[s$2]["handler"] = "OnTimeoutPacket" + /\ history[s$2]["error"] = FALSE + /\ history[s$2]["packet"]["data"]["amount"] > 0)) + +================================================================================ +\* Created by Apalache on Thu Dec 10 11:07:37 CET 2020 +\* https://github.com/informalsystems/apalache diff --git a/applications/transfer/keeper/model_based_tests/TestSendTransferFail.json b/applications/transfer/keeper/model_based_tests/TestSendTransferFail.json new file mode 100644 index 0000000000..01d589d867 --- /dev/null +++ b/applications/transfer/keeper/model_based_tests/TestSendTransferFail.json @@ -0,0 +1,58 @@ +[ + { + "packet": { + "sourceChannel": "channel-0", + "sourcePort": "transfer", + "destChannel": "channel-0", + "destPort": "transfer", + "data": { + "sender": "", + "receiver": "", + "amount": 1, + "denom": [ + "", + "", + "", + "", + "" + ] + } + }, + "handler": "SendTransfer", + "bankBefore": [ + { + "address": [ + "", + "", + "" + ], + "denom": [ + "", + "", + "", + "", + "" + ], + "amount": 0 + } + ], + "bankAfter": [ + { + "address": [ + "", + "", + "" + ], + "denom": [ + "", + "", + "", + "", + "" + ], + "amount": 0 + } + ], + "error": true + } +] \ No newline at end of file diff --git a/applications/transfer/keeper/model_based_tests/TestSendTransferFail.tla b/applications/transfer/keeper/model_based_tests/TestSendTransferFail.tla new file mode 100644 index 0000000000..dc3a1c008b --- /dev/null +++ b/applications/transfer/keeper/model_based_tests/TestSendTransferFail.tla @@ -0,0 +1,159 @@ +------------------------- MODULE counterexample ------------------------- + +EXTENDS relay_tests + +(* Initial state *) + +State1 == +TRUE +(* Transition 0 to State2 *) + +State2 == +/\ bank = << + [channel |-> "", id |-> "", port |-> ""], [denom |-> "", + prefix0 |-> [channel |-> "", port |-> ""], + prefix1 |-> [channel |-> "", port |-> ""]] + >> + :> 0 +/\ count = 0 +/\ error = FALSE +/\ handler = "" +/\ history = 0 + :> [bankAfter |-> + << + [channel |-> "", id |-> "", port |-> ""], [denom |-> "", + prefix0 |-> [channel |-> "", port |-> ""], + prefix1 |-> [channel |-> "", port |-> ""]] + >> + :> 0, + bankBefore |-> + << + [channel |-> "", id |-> "", port |-> ""], [denom |-> "", + prefix0 |-> [channel |-> "", port |-> ""], + prefix1 |-> [channel |-> "", port |-> ""]] + >> + :> 0, + error |-> FALSE, + handler |-> "", + packet |-> + [data |-> + [amount |-> 1, + denomTrace |-> + [denom |-> "", + prefix0 |-> [channel |-> "", port |-> ""], + prefix1 |-> [channel |-> "", port |-> ""]], + receiver |-> "", + sender |-> ""], + destChannel |-> "channel-0", + destPort |-> "transfer", + sourceChannel |-> "channel-0", + sourcePort |-> "transfer"]] +/\ p = [data |-> + [amount |-> 1, + denomTrace |-> + [denom |-> "", + prefix0 |-> [channel |-> "", port |-> ""], + prefix1 |-> [channel |-> "", port |-> ""]], + receiver |-> "", + sender |-> ""], + destChannel |-> "channel-0", + destPort |-> "transfer", + sourceChannel |-> "channel-0", + sourcePort |-> "transfer"] + +(* Transition 0 to State3 *) + +State3 == +/\ bank = << + [channel |-> "", id |-> "", port |-> ""], [denom |-> "", + prefix0 |-> [channel |-> "", port |-> ""], + prefix1 |-> [channel |-> "", port |-> ""]] + >> + :> 0 +/\ count = 1 +/\ error = TRUE +/\ 
handler = "SendTransfer" +/\ history = 0 + :> [bankAfter |-> + << + [channel |-> "", id |-> "", port |-> ""], [denom |-> "", + prefix0 |-> [channel |-> "", port |-> ""], + prefix1 |-> [channel |-> "", port |-> ""]] + >> + :> 0, + bankBefore |-> + << + [channel |-> "", id |-> "", port |-> ""], [denom |-> "", + prefix0 |-> [channel |-> "", port |-> ""], + prefix1 |-> [channel |-> "", port |-> ""]] + >> + :> 0, + error |-> FALSE, + handler |-> "", + packet |-> + [data |-> + [amount |-> 1, + denomTrace |-> + [denom |-> "", + prefix0 |-> [channel |-> "", port |-> ""], + prefix1 |-> [channel |-> "", port |-> ""]], + receiver |-> "", + sender |-> ""], + destChannel |-> "channel-0", + destPort |-> "transfer", + sourceChannel |-> "channel-0", + sourcePort |-> "transfer"]] + @@ 1 + :> [bankAfter |-> + << + [channel |-> "", id |-> "", port |-> ""], [denom |-> "", + prefix0 |-> [channel |-> "", port |-> ""], + prefix1 |-> [channel |-> "", port |-> ""]] + >> + :> 0, + bankBefore |-> + << + [channel |-> "", id |-> "", port |-> ""], [denom |-> "", + prefix0 |-> [channel |-> "", port |-> ""], + prefix1 |-> [channel |-> "", port |-> ""]] + >> + :> 0, + error |-> TRUE, + handler |-> "SendTransfer", + packet |-> + [data |-> + [amount |-> 1, + denomTrace |-> + [denom |-> "", + prefix0 |-> [channel |-> "", port |-> ""], + prefix1 |-> [channel |-> "", port |-> ""]], + receiver |-> "", + sender |-> ""], + destChannel |-> "channel-0", + destPort |-> "transfer", + sourceChannel |-> "channel-0", + sourcePort |-> "transfer"]] +/\ p = [data |-> + [amount |-> 0, + denomTrace |-> + [denom |-> "", + prefix0 |-> [channel |-> "", port |-> ""], + prefix1 |-> [channel |-> "", port |-> ""]], + receiver |-> "", + sender |-> ""], + destChannel |-> "", + destPort |-> "", + sourceChannel |-> "", + sourcePort |-> ""] + +(* The following formula holds true in the last state and violates the invariant *) + +InvariantViolation == + BMC!Skolem((\E s$2 \in DOMAIN history: + history[s$2]["handler"] = "SendTransfer" + /\ history[s$2]["error"] = TRUE + /\ history[s$2]["packet"]["data"]["amount"] > 0)) + +================================================================================ +\* Created by Apalache on Thu Dec 10 11:00:34 CET 2020 +\* https://github.com/informalsystems/apalache diff --git a/applications/transfer/keeper/model_based_tests/TestSendTransferPass.json b/applications/transfer/keeper/model_based_tests/TestSendTransferPass.json new file mode 100644 index 0000000000..452d2b3aa9 --- /dev/null +++ b/applications/transfer/keeper/model_based_tests/TestSendTransferPass.json @@ -0,0 +1,174 @@ +[ + { + "packet": { + "sourceChannel": "channel-0", + "sourcePort": "transfer", + "destChannel": "channel-0", + "destPort": "transfer", + "data": { + "sender": "a3", + "receiver": "a2", + "amount": 1, + "denom": [ + "", + "", + "cosmos-hub", + "cosmos-hub", + "eth" + ] + } + }, + "handler": "OnRecvPacket", + "bankBefore": [ + { + "address": [ + "", + "", + "" + ], + "denom": [ + "", + "", + "", + "", + "" + ], + "amount": 0 + } + ], + "bankAfter": [ + { + "address": [ + "", + "", + "" + ], + "denom": [ + "", + "", + "", + "", + "" + ], + "amount": 0 + }, + { + "address": [ + "", + "", + "a2" + ], + "denom": [ + "transfer", + "channel-0", + "cosmos-hub", + "cosmos-hub", + "eth" + ], + "amount": 1 + } + ], + "error": false + }, + { + "packet": { + "sourceChannel": "channel-1", + "sourcePort": "transfer", + "destChannel": "channel-0", + "destPort": "transfer", + "data": { + "sender": "a2", + "receiver": "a1", + "amount": 1, + "denom": [ + 
"transfer", + "channel-0", + "cosmos-hub", + "cosmos-hub", + "eth" + ] + } + }, + "handler": "SendTransfer", + "bankBefore": [ + { + "address": [ + "", + "", + "" + ], + "denom": [ + "", + "", + "", + "", + "" + ], + "amount": 0 + }, + { + "address": [ + "", + "", + "a2" + ], + "denom": [ + "transfer", + "channel-0", + "cosmos-hub", + "cosmos-hub", + "eth" + ], + "amount": 1 + } + ], + "bankAfter": [ + { + "address": [ + "", + "", + "" + ], + "denom": [ + "", + "", + "", + "", + "" + ], + "amount": 0 + }, + { + "address": [ + "", + "", + "a2" + ], + "denom": [ + "transfer", + "channel-0", + "cosmos-hub", + "cosmos-hub", + "eth" + ], + "amount": 0 + }, + { + "address": [ + "transfer", + "channel-1", + "" + ], + "denom": [ + "transfer", + "channel-0", + "cosmos-hub", + "cosmos-hub", + "eth" + ], + "amount": 1 + } + ], + "error": false + } +] \ No newline at end of file diff --git a/applications/transfer/keeper/model_based_tests/TestSendTransferPass.tla b/applications/transfer/keeper/model_based_tests/TestSendTransferPass.tla new file mode 100644 index 0000000000..23c45c6773 --- /dev/null +++ b/applications/transfer/keeper/model_based_tests/TestSendTransferPass.tla @@ -0,0 +1,323 @@ +------------------------- MODULE counterexample ------------------------- + +EXTENDS relay_tests + +(* Initial state *) + +State1 == +TRUE +(* Transition 0 to State2 *) + +State2 == +/\ bank = << + [channel |-> "", id |-> "", port |-> ""], [denom |-> "", + prefix0 |-> [channel |-> "", port |-> ""], + prefix1 |-> [channel |-> "", port |-> ""]] + >> + :> 0 +/\ count = 0 +/\ error = FALSE +/\ handler = "" +/\ history = 0 + :> [bankAfter |-> + << + [channel |-> "", id |-> "", port |-> ""], [denom |-> "", + prefix0 |-> [channel |-> "", port |-> ""], + prefix1 |-> [channel |-> "", port |-> ""]] + >> + :> 0, + bankBefore |-> + << + [channel |-> "", id |-> "", port |-> ""], [denom |-> "", + prefix0 |-> [channel |-> "", port |-> ""], + prefix1 |-> [channel |-> "", port |-> ""]] + >> + :> 0, + error |-> FALSE, + handler |-> "", + packet |-> + [data |-> + [amount |-> 1, + denomTrace |-> + [denom |-> "eth", + prefix0 |-> [channel |-> "cosmos-hub", port |-> "cosmos-hub"], + prefix1 |-> [channel |-> "", port |-> ""]], + receiver |-> "a2", + sender |-> "a3"], + destChannel |-> "channel-0", + destPort |-> "transfer", + sourceChannel |-> "channel-0", + sourcePort |-> "transfer"]] +/\ p = [data |-> + [amount |-> 1, + denomTrace |-> + [denom |-> "eth", + prefix0 |-> [channel |-> "cosmos-hub", port |-> "cosmos-hub"], + prefix1 |-> [channel |-> "", port |-> ""]], + receiver |-> "a2", + sender |-> "a3"], + destChannel |-> "channel-0", + destPort |-> "transfer", + sourceChannel |-> "channel-0", + sourcePort |-> "transfer"] + +(* Transition 2 to State3 *) + +State3 == +/\ bank = << + [channel |-> "", id |-> "", port |-> ""], [denom |-> "", + prefix0 |-> [channel |-> "", port |-> ""], + prefix1 |-> [channel |-> "", port |-> ""]] + >> + :> 0 + @@ << + [channel |-> "", id |-> "a2", port |-> ""], [denom |-> "eth", + prefix0 |-> [channel |-> "cosmos-hub", port |-> "cosmos-hub"], + prefix1 |-> [channel |-> "channel-0", port |-> "transfer"]] + >> + :> 1 +/\ count = 1 +/\ error = FALSE +/\ handler = "OnRecvPacket" +/\ history = 0 + :> [bankAfter |-> + << + [channel |-> "", id |-> "", port |-> ""], [denom |-> "", + prefix0 |-> [channel |-> "", port |-> ""], + prefix1 |-> [channel |-> "", port |-> ""]] + >> + :> 0, + bankBefore |-> + << + [channel |-> "", id |-> "", port |-> ""], [denom |-> "", + prefix0 |-> [channel |-> "", port |-> ""], + 
prefix1 |-> [channel |-> "", port |-> ""]] + >> + :> 0, + error |-> FALSE, + handler |-> "", + packet |-> + [data |-> + [amount |-> 1, + denomTrace |-> + [denom |-> "eth", + prefix0 |-> [channel |-> "cosmos-hub", port |-> "cosmos-hub"], + prefix1 |-> [channel |-> "", port |-> ""]], + receiver |-> "a2", + sender |-> "a3"], + destChannel |-> "channel-0", + destPort |-> "transfer", + sourceChannel |-> "channel-0", + sourcePort |-> "transfer"]] + @@ 1 + :> [bankAfter |-> + << + [channel |-> "", id |-> "", port |-> ""], [denom |-> "", + prefix0 |-> [channel |-> "", port |-> ""], + prefix1 |-> [channel |-> "", port |-> ""]] + >> + :> 0 + @@ << + [channel |-> "", id |-> "a2", port |-> ""], [denom |-> "eth", + prefix0 |-> [channel |-> "cosmos-hub", port |-> "cosmos-hub"], + prefix1 |-> [channel |-> "channel-0", port |-> "transfer"]] + >> + :> 1, + bankBefore |-> + << + [channel |-> "", id |-> "", port |-> ""], [denom |-> "", + prefix0 |-> [channel |-> "", port |-> ""], + prefix1 |-> [channel |-> "", port |-> ""]] + >> + :> 0, + error |-> FALSE, + handler |-> "OnRecvPacket", + packet |-> + [data |-> + [amount |-> 1, + denomTrace |-> + [denom |-> "eth", + prefix0 |-> [channel |-> "cosmos-hub", port |-> "cosmos-hub"], + prefix1 |-> [channel |-> "", port |-> ""]], + receiver |-> "a2", + sender |-> "a3"], + destChannel |-> "channel-0", + destPort |-> "transfer", + sourceChannel |-> "channel-0", + sourcePort |-> "transfer"]] +/\ p = [data |-> + [amount |-> 1, + denomTrace |-> + [denom |-> "eth", + prefix0 |-> [channel |-> "cosmos-hub", port |-> "cosmos-hub"], + prefix1 |-> [channel |-> "channel-0", port |-> "transfer"]], + receiver |-> "a1", + sender |-> "a2"], + destChannel |-> "channel-0", + destPort |-> "transfer", + sourceChannel |-> "channel-1", + sourcePort |-> "transfer"] + +(* Transition 1 to State4 *) + +State4 == +/\ bank = << + [channel |-> "", id |-> "", port |-> ""], [denom |-> "", + prefix0 |-> [channel |-> "", port |-> ""], + prefix1 |-> [channel |-> "", port |-> ""]] + >> + :> 0 + @@ << + [channel |-> "", id |-> "a2", port |-> ""], [denom |-> "eth", + prefix0 |-> [channel |-> "cosmos-hub", port |-> "cosmos-hub"], + prefix1 |-> [channel |-> "channel-0", port |-> "transfer"]] + >> + :> 0 + @@ << + [channel |-> "channel-1", id |-> "", port |-> "transfer"], [denom |-> "eth", + prefix0 |-> [channel |-> "cosmos-hub", port |-> "cosmos-hub"], + prefix1 |-> [channel |-> "channel-0", port |-> "transfer"]] + >> + :> 1 +/\ count = 2 +/\ error = FALSE +/\ handler = "SendTransfer" +/\ history = 0 + :> [bankAfter |-> + << + [channel |-> "", id |-> "", port |-> ""], [denom |-> "", + prefix0 |-> [channel |-> "", port |-> ""], + prefix1 |-> [channel |-> "", port |-> ""]] + >> + :> 0, + bankBefore |-> + << + [channel |-> "", id |-> "", port |-> ""], [denom |-> "", + prefix0 |-> [channel |-> "", port |-> ""], + prefix1 |-> [channel |-> "", port |-> ""]] + >> + :> 0, + error |-> FALSE, + handler |-> "", + packet |-> + [data |-> + [amount |-> 1, + denomTrace |-> + [denom |-> "eth", + prefix0 |-> [channel |-> "cosmos-hub", port |-> "cosmos-hub"], + prefix1 |-> [channel |-> "", port |-> ""]], + receiver |-> "a2", + sender |-> "a3"], + destChannel |-> "channel-0", + destPort |-> "transfer", + sourceChannel |-> "channel-0", + sourcePort |-> "transfer"]] + @@ 1 + :> [bankAfter |-> + << + [channel |-> "", id |-> "", port |-> ""], [denom |-> "", + prefix0 |-> [channel |-> "", port |-> ""], + prefix1 |-> [channel |-> "", port |-> ""]] + >> + :> 0 + @@ << + [channel |-> "", id |-> "a2", port |-> ""], [denom |-> "eth", 
+ prefix0 |-> [channel |-> "cosmos-hub", port |-> "cosmos-hub"], + prefix1 |-> [channel |-> "channel-0", port |-> "transfer"]] + >> + :> 1, + bankBefore |-> + << + [channel |-> "", id |-> "", port |-> ""], [denom |-> "", + prefix0 |-> [channel |-> "", port |-> ""], + prefix1 |-> [channel |-> "", port |-> ""]] + >> + :> 0, + error |-> FALSE, + handler |-> "OnRecvPacket", + packet |-> + [data |-> + [amount |-> 1, + denomTrace |-> + [denom |-> "eth", + prefix0 |-> [channel |-> "cosmos-hub", port |-> "cosmos-hub"], + prefix1 |-> [channel |-> "", port |-> ""]], + receiver |-> "a2", + sender |-> "a3"], + destChannel |-> "channel-0", + destPort |-> "transfer", + sourceChannel |-> "channel-0", + sourcePort |-> "transfer"]] + @@ 2 + :> [bankAfter |-> + << + [channel |-> "", id |-> "", port |-> ""], [denom |-> "", + prefix0 |-> [channel |-> "", port |-> ""], + prefix1 |-> [channel |-> "", port |-> ""]] + >> + :> 0 + @@ << + [channel |-> "", id |-> "a2", port |-> ""], [denom |-> "eth", + prefix0 |-> [channel |-> "cosmos-hub", port |-> "cosmos-hub"], + prefix1 |-> [channel |-> "channel-0", port |-> "transfer"]] + >> + :> 0 + @@ << + [channel |-> "channel-1", id |-> "", port |-> "transfer"], [denom |-> + "eth", + prefix0 |-> [channel |-> "cosmos-hub", port |-> "cosmos-hub"], + prefix1 |-> [channel |-> "channel-0", port |-> "transfer"]] + >> + :> 1, + bankBefore |-> + << + [channel |-> "", id |-> "", port |-> ""], [denom |-> "", + prefix0 |-> [channel |-> "", port |-> ""], + prefix1 |-> [channel |-> "", port |-> ""]] + >> + :> 0 + @@ << + [channel |-> "", id |-> "a2", port |-> ""], [denom |-> "eth", + prefix0 |-> [channel |-> "cosmos-hub", port |-> "cosmos-hub"], + prefix1 |-> [channel |-> "channel-0", port |-> "transfer"]] + >> + :> 1, + error |-> FALSE, + handler |-> "SendTransfer", + packet |-> + [data |-> + [amount |-> 1, + denomTrace |-> + [denom |-> "eth", + prefix0 |-> [channel |-> "cosmos-hub", port |-> "cosmos-hub"], + prefix1 |-> [channel |-> "channel-0", port |-> "transfer"]], + receiver |-> "a1", + sender |-> "a2"], + destChannel |-> "channel-0", + destPort |-> "transfer", + sourceChannel |-> "channel-1", + sourcePort |-> "transfer"]] +/\ p = [data |-> + [amount |-> 0, + denomTrace |-> + [denom |-> "", + prefix0 |-> [channel |-> "", port |-> ""], + prefix1 |-> [channel |-> "", port |-> ""]], + receiver |-> "", + sender |-> ""], + destChannel |-> "", + destPort |-> "", + sourceChannel |-> "", + sourcePort |-> ""] + +(* The following formula holds true in the last state and violates the invariant *) + +InvariantViolation == + BMC!Skolem((\E s$2 \in DOMAIN history: + history[s$2]["handler"] = "SendTransfer" + /\ history[s$2]["error"] = FALSE + /\ history[s$2]["packet"]["data"]["amount"] > 0)) + +================================================================================ +\* Created by Apalache on Thu Dec 10 10:58:54 CET 2020 +\* https://github.com/informalsystems/apalache diff --git a/applications/transfer/keeper/model_based_tests/TestUnescrowTokens.json b/applications/transfer/keeper/model_based_tests/TestUnescrowTokens.json new file mode 100644 index 0000000000..9855220704 --- /dev/null +++ b/applications/transfer/keeper/model_based_tests/TestUnescrowTokens.json @@ -0,0 +1,305 @@ +[ + { + "packet": { + "sourceChannel": "channel-0", + "sourcePort": "transfer", + "destChannel": "channel-0", + "destPort": "transfer", + "data": { + "sender": "a1", + "receiver": "a3", + "amount": 5, + "denom": [ + "", + "", + "", + "", + "atom" + ] + } + }, + "handler": "OnRecvPacket", + "bankBefore": [ + { + 
"address": [ + "", + "", + "" + ], + "denom": [ + "", + "", + "", + "", + "" + ], + "amount": 0 + } + ], + "bankAfter": [ + { + "address": [ + "", + "", + "" + ], + "denom": [ + "", + "", + "", + "", + "" + ], + "amount": 0 + }, + { + "address": [ + "", + "", + "a3" + ], + "denom": [ + "", + "", + "transfer", + "channel-0", + "atom" + ], + "amount": 5 + } + ], + "error": false + }, + { + "packet": { + "sourceChannel": "channel-1", + "sourcePort": "transfer", + "destChannel": "channel-0", + "destPort": "transfer", + "data": { + "sender": "a3", + "receiver": "a1", + "amount": 3, + "denom": [ + "", + "", + "transfer", + "channel-0", + "atom" + ] + } + }, + "handler": "SendTransfer", + "bankBefore": [ + { + "address": [ + "", + "", + "" + ], + "denom": [ + "", + "", + "", + "", + "" + ], + "amount": 0 + }, + { + "address": [ + "", + "", + "a3" + ], + "denom": [ + "", + "", + "transfer", + "channel-0", + "atom" + ], + "amount": 5 + } + ], + "bankAfter": [ + { + "address": [ + "", + "", + "" + ], + "denom": [ + "", + "", + "", + "", + "" + ], + "amount": 0 + }, + { + "address": [ + "", + "", + "a3" + ], + "denom": [ + "", + "", + "transfer", + "channel-0", + "atom" + ], + "amount": 2 + }, + { + "address": [ + "transfer", + "channel-1", + "" + ], + "denom": [ + "", + "", + "transfer", + "channel-0", + "atom" + ], + "amount": 3 + } + ], + "error": false + }, + { + "packet": { + "sourceChannel": "channel-0", + "sourcePort": "transfer", + "destChannel": "channel-1", + "destPort": "transfer", + "data": { + "sender": "a1", + "receiver": "a1", + "amount": 1, + "denom": [ + "transfer", + "channel-0", + "transfer", + "channel-0", + "atom" + ] + } + }, + "handler": "OnRecvPacket", + "bankBefore": [ + { + "address": [ + "", + "", + "" + ], + "denom": [ + "", + "", + "", + "", + "" + ], + "amount": 0 + }, + { + "address": [ + "", + "", + "a3" + ], + "denom": [ + "", + "", + "transfer", + "channel-0", + "atom" + ], + "amount": 2 + }, + { + "address": [ + "transfer", + "channel-1", + "" + ], + "denom": [ + "", + "", + "transfer", + "channel-0", + "atom" + ], + "amount": 3 + } + ], + "bankAfter": [ + { + "address": [ + "", + "", + "" + ], + "denom": [ + "", + "", + "", + "", + "" + ], + "amount": 0 + }, + { + "address": [ + "", + "", + "a1" + ], + "denom": [ + "", + "", + "transfer", + "channel-0", + "atom" + ], + "amount": 1 + }, + { + "address": [ + "", + "", + "a3" + ], + "denom": [ + "", + "", + "transfer", + "channel-0", + "atom" + ], + "amount": 2 + }, + { + "address": [ + "transfer", + "channel-1", + "" + ], + "denom": [ + "", + "", + "transfer", + "channel-0", + "atom" + ], + "amount": 2 + } + ], + "error": false + } +] \ No newline at end of file diff --git a/applications/transfer/keeper/model_based_tests/TestUnescrowTokens.tla b/applications/transfer/keeper/model_based_tests/TestUnescrowTokens.tla new file mode 100644 index 0000000000..e99081c123 --- /dev/null +++ b/applications/transfer/keeper/model_based_tests/TestUnescrowTokens.tla @@ -0,0 +1,563 @@ +------------------------- MODULE counterexample ------------------------- + +EXTENDS relay_tests + +(* Initial state *) + +State1 == +TRUE +(* Transition 0 to State2 *) + +State2 == +/\ bank = << + [channel |-> "", id |-> "", port |-> ""], [denom |-> "", + prefix0 |-> [channel |-> "", port |-> ""], + prefix1 |-> [channel |-> "", port |-> ""]] + >> + :> 0 +/\ count = 0 +/\ error = FALSE +/\ handler = "" +/\ history = 0 + :> [bankAfter |-> + << + [channel |-> "", id |-> "", port |-> ""], [denom |-> "", + prefix0 |-> [channel |-> "", port |-> ""], + 
prefix1 |-> [channel |-> "", port |-> ""]] + >> + :> 0, + bankBefore |-> + << + [channel |-> "", id |-> "", port |-> ""], [denom |-> "", + prefix0 |-> [channel |-> "", port |-> ""], + prefix1 |-> [channel |-> "", port |-> ""]] + >> + :> 0, + error |-> FALSE, + handler |-> "", + packet |-> + [data |-> + [amount |-> 5, + denomTrace |-> + [denom |-> "atom", + prefix0 |-> [channel |-> "", port |-> ""], + prefix1 |-> [channel |-> "", port |-> ""]], + receiver |-> "a3", + sender |-> "a1"], + destChannel |-> "channel-0", + destPort |-> "transfer", + sourceChannel |-> "channel-0", + sourcePort |-> "transfer"]] +/\ p = [data |-> + [amount |-> 5, + denomTrace |-> + [denom |-> "atom", + prefix0 |-> [channel |-> "", port |-> ""], + prefix1 |-> [channel |-> "", port |-> ""]], + receiver |-> "a3", + sender |-> "a1"], + destChannel |-> "channel-0", + destPort |-> "transfer", + sourceChannel |-> "channel-0", + sourcePort |-> "transfer"] + +(* Transition 3 to State3 *) + +State3 == +/\ bank = << + [channel |-> "", id |-> "", port |-> ""], [denom |-> "", + prefix0 |-> [channel |-> "", port |-> ""], + prefix1 |-> [channel |-> "", port |-> ""]] + >> + :> 0 + @@ << + [channel |-> "", id |-> "a3", port |-> ""], [denom |-> "atom", + prefix0 |-> [channel |-> "channel-0", port |-> "transfer"], + prefix1 |-> [channel |-> "", port |-> ""]] + >> + :> 5 +/\ count = 1 +/\ error = FALSE +/\ handler = "OnRecvPacket" +/\ history = 0 + :> [bankAfter |-> + << + [channel |-> "", id |-> "", port |-> ""], [denom |-> "", + prefix0 |-> [channel |-> "", port |-> ""], + prefix1 |-> [channel |-> "", port |-> ""]] + >> + :> 0, + bankBefore |-> + << + [channel |-> "", id |-> "", port |-> ""], [denom |-> "", + prefix0 |-> [channel |-> "", port |-> ""], + prefix1 |-> [channel |-> "", port |-> ""]] + >> + :> 0, + error |-> FALSE, + handler |-> "", + packet |-> + [data |-> + [amount |-> 5, + denomTrace |-> + [denom |-> "atom", + prefix0 |-> [channel |-> "", port |-> ""], + prefix1 |-> [channel |-> "", port |-> ""]], + receiver |-> "a3", + sender |-> "a1"], + destChannel |-> "channel-0", + destPort |-> "transfer", + sourceChannel |-> "channel-0", + sourcePort |-> "transfer"]] + @@ 1 + :> [bankAfter |-> + << + [channel |-> "", id |-> "", port |-> ""], [denom |-> "", + prefix0 |-> [channel |-> "", port |-> ""], + prefix1 |-> [channel |-> "", port |-> ""]] + >> + :> 0 + @@ << + [channel |-> "", id |-> "a3", port |-> ""], [denom |-> "atom", + prefix0 |-> [channel |-> "channel-0", port |-> "transfer"], + prefix1 |-> [channel |-> "", port |-> ""]] + >> + :> 5, + bankBefore |-> + << + [channel |-> "", id |-> "", port |-> ""], [denom |-> "", + prefix0 |-> [channel |-> "", port |-> ""], + prefix1 |-> [channel |-> "", port |-> ""]] + >> + :> 0, + error |-> FALSE, + handler |-> "OnRecvPacket", + packet |-> + [data |-> + [amount |-> 5, + denomTrace |-> + [denom |-> "atom", + prefix0 |-> [channel |-> "", port |-> ""], + prefix1 |-> [channel |-> "", port |-> ""]], + receiver |-> "a3", + sender |-> "a1"], + destChannel |-> "channel-0", + destPort |-> "transfer", + sourceChannel |-> "channel-0", + sourcePort |-> "transfer"]] +/\ p = [data |-> + [amount |-> 3, + denomTrace |-> + [denom |-> "atom", + prefix0 |-> [channel |-> "channel-0", port |-> "transfer"], + prefix1 |-> [channel |-> "", port |-> ""]], + receiver |-> "a1", + sender |-> "a3"], + destChannel |-> "channel-0", + destPort |-> "transfer", + sourceChannel |-> "channel-1", + sourcePort |-> "transfer"] + +(* Transition 1 to State4 *) + +State4 == +/\ bank = << + [channel |-> "", id |-> "", port 
|-> ""], [denom |-> "", + prefix0 |-> [channel |-> "", port |-> ""], + prefix1 |-> [channel |-> "", port |-> ""]] + >> + :> 0 + @@ << + [channel |-> "", id |-> "a3", port |-> ""], [denom |-> "atom", + prefix0 |-> [channel |-> "channel-0", port |-> "transfer"], + prefix1 |-> [channel |-> "", port |-> ""]] + >> + :> 2 + @@ << + [channel |-> "channel-1", id |-> "", port |-> "transfer"], [denom |-> "atom", + prefix0 |-> [channel |-> "channel-0", port |-> "transfer"], + prefix1 |-> [channel |-> "", port |-> ""]] + >> + :> 3 +/\ count = 2 +/\ error = FALSE +/\ handler = "SendTransfer" +/\ history = 0 + :> [bankAfter |-> + << + [channel |-> "", id |-> "", port |-> ""], [denom |-> "", + prefix0 |-> [channel |-> "", port |-> ""], + prefix1 |-> [channel |-> "", port |-> ""]] + >> + :> 0, + bankBefore |-> + << + [channel |-> "", id |-> "", port |-> ""], [denom |-> "", + prefix0 |-> [channel |-> "", port |-> ""], + prefix1 |-> [channel |-> "", port |-> ""]] + >> + :> 0, + error |-> FALSE, + handler |-> "", + packet |-> + [data |-> + [amount |-> 5, + denomTrace |-> + [denom |-> "atom", + prefix0 |-> [channel |-> "", port |-> ""], + prefix1 |-> [channel |-> "", port |-> ""]], + receiver |-> "a3", + sender |-> "a1"], + destChannel |-> "channel-0", + destPort |-> "transfer", + sourceChannel |-> "channel-0", + sourcePort |-> "transfer"]] + @@ 1 + :> [bankAfter |-> + << + [channel |-> "", id |-> "", port |-> ""], [denom |-> "", + prefix0 |-> [channel |-> "", port |-> ""], + prefix1 |-> [channel |-> "", port |-> ""]] + >> + :> 0 + @@ << + [channel |-> "", id |-> "a3", port |-> ""], [denom |-> "atom", + prefix0 |-> [channel |-> "channel-0", port |-> "transfer"], + prefix1 |-> [channel |-> "", port |-> ""]] + >> + :> 5, + bankBefore |-> + << + [channel |-> "", id |-> "", port |-> ""], [denom |-> "", + prefix0 |-> [channel |-> "", port |-> ""], + prefix1 |-> [channel |-> "", port |-> ""]] + >> + :> 0, + error |-> FALSE, + handler |-> "OnRecvPacket", + packet |-> + [data |-> + [amount |-> 5, + denomTrace |-> + [denom |-> "atom", + prefix0 |-> [channel |-> "", port |-> ""], + prefix1 |-> [channel |-> "", port |-> ""]], + receiver |-> "a3", + sender |-> "a1"], + destChannel |-> "channel-0", + destPort |-> "transfer", + sourceChannel |-> "channel-0", + sourcePort |-> "transfer"]] + @@ 2 + :> [bankAfter |-> + << + [channel |-> "", id |-> "", port |-> ""], [denom |-> "", + prefix0 |-> [channel |-> "", port |-> ""], + prefix1 |-> [channel |-> "", port |-> ""]] + >> + :> 0 + @@ << + [channel |-> "", id |-> "a3", port |-> ""], [denom |-> "atom", + prefix0 |-> [channel |-> "channel-0", port |-> "transfer"], + prefix1 |-> [channel |-> "", port |-> ""]] + >> + :> 2 + @@ << + [channel |-> "channel-1", id |-> "", port |-> "transfer"], [denom |-> + "atom", + prefix0 |-> [channel |-> "channel-0", port |-> "transfer"], + prefix1 |-> [channel |-> "", port |-> ""]] + >> + :> 3, + bankBefore |-> + << + [channel |-> "", id |-> "", port |-> ""], [denom |-> "", + prefix0 |-> [channel |-> "", port |-> ""], + prefix1 |-> [channel |-> "", port |-> ""]] + >> + :> 0 + @@ << + [channel |-> "", id |-> "a3", port |-> ""], [denom |-> "atom", + prefix0 |-> [channel |-> "channel-0", port |-> "transfer"], + prefix1 |-> [channel |-> "", port |-> ""]] + >> + :> 5, + error |-> FALSE, + handler |-> "SendTransfer", + packet |-> + [data |-> + [amount |-> 3, + denomTrace |-> + [denom |-> "atom", + prefix0 |-> [channel |-> "channel-0", port |-> "transfer"], + prefix1 |-> [channel |-> "", port |-> ""]], + receiver |-> "a1", + sender |-> "a3"], + 
destChannel |-> "channel-0", + destPort |-> "transfer", + sourceChannel |-> "channel-1", + sourcePort |-> "transfer"]] +/\ p = [data |-> + [amount |-> 1, + denomTrace |-> + [denom |-> "atom", + prefix0 |-> [channel |-> "channel-0", port |-> "transfer"], + prefix1 |-> [channel |-> "channel-0", port |-> "transfer"]], + receiver |-> "a1", + sender |-> "a1"], + destChannel |-> "channel-1", + destPort |-> "transfer", + sourceChannel |-> "channel-0", + sourcePort |-> "transfer"] + +(* Transition 4 to State5 *) + +State5 == +/\ bank = << + [channel |-> "", id |-> "", port |-> ""], [denom |-> "", + prefix0 |-> [channel |-> "", port |-> ""], + prefix1 |-> [channel |-> "", port |-> ""]] + >> + :> 0 + @@ << + [channel |-> "", id |-> "a1", port |-> ""], [denom |-> "atom", + prefix0 |-> [channel |-> "channel-0", port |-> "transfer"], + prefix1 |-> [channel |-> "", port |-> ""]] + >> + :> 1 + @@ << + [channel |-> "", id |-> "a3", port |-> ""], [denom |-> "atom", + prefix0 |-> [channel |-> "channel-0", port |-> "transfer"], + prefix1 |-> [channel |-> "", port |-> ""]] + >> + :> 2 + @@ << + [channel |-> "channel-1", id |-> "", port |-> "transfer"], [denom |-> "atom", + prefix0 |-> [channel |-> "channel-0", port |-> "transfer"], + prefix1 |-> [channel |-> "", port |-> ""]] + >> + :> 2 +/\ count = 3 +/\ error = FALSE +/\ handler = "OnRecvPacket" +/\ history = 0 + :> [bankAfter |-> + << + [channel |-> "", id |-> "", port |-> ""], [denom |-> "", + prefix0 |-> [channel |-> "", port |-> ""], + prefix1 |-> [channel |-> "", port |-> ""]] + >> + :> 0, + bankBefore |-> + << + [channel |-> "", id |-> "", port |-> ""], [denom |-> "", + prefix0 |-> [channel |-> "", port |-> ""], + prefix1 |-> [channel |-> "", port |-> ""]] + >> + :> 0, + error |-> FALSE, + handler |-> "", + packet |-> + [data |-> + [amount |-> 5, + denomTrace |-> + [denom |-> "atom", + prefix0 |-> [channel |-> "", port |-> ""], + prefix1 |-> [channel |-> "", port |-> ""]], + receiver |-> "a3", + sender |-> "a1"], + destChannel |-> "channel-0", + destPort |-> "transfer", + sourceChannel |-> "channel-0", + sourcePort |-> "transfer"]] + @@ 1 + :> [bankAfter |-> + << + [channel |-> "", id |-> "", port |-> ""], [denom |-> "", + prefix0 |-> [channel |-> "", port |-> ""], + prefix1 |-> [channel |-> "", port |-> ""]] + >> + :> 0 + @@ << + [channel |-> "", id |-> "a3", port |-> ""], [denom |-> "atom", + prefix0 |-> [channel |-> "channel-0", port |-> "transfer"], + prefix1 |-> [channel |-> "", port |-> ""]] + >> + :> 5, + bankBefore |-> + << + [channel |-> "", id |-> "", port |-> ""], [denom |-> "", + prefix0 |-> [channel |-> "", port |-> ""], + prefix1 |-> [channel |-> "", port |-> ""]] + >> + :> 0, + error |-> FALSE, + handler |-> "OnRecvPacket", + packet |-> + [data |-> + [amount |-> 5, + denomTrace |-> + [denom |-> "atom", + prefix0 |-> [channel |-> "", port |-> ""], + prefix1 |-> [channel |-> "", port |-> ""]], + receiver |-> "a3", + sender |-> "a1"], + destChannel |-> "channel-0", + destPort |-> "transfer", + sourceChannel |-> "channel-0", + sourcePort |-> "transfer"]] + @@ 2 + :> [bankAfter |-> + << + [channel |-> "", id |-> "", port |-> ""], [denom |-> "", + prefix0 |-> [channel |-> "", port |-> ""], + prefix1 |-> [channel |-> "", port |-> ""]] + >> + :> 0 + @@ << + [channel |-> "", id |-> "a3", port |-> ""], [denom |-> "atom", + prefix0 |-> [channel |-> "channel-0", port |-> "transfer"], + prefix1 |-> [channel |-> "", port |-> ""]] + >> + :> 2 + @@ << + [channel |-> "channel-1", id |-> "", port |-> "transfer"], [denom |-> + "atom", + prefix0 |-> 
[channel |-> "channel-0", port |-> "transfer"], + prefix1 |-> [channel |-> "", port |-> ""]] + >> + :> 3, + bankBefore |-> + << + [channel |-> "", id |-> "", port |-> ""], [denom |-> "", + prefix0 |-> [channel |-> "", port |-> ""], + prefix1 |-> [channel |-> "", port |-> ""]] + >> + :> 0 + @@ << + [channel |-> "", id |-> "a3", port |-> ""], [denom |-> "atom", + prefix0 |-> [channel |-> "channel-0", port |-> "transfer"], + prefix1 |-> [channel |-> "", port |-> ""]] + >> + :> 5, + error |-> FALSE, + handler |-> "SendTransfer", + packet |-> + [data |-> + [amount |-> 3, + denomTrace |-> + [denom |-> "atom", + prefix0 |-> [channel |-> "channel-0", port |-> "transfer"], + prefix1 |-> [channel |-> "", port |-> ""]], + receiver |-> "a1", + sender |-> "a3"], + destChannel |-> "channel-0", + destPort |-> "transfer", + sourceChannel |-> "channel-1", + sourcePort |-> "transfer"]] + @@ 3 + :> [bankAfter |-> + << + [channel |-> "", id |-> "", port |-> ""], [denom |-> "", + prefix0 |-> [channel |-> "", port |-> ""], + prefix1 |-> [channel |-> "", port |-> ""]] + >> + :> 0 + @@ << + [channel |-> "", id |-> "a1", port |-> ""], [denom |-> "atom", + prefix0 |-> [channel |-> "channel-0", port |-> "transfer"], + prefix1 |-> [channel |-> "", port |-> ""]] + >> + :> 1 + @@ << + [channel |-> "", id |-> "a3", port |-> ""], [denom |-> "atom", + prefix0 |-> [channel |-> "channel-0", port |-> "transfer"], + prefix1 |-> [channel |-> "", port |-> ""]] + >> + :> 2 + @@ << + [channel |-> "channel-1", id |-> "", port |-> "transfer"], [denom |-> + "atom", + prefix0 |-> [channel |-> "channel-0", port |-> "transfer"], + prefix1 |-> [channel |-> "", port |-> ""]] + >> + :> 2, + bankBefore |-> + << + [channel |-> "", id |-> "", port |-> ""], [denom |-> "", + prefix0 |-> [channel |-> "", port |-> ""], + prefix1 |-> [channel |-> "", port |-> ""]] + >> + :> 0 + @@ << + [channel |-> "", id |-> "a3", port |-> ""], [denom |-> "atom", + prefix0 |-> [channel |-> "channel-0", port |-> "transfer"], + prefix1 |-> [channel |-> "", port |-> ""]] + >> + :> 2 + @@ << + [channel |-> "channel-1", id |-> "", port |-> "transfer"], [denom |-> + "atom", + prefix0 |-> [channel |-> "channel-0", port |-> "transfer"], + prefix1 |-> [channel |-> "", port |-> ""]] + >> + :> 3, + error |-> FALSE, + handler |-> "OnRecvPacket", + packet |-> + [data |-> + [amount |-> 1, + denomTrace |-> + [denom |-> "atom", + prefix0 |-> [channel |-> "channel-0", port |-> "transfer"], + prefix1 |-> [channel |-> "channel-0", port |-> "transfer"]], + receiver |-> "a1", + sender |-> "a1"], + destChannel |-> "channel-1", + destPort |-> "transfer", + sourceChannel |-> "channel-0", + sourcePort |-> "transfer"]] +/\ p = [data |-> + [amount |-> 0, + denomTrace |-> + [denom |-> "btc", + prefix0 |-> [channel |-> "transfer", port |-> "bitcoin-hub"], + prefix1 |-> [channel |-> "channel-0", port |-> "channel-1"]], + receiver |-> "a1", + sender |-> ""], + destChannel |-> "ethereum-hub", + destPort |-> "cosmos-hub", + sourceChannel |-> "channel-0", + sourcePort |-> "transfer"] + +(* The following formula holds true in the last state and violates the invariant *) + +InvariantViolation == + history[1]["handler"] = "OnRecvPacket" + /\ BMC!Skolem((\E s$2 \in DOMAIN history: + ((IF history[s$2]["packet"]["data"]["denomTrace"]["prefix0"] + = [port |-> "", channel |-> ""] + THEN [port |-> "", channel |-> ""] + ELSE IF history[s$2]["packet"]["data"]["denomTrace"]["prefix1"] + = [port |-> "", channel |-> ""] + THEN history[s$2]["packet"]["data"]["denomTrace"]["prefix0"] + ELSE 
history[s$2]["packet"]["data"]["denomTrace"]["prefix1"])[ + "port" + ] + = history[s$2]["packet"]["sourcePort"] + /\ (IF history[s$2]["packet"]["data"]["denomTrace"]["prefix0"] + = [port |-> "", channel |-> ""] + THEN [port |-> "", channel |-> ""] + ELSE IF history[s$2]["packet"]["data"]["denomTrace"]["prefix1"] + = [port |-> "", channel |-> ""] + THEN history[s$2]["packet"]["data"]["denomTrace"]["prefix0"] + ELSE history[s$2]["packet"]["data"]["denomTrace"]["prefix1"])[ + "channel" + ] + = history[s$2]["packet"]["sourceChannel"]) + /\ history[s$2]["handler"] = "OnRecvPacket" + /\ history[s$2]["error"] = FALSE)) + +================================================================================ +\* Created by Apalache on Thu Dec 10 13:38:11 CET 2020 +\* https://github.com/informalsystems/apalache diff --git a/applications/transfer/keeper/msg_server.go b/applications/transfer/keeper/msg_server.go new file mode 100644 index 0000000000..dd2999af34 --- /dev/null +++ b/applications/transfer/keeper/msg_server.go @@ -0,0 +1,43 @@ +package keeper + +import ( + "context" + + sdk "github.com/cosmos/cosmos-sdk/types" + "github.com/cosmos/cosmos-sdk/x/ibc/applications/transfer/types" +) + +var _ types.MsgServer = Keeper{} + +// See createOutgoingPacket in spec:https://github.com/cosmos/ics/tree/master/spec/ics-020-fungible-token-transfer#packet-relay + +// Transfer defines a rpc handler method for MsgTransfer. +func (k Keeper) Transfer(goCtx context.Context, msg *types.MsgTransfer) (*types.MsgTransferResponse, error) { + ctx := sdk.UnwrapSDKContext(goCtx) + + sender, err := sdk.AccAddressFromBech32(msg.Sender) + if err != nil { + return nil, err + } + if err := k.SendTransfer( + ctx, msg.SourcePort, msg.SourceChannel, msg.Token, sender, msg.Receiver, msg.TimeoutHeight, msg.TimeoutTimestamp, + ); err != nil { + return nil, err + } + + k.Logger(ctx).Info("IBC fungible token transfer", "token", msg.Token.Denom, "amount", msg.Token.Amount.String(), "sender", msg.Sender, "receiver", msg.Receiver) + + ctx.EventManager().EmitEvents(sdk.Events{ + sdk.NewEvent( + types.EventTypeTransfer, + sdk.NewAttribute(sdk.AttributeKeySender, msg.Sender), + sdk.NewAttribute(types.AttributeKeyReceiver, msg.Receiver), + ), + sdk.NewEvent( + sdk.EventTypeMessage, + sdk.NewAttribute(sdk.AttributeKeyModule, types.ModuleName), + ), + }) + + return &types.MsgTransferResponse{}, nil +} diff --git a/applications/transfer/keeper/params.go b/applications/transfer/keeper/params.go new file mode 100644 index 0000000000..39a6c5d53d --- /dev/null +++ b/applications/transfer/keeper/params.go @@ -0,0 +1,30 @@ +package keeper + +import ( + sdk "github.com/cosmos/cosmos-sdk/types" + "github.com/cosmos/cosmos-sdk/x/ibc/applications/transfer/types" +) + +// GetSendEnabled retrieves the send enabled boolean from the paramstore +func (k Keeper) GetSendEnabled(ctx sdk.Context) bool { + var res bool + k.paramSpace.Get(ctx, types.KeySendEnabled, &res) + return res +} + +// GetReceiveEnabled retrieves the receive enabled boolean from the paramstore +func (k Keeper) GetReceiveEnabled(ctx sdk.Context) bool { + var res bool + k.paramSpace.Get(ctx, types.KeyReceiveEnabled, &res) + return res +} + +// GetParams returns the total set of ibc-transfer parameters. +func (k Keeper) GetParams(ctx sdk.Context) types.Params { + return types.NewParams(k.GetSendEnabled(ctx), k.GetReceiveEnabled(ctx)) +} + +// SetParams sets the total set of ibc-transfer parameters. 
+func (k Keeper) SetParams(ctx sdk.Context, params types.Params) { + k.paramSpace.SetParamSet(ctx, &params) +} diff --git a/applications/transfer/keeper/params_test.go b/applications/transfer/keeper/params_test.go new file mode 100644 index 0000000000..96f17ff7f1 --- /dev/null +++ b/applications/transfer/keeper/params_test.go @@ -0,0 +1,15 @@ +package keeper_test + +import "github.com/cosmos/cosmos-sdk/x/ibc/applications/transfer/types" + +func (suite *KeeperTestSuite) TestParams() { + expParams := types.DefaultParams() + + params := suite.chainA.App.TransferKeeper.GetParams(suite.chainA.GetContext()) + suite.Require().Equal(expParams, params) + + expParams.SendEnabled = false + suite.chainA.App.TransferKeeper.SetParams(suite.chainA.GetContext(), expParams) + params = suite.chainA.App.TransferKeeper.GetParams(suite.chainA.GetContext()) + suite.Require().Equal(expParams, params) +} diff --git a/applications/transfer/keeper/relay.go b/applications/transfer/keeper/relay.go new file mode 100644 index 0000000000..4889014a40 --- /dev/null +++ b/applications/transfer/keeper/relay.go @@ -0,0 +1,406 @@ +package keeper + +import ( + "fmt" + "strings" + + "github.com/armon/go-metrics" + + "github.com/cosmos/cosmos-sdk/telemetry" + sdk "github.com/cosmos/cosmos-sdk/types" + sdkerrors "github.com/cosmos/cosmos-sdk/types/errors" + "github.com/cosmos/cosmos-sdk/x/ibc/applications/transfer/types" + clienttypes "github.com/cosmos/cosmos-sdk/x/ibc/core/02-client/types" + channeltypes "github.com/cosmos/cosmos-sdk/x/ibc/core/04-channel/types" + host "github.com/cosmos/cosmos-sdk/x/ibc/core/24-host" +) + +// SendTransfer handles transfer sending logic. There are 2 possible cases: +// +// 1. Sender chain is acting as the source zone. The coins are transferred +// to an escrow address (i.e. locked) on the sender chain and then transferred +// to the receiving chain through IBC TAO logic. It is expected that the +// receiving chain will mint vouchers to the receiving address. +// +// 2. Sender chain is acting as the sink zone. The coins (vouchers) are burned +// on the sender chain and then transferred to the receiving chain through IBC +// TAO logic. It is expected that the receiving chain, which had previously +// sent the original denomination, will unescrow the fungible token and send +// it to the receiving address. +// +// Another way of thinking of source and sink zones is through the token's +// timeline. Each send to any chain other than the one it was previously +// received from is a movement forwards in the token's timeline. This causes +// trace to be added to the token's history and the destination port and +// destination channel to be prefixed to the denomination. In these instances +// the sender chain is acting as the source zone. When the token is sent back +// to the chain it previously received from, the prefix is removed. This is +// a backwards movement in the token's timeline and the sender chain +// is acting as the sink zone. +// +// Example: +// These steps of transfer occur: A -> B -> C -> A -> C -> B -> A +// +// 1. A -> B : sender chain is source zone. Denom upon receiving: 'B/denom' +// 2. B -> C : sender chain is source zone. Denom upon receiving: 'C/B/denom' +// 3. C -> A : sender chain is source zone. Denom upon receiving: 'A/C/B/denom' +// 4. A -> C : sender chain is sink zone. Denom upon receiving: 'C/B/denom' +// 5. C -> B : sender chain is sink zone. Denom upon receiving: 'B/denom' +// 6. B -> A : sender chain is sink zone. 
Denom upon receiving: 'denom' +func (k Keeper) SendTransfer( + ctx sdk.Context, + sourcePort, + sourceChannel string, + token sdk.Coin, + sender sdk.AccAddress, + receiver string, + timeoutHeight clienttypes.Height, + timeoutTimestamp uint64, +) error { + + if !k.GetSendEnabled(ctx) { + return types.ErrSendDisabled + } + + sourceChannelEnd, found := k.channelKeeper.GetChannel(ctx, sourcePort, sourceChannel) + if !found { + return sdkerrors.Wrapf(channeltypes.ErrChannelNotFound, "port ID (%s) channel ID (%s)", sourcePort, sourceChannel) + } + + destinationPort := sourceChannelEnd.GetCounterparty().GetPortID() + destinationChannel := sourceChannelEnd.GetCounterparty().GetChannelID() + + // get the next sequence + sequence, found := k.channelKeeper.GetNextSequenceSend(ctx, sourcePort, sourceChannel) + if !found { + return sdkerrors.Wrapf( + channeltypes.ErrSequenceSendNotFound, + "source port: %s, source channel: %s", sourcePort, sourceChannel, + ) + } + + // begin createOutgoingPacket logic + // See spec for this logic: https://github.com/cosmos/ics/tree/master/spec/ics-020-fungible-token-transfer#packet-relay + channelCap, ok := k.scopedKeeper.GetCapability(ctx, host.ChannelCapabilityPath(sourcePort, sourceChannel)) + if !ok { + return sdkerrors.Wrap(channeltypes.ErrChannelCapabilityNotFound, "module does not own channel capability") + } + + // NOTE: denomination and hex hash correctness checked during msg.ValidateBasic + fullDenomPath := token.Denom + + var err error + + // deconstruct the token denomination into the denomination trace info + // to determine if the sender is the source chain + if strings.HasPrefix(token.Denom, "ibc/") { + fullDenomPath, err = k.DenomPathFromHash(ctx, token.Denom) + if err != nil { + return err + } + } + + labels := []metrics.Label{ + telemetry.NewLabel("destination-port", destinationPort), + telemetry.NewLabel("destination-channel", destinationChannel), + } + + // NOTE: SendTransfer simply sends the denomination as it exists on its own + // chain inside the packet data. The receiving chain will perform denom + // prefixing as necessary. + + if types.SenderChainIsSource(sourcePort, sourceChannel, fullDenomPath) { + labels = append(labels, telemetry.NewLabel("source", "true")) + + // create the escrow address for the tokens + escrowAddress := types.GetEscrowAddress(sourcePort, sourceChannel) + + // escrow source tokens. It fails if the balance is insufficient. + if err := k.bankKeeper.SendCoins( + ctx, sender, escrowAddress, sdk.NewCoins(token), + ); err != nil { + return err + } + + } else { + labels = append(labels, telemetry.NewLabel("source", "false")) + + // transfer the coins to the module account and burn them + if err := k.bankKeeper.SendCoinsFromAccountToModule( + ctx, sender, types.ModuleName, sdk.NewCoins(token), + ); err != nil { + return err + } + + if err := k.bankKeeper.BurnCoins( + ctx, types.ModuleName, sdk.NewCoins(token), + ); err != nil { + // NOTE: should not happen as the module account was + // retrieved in the step above and it has enough balance + // to burn. 
+ panic(fmt.Sprintf("cannot burn coins after a successful send to a module account: %v", err)) + } + } + + packetData := types.NewFungibleTokenPacketData( + fullDenomPath, token.Amount.Uint64(), sender.String(), receiver, + ) + + packet := channeltypes.NewPacket( + packetData.GetBytes(), + sequence, + sourcePort, + sourceChannel, + destinationPort, + destinationChannel, + timeoutHeight, + timeoutTimestamp, + ) + + if err := k.channelKeeper.SendPacket(ctx, channelCap, packet); err != nil { + return err + } + + defer func() { + telemetry.SetGaugeWithLabels( + []string{"tx", "msg", "ibc", "transfer"}, + float32(token.Amount.Int64()), + []metrics.Label{telemetry.NewLabel("denom", fullDenomPath)}, + ) + + telemetry.IncrCounterWithLabels( + []string{"ibc", types.ModuleName, "send"}, + 1, + labels, + ) + }() + + return nil +} + +// OnRecvPacket processes a cross chain fungible token transfer. If the +// sender chain is the source of minted tokens then vouchers will be minted +// and sent to the receiving address. Otherwise if the sender chain is sending +// back tokens this chain originally transferred to it, the tokens are +// unescrowed and sent to the receiving address. +func (k Keeper) OnRecvPacket(ctx sdk.Context, packet channeltypes.Packet, data types.FungibleTokenPacketData) error { + // validate packet data upon receiving + if err := data.ValidateBasic(); err != nil { + return err + } + + if !k.GetReceiveEnabled(ctx) { + return types.ErrReceiveDisabled + } + + // decode the receiver address + receiver, err := sdk.AccAddressFromBech32(data.Receiver) + if err != nil { + return err + } + + labels := []metrics.Label{ + telemetry.NewLabel("source-port", packet.GetSourcePort()), + telemetry.NewLabel("source-channel", packet.GetSourceChannel()), + } + + // This is the prefix that would have been prefixed to the denomination + // on sender chain IF and only if the token originally came from the + // receiving chain. + // + // NOTE: We use SourcePort and SourceChannel here, because the counterparty + // chain would have prefixed with DestPort and DestChannel when originally + // receiving this coin as seen in the "sender chain is the source" condition. + + if types.ReceiverChainIsSource(packet.GetSourcePort(), packet.GetSourceChannel(), data.Denom) { + // sender chain is not the source, unescrow tokens + + // remove prefix added by sender chain + voucherPrefix := types.GetDenomPrefix(packet.GetSourcePort(), packet.GetSourceChannel()) + unprefixedDenom := data.Denom[len(voucherPrefix):] + + // coin denomination used in sending from the escrow address + denom := unprefixedDenom + + // The denomination used to send the coins is either the native denom or the hash of the path + // if the denomination is not native. + denomTrace := types.ParseDenomTrace(unprefixedDenom) + if denomTrace.Path != "" { + denom = denomTrace.IBCDenom() + } + token := sdk.NewCoin(denom, sdk.NewIntFromUint64(data.Amount)) + + // unescrow tokens + escrowAddress := types.GetEscrowAddress(packet.GetDestPort(), packet.GetDestChannel()) + if err := k.bankKeeper.SendCoins(ctx, escrowAddress, receiver, sdk.NewCoins(token)); err != nil { + // NOTE: this error is only expected to occur given an unexpected bug or a malicious + // counterparty module. The bug may occur in bank or any part of the code that allows + // the escrow address to be drained. A malicious counterparty module could drain the + // escrow address by allowing more tokens to be sent back then were escrowed. 
+ return sdkerrors.Wrap(err, "unable to unescrow tokens, this may be caused by a malicious counterparty module or a bug: please open an issue on counterparty module") + } + + defer func() { + telemetry.SetGaugeWithLabels( + []string{"ibc", types.ModuleName, "packet", "receive"}, + float32(data.Amount), + []metrics.Label{telemetry.NewLabel("denom", unprefixedDenom)}, + ) + + telemetry.IncrCounterWithLabels( + []string{"ibc", types.ModuleName, "receive"}, + 1, + append( + labels, telemetry.NewLabel("source", "true"), + ), + ) + }() + + return nil + } + + // sender chain is the source, mint vouchers + + // since SendPacket did not prefix the denomination, we must prefix denomination here + sourcePrefix := types.GetDenomPrefix(packet.GetDestPort(), packet.GetDestChannel()) + // NOTE: sourcePrefix contains the trailing "/" + prefixedDenom := sourcePrefix + data.Denom + + // construct the denomination trace from the full raw denomination + denomTrace := types.ParseDenomTrace(prefixedDenom) + + traceHash := denomTrace.Hash() + if !k.HasDenomTrace(ctx, traceHash) { + k.SetDenomTrace(ctx, denomTrace) + } + + voucherDenom := denomTrace.IBCDenom() + ctx.EventManager().EmitEvent( + sdk.NewEvent( + types.EventTypeDenomTrace, + sdk.NewAttribute(types.AttributeKeyTraceHash, traceHash.String()), + sdk.NewAttribute(types.AttributeKeyDenom, voucherDenom), + ), + ) + + voucher := sdk.NewCoin(voucherDenom, sdk.NewIntFromUint64(data.Amount)) + + // mint new tokens if the source of the transfer is the same chain + if err := k.bankKeeper.MintCoins( + ctx, types.ModuleName, sdk.NewCoins(voucher), + ); err != nil { + return err + } + + // send to receiver + if err := k.bankKeeper.SendCoinsFromModuleToAccount( + ctx, types.ModuleName, receiver, sdk.NewCoins(voucher), + ); err != nil { + panic(fmt.Sprintf("unable to send coins from module to account despite previously minting coins to module account: %v", err)) + } + + defer func() { + telemetry.SetGaugeWithLabels( + []string{"ibc", types.ModuleName, "packet", "receive"}, + float32(data.Amount), + []metrics.Label{telemetry.NewLabel("denom", data.Denom)}, + ) + + telemetry.IncrCounterWithLabels( + []string{"ibc", types.ModuleName, "receive"}, + 1, + append( + labels, telemetry.NewLabel("source", "false"), + ), + ) + }() + + return nil +} + +// OnAcknowledgementPacket responds to the success or failure of a packet +// acknowledgement written on the receiving chain. If the acknowledgement +// was a success then nothing occurs. If the acknowledgement failed, then +// the sender is refunded their tokens using the refundPacketToken function. +func (k Keeper) OnAcknowledgementPacket(ctx sdk.Context, packet channeltypes.Packet, data types.FungibleTokenPacketData, ack channeltypes.Acknowledgement) error { + switch ack.Response.(type) { + case *channeltypes.Acknowledgement_Error: + return k.refundPacketToken(ctx, packet, data) + default: + // the acknowledgement succeeded on the receiving chain so nothing + // needs to be executed and no error needs to be returned + return nil + } +} + +// OnTimeoutPacket refunds the sender since the original packet sent was +// never received and has been timed out. +func (k Keeper) OnTimeoutPacket(ctx sdk.Context, packet channeltypes.Packet, data types.FungibleTokenPacketData) error { + return k.refundPacketToken(ctx, packet, data) +} + +// refundPacketToken will unescrow and send the tokens back to the sender +// if the sending chain was the source chain. 
Otherwise, the sent tokens +// were burnt in the original send so new tokens are minted and sent to +// the sending address. +func (k Keeper) refundPacketToken(ctx sdk.Context, packet channeltypes.Packet, data types.FungibleTokenPacketData) error { + // NOTE: packet data type already checked in handler.go + + // parse the denomination from the full denom path + trace := types.ParseDenomTrace(data.Denom) + + token := sdk.NewCoin(trace.IBCDenom(), sdk.NewIntFromUint64(data.Amount)) + + // decode the sender address + sender, err := sdk.AccAddressFromBech32(data.Sender) + if err != nil { + return err + } + + if types.SenderChainIsSource(packet.GetSourcePort(), packet.GetSourceChannel(), data.Denom) { + // unescrow tokens back to sender + escrowAddress := types.GetEscrowAddress(packet.GetSourcePort(), packet.GetSourceChannel()) + if err := k.bankKeeper.SendCoins(ctx, escrowAddress, sender, sdk.NewCoins(token)); err != nil { + // NOTE: this error is only expected to occur given an unexpected bug or a malicious + // counterparty module. The bug may occur in bank or any part of the code that allows + // the escrow address to be drained. A malicious counterparty module could drain the + // escrow address by allowing more tokens to be sent back than were escrowed. + return sdkerrors.Wrap(err, "unable to unescrow tokens, this may be caused by a malicious counterparty module or a bug: please open an issue on counterparty module") + } + + return nil + } + + // mint vouchers back to sender + if err := k.bankKeeper.MintCoins( + ctx, types.ModuleName, sdk.NewCoins(token), + ); err != nil { + return err + } + + if err := k.bankKeeper.SendCoinsFromModuleToAccount(ctx, types.ModuleName, sender, sdk.NewCoins(token)); err != nil { + panic(fmt.Sprintf("unable to send coins from module to account despite previously minting coins to module account: %v", err)) + } + + return nil +} + +// DenomPathFromHash returns the full denomination path prefix from an ibc denom with a hash +// component. +func (k Keeper) DenomPathFromHash(ctx sdk.Context, denom string) (string, error) { + // trim the denomination prefix, by default "ibc/" + hexHash := denom[len(types.DenomPrefix+"/"):] + + hash, err := types.ParseHexHash(hexHash) + if err != nil { + return "", sdkerrors.Wrap(types.ErrInvalidDenomForTransfer, err.Error()) + } + + denomTrace, found := k.GetDenomTrace(ctx, hash) + if !found { + return "", sdkerrors.Wrap(types.ErrTraceNotFound, hexHash) + } + + fullDenomPath := denomTrace.GetFullDenomPath() + return fullDenomPath, nil +} diff --git a/applications/transfer/keeper/relay_model/account.tla b/applications/transfer/keeper/relay_model/account.tla new file mode 100644 index 0000000000..84d743f6da --- /dev/null +++ b/applications/transfer/keeper/relay_model/account.tla @@ -0,0 +1,36 @@ +-------------------------- MODULE account ---------------------------- + +(** + The accounts interface; please ignore the definition bodies. 
+*) + +EXTENDS identifiers + +CONSTANT + AccountIds + +\* a non-account +NullAccount == "NullAccount" + +\* All accounts +Accounts == { NullAccount } + +\* Make an escrow account for the given port and channel +MakeEscrowAccount(port, channel) == NullAccount + +\* Make an account from the accound id +MakeAccount(accountId) == NullAccount + +\* Type constraints for accounts +AccountTypeOK == + /\ NullAccount \in Accounts + /\ \A p \in Identifiers, c \in Identifiers: + MakeEscrowAccount(p, c) \in Accounts + /\ \A a \in Identifiers: + MakeAccount(a) \in Accounts + +============================================================================= +\* Modification History +\* Last modified Thu Nov 19 18:21:10 CET 2020 by c +\* Last modified Thu Nov 05 14:44:18 CET 2020 by andrey +\* Created Thu Nov 05 13:22:40 CET 2020 by andrey diff --git a/applications/transfer/keeper/relay_model/account_record.tla b/applications/transfer/keeper/relay_model/account_record.tla new file mode 100644 index 0000000000..c7eed27af1 --- /dev/null +++ b/applications/transfer/keeper/relay_model/account_record.tla @@ -0,0 +1,46 @@ +-------------------------- MODULE account_record ---------------------------- + +(** + The most basic implementation of accounts, which is a union of normal and escrow accounts + Represented via records. +*) + +EXTENDS identifiers + +CONSTANT + AccountIds + +NullAccount == [ + port |-> NullId, + channel |-> NullId, + id |-> NullId +] + +Accounts == [ + port: Identifiers, + channel: Identifiers, + id: AccountIds +] + +MakeEscrowAccount(port, channel) == [ + port |-> port, + channel |-> channel, + id |-> NullId +] + +MakeAccount(accountId) == [ + port |-> NullId, + channel |-> NullId, + id |-> accountId +] + + +ACCOUNT == INSTANCE account +AccountTypeOK == ACCOUNT!AccountTypeOK + + +============================================================================= +\* Modification History +\* Last modified Thu Nov 19 18:21:46 CET 2020 by c +\* Last modified Thu Nov 05 14:49:10 CET 2020 by andrey +\* Created Thu Nov 05 13:22:40 CET 2020 by andrey diff --git a/applications/transfer/keeper/relay_model/apalache-to-relay-test.json b/applications/transfer/keeper/relay_model/apalache-to-relay-test.json new file mode 100644 index 0000000000..c8d70a3332 --- /dev/null +++ b/applications/transfer/keeper/relay_model/apalache-to-relay-test.json @@ -0,0 +1,100 @@ +{ + "description": "Transforms an Apalache counterexample into the test for ICS20 Token Transfer OnRecvPacket", + "usage": "jsonatr --use apalache-to-recv-test.json --in counterexample.json --out recv-test.json", + "input": [ + { + "name": "history", + "description": "extract history from the last state of Apalache CE", + "kind": "INLINE", + "source": "$.declarations[-2].body.and..[?(@.eq == 'history')].arg.atat..arg.record" + }, + { + "name": "bankRecordToBalance", + "description": "", + "kind": "INLINE", + "source": { + "address": [ + "$.colonGreater.tuple[0]..[?(@.key.str == 'port')].value.str | unwrap", + "$.colonGreater.tuple[0]..[?(@.key.str == 'channel')].value.str | unwrap", + "$.colonGreater.tuple[0]..[?(@.key.str == 'id')].value.str | unwrap" + ], + "denom": [ + "$.colonGreater.tuple[1]..[?(@.key.str == 'port')].value.str | unwrap", + "$.colonGreater.tuple[1]..[?(@.key.str == 'channel')].value.str | unwrap", + "$.colonGreater.tuple[1]..[?(@.key.str == 'denom')].value.str | unwrap" + ], + "amount": "$.arg | unwrap" + } + }, + { + "name": "bankBefore", + "description": "extract bankBefore from the history state", + "kind": "INLINE", + "source": 
"$..[?(@.key.str == 'bankBefore')].value.atat | unwrap | map(bankRecordToBalance)" + }, + { + "name": "bankAfter", + "description": "extract bankAfter from the history state", + "kind": "INLINE", + "source": "$..[?(@.key.str == 'bankAfter')].value.atat | unwrap | map(bankRecordToBalance)" + }, + { + "name": "packet", + "description": "extract packet from the history state", + "kind": "INLINE", + "source": "$..[?(@.key.str == 'packet')].value.record" + }, + { + "name": "packetData", + "description": "extract bankAfter from the history state", + "kind": "INLINE", + "source": "$..[?(@.key.str == 'data')].value.record" + }, + { + "name": "packetDataDenom", + "description": "extract bankAfter from the history state", + "kind": "INLINE", + "source": "$..[?(@.key.str == 'data')].value.record.[?(@.key.str == 'denomTrace')].value.record" + }, + { + "name": "packetRecord", + "description": "decompose packet", + "kind": "INLINE", + "source": { + "sourceChannel" : "$.[?(@.key.str == 'sourceChannel')].value.str | unwrap", + "sourcePort" : "$.[?(@.key.str == 'sourcePort')].value.str | unwrap", + "destChannel" : "$.[?(@.key.str == 'destChannel')].value.str | unwrap", + "destPort" : "$.[?(@.key.str == 'destPort')].value.str | unwrap", + "data": { + "sender": "$packetData.[?(@.key.str == 'sender')].value.str | unwrap", + "receiver": "$packetData.[?(@.key.str == 'receiver')].value.str | unwrap", + "amount": "$packetData.[?(@.key.str == 'amount')].value | unwrap", + "denom": [ + "$packetDataDenom.[?(@.key.str == 'port')].value.str | unwrap", + "$packetDataDenom.[?(@.key.str == 'channel')].value.str | unwrap", + "$packetDataDenom.[?(@.key.str == 'denom')].value.str | unwrap" + ] + } + } + }, + { + "name": "handler", + "description": "extract handler from the history state", + "kind": "INLINE", + "source": "$..[?(@.key.str == 'handler')].value.str" + }, + { + "name": "historyState", + "description": "decompose single history state", + "kind": "INLINE", + "source": { + "packet": "$packet | unwrap | packetRecord", + "handler": "$handler | unwrap", + "bankBefore": "$bankBefore", + "bankAfter": "$bankAfter", + "error": "$..[?(@.key.str == 'error')].value | unwrap" + } + } + ], + "output": "$history[1:] | map(historyState)" +} \ No newline at end of file diff --git a/applications/transfer/keeper/relay_model/apalache-to-relay-test2.json b/applications/transfer/keeper/relay_model/apalache-to-relay-test2.json new file mode 100644 index 0000000000..a2c821c4db --- /dev/null +++ b/applications/transfer/keeper/relay_model/apalache-to-relay-test2.json @@ -0,0 +1,104 @@ +{ + "description": "Transforms an Apalache counterexample into the test for ICS20 Token Transfer OnRecvPacket", + "usage": "jsonatr --use apalache-to-recv-test.json --in counterexample.json --out recv-test.json", + "input": [ + { + "name": "history", + "description": "extract history from the last state of Apalache CE", + "kind": "INLINE", + "source": "$.declarations[-2].body.and..[?(@.eq == 'history')].arg.atat..arg.record" + }, + { + "name": "bankRecordToBalance", + "description": "", + "kind": "INLINE", + "source": { + "address": [ + "$.colonGreater.tuple[0]..[?(@.key.str == 'port')].value.str | unwrap", + "$.colonGreater.tuple[0]..[?(@.key.str == 'channel')].value.str | unwrap", + "$.colonGreater.tuple[0]..[?(@.key.str == 'id')].value.str | unwrap" + ], + "denom": [ + "$.colonGreater.tuple[1]..[?(@.key.str == 'prefix1')].value..[?(@.key.str == 'port')].value.str | unwrap", + "$.colonGreater.tuple[1]..[?(@.key.str == 'prefix1')].value..[?(@.key.str == 
'channel')].value.str | unwrap", + "$.colonGreater.tuple[1]..[?(@.key.str == 'prefix0')].value..[?(@.key.str == 'port')].value.str | unwrap", + "$.colonGreater.tuple[1]..[?(@.key.str == 'prefix0')].value..[?(@.key.str == 'channel')].value.str | unwrap", + "$.colonGreater.tuple[1]..[?(@.key.str == 'denom')].value.str | unwrap" + ], + "amount": "$.arg | unwrap" + } + }, + { + "name": "bankBefore", + "description": "extract bankBefore from the history state", + "kind": "INLINE", + "source": "$..[?(@.key.str == 'bankBefore')].value.atat | unwrap | map(bankRecordToBalance)" + }, + { + "name": "bankAfter", + "description": "extract bankAfter from the history state", + "kind": "INLINE", + "source": "$..[?(@.key.str == 'bankAfter')].value.atat | unwrap | map(bankRecordToBalance)" + }, + { + "name": "packet", + "description": "extract packet from the history state", + "kind": "INLINE", + "source": "$..[?(@.key.str == 'packet')].value.record" + }, + { + "name": "packetData", + "description": "extract bankAfter from the history state", + "kind": "INLINE", + "source": "$..[?(@.key.str == 'data')].value.record" + }, + { + "name": "packetDataDenom", + "description": "extract bankAfter from the history state", + "kind": "INLINE", + "source": "$..[?(@.key.str == 'data')].value.record.[?(@.key.str == 'denomTrace')].value.record" + }, + { + "name": "packetRecord", + "description": "decompose packet", + "kind": "INLINE", + "source": { + "sourceChannel" : "$.[?(@.key.str == 'sourceChannel')].value.str | unwrap", + "sourcePort" : "$.[?(@.key.str == 'sourcePort')].value.str | unwrap", + "destChannel" : "$.[?(@.key.str == 'destChannel')].value.str | unwrap", + "destPort" : "$.[?(@.key.str == 'destPort')].value.str | unwrap", + "data": { + "sender": "$packetData.[?(@.key.str == 'sender')].value.str | unwrap", + "receiver": "$packetData.[?(@.key.str == 'receiver')].value.str | unwrap", + "amount": "$packetData.[?(@.key.str == 'amount')].value | unwrap", + "denom": [ + "$packetDataDenom.[?(@.key.str == 'prefix1')].value..[?(@.key.str == 'port')].value.str | unwrap", + "$packetDataDenom.[?(@.key.str == 'prefix1')].value..[?(@.key.str == 'channel')].value.str | unwrap", + "$packetDataDenom.[?(@.key.str == 'prefix0')].value..[?(@.key.str == 'port')].value.str | unwrap", + "$packetDataDenom.[?(@.key.str == 'prefix0')].value..[?(@.key.str == 'channel')].value.str | unwrap", + "$packetDataDenom.[?(@.key.str == 'denom')].value.str | unwrap" + ] + } + } + }, + { + "name": "handler", + "description": "extract handler from the history state", + "kind": "INLINE", + "source": "$..[?(@.key.str == 'handler')].value.str" + }, + { + "name": "historyState", + "description": "decompose single history state", + "kind": "INLINE", + "source": { + "packet": "$packet | unwrap | packetRecord", + "handler": "$handler | unwrap", + "bankBefore": "$bankBefore", + "bankAfter": "$bankAfter", + "error": "$..[?(@.key.str == 'error')].value | unwrap" + } + } + ], + "output": "$history[1:] | map(historyState)" +} \ No newline at end of file diff --git a/applications/transfer/keeper/relay_model/denom.tla b/applications/transfer/keeper/relay_model/denom.tla new file mode 100644 index 0000000000..f729e7e14f --- /dev/null +++ b/applications/transfer/keeper/relay_model/denom.tla @@ -0,0 +1,50 @@ +-------------------------- MODULE denom ---------------------------- + +(** + The denomination traces interface; please ignore the definition bodies. 
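+  Concrete implementations are provided in denom_record.tla, denom_record2.tla and
+  denom_sequence.tla, each of which instantiates this module to reuse its type invariant.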
+*) + +EXTENDS identifiers + +CONSTANT + Denoms + +\* A non-account +NullDenomTrace == "NullDenomTrace" + +\* All denomination traces +DenomTraces == {NullDenomTrace} + +\* Make a new denomination trace from the port/channel prefix and the basic denom +MakeDenomTrace(port, channel, denom) == NullDenomTrace + +\* Get the denomination trace port +GetPort(trace) == NullId + +\* Get the denomination trace port +GetChannel(trace) == NullId + +\* Get the denomination trace basic denomination +GetDenom(trace) == NullDenomTrace + +\* Is this denomination trace a native denomination, or is it a prefixed trace +\* Note that those cases are exclusive, but not exhaustive +IsNativeDenomTrace(trace) == GetPort(trace) = NullId /\ GetChannel(trace) = NullId +IsPrefixedDenomTrace(trace) == GetPort(trace) /= NullId /\ GetChannel(trace) /= NullId + +DenomTypeOK == + /\ NullDenomTrace \in DenomTraces + /\ \A p \in Identifiers, c \in Identifiers, d \in Denoms: + MakeDenomTrace(p, c, d) \in DenomTraces + /\ \A t \in DenomTraces: + /\ GetPort(t) \in Identifiers + /\ GetChannel(t) \in Identifiers + /\ GetDenom(t) \in DenomTraces + + + + +============================================================================= +\* Modification History +\* Last modified Thu Nov 05 15:49:23 CET 2020 by andrey +\* Created Thu Nov 05 13:22:40 CET 2020 by andrey diff --git a/applications/transfer/keeper/relay_model/denom_record.tla b/applications/transfer/keeper/relay_model/denom_record.tla new file mode 100644 index 0000000000..2eb0d06f1d --- /dev/null +++ b/applications/transfer/keeper/relay_model/denom_record.tla @@ -0,0 +1,53 @@ +-------------------------- MODULE denom_record ---------------------------- + +(** + The most basic implementation of denomination traces that allows only one-step sequences + Represented via records +*) + +EXTENDS identifiers + +CONSTANT + Denoms + +MaxDenomLength == 3 + +DenomTraces == [ + port: Identifiers, + channel: Identifiers, + denom: Denoms +] + +NullDenomTrace == [ + port |-> NullId, + channel |-> NullId, + denom |-> NullId +] + +GetPort(trace) == trace.port +GetChannel(trace) == trace.channel +GetDenom(trace) == trace.denom + +IsNativeDenomTrace(trace) == GetPort(trace) = NullId /\ GetChannel(trace) = NullId /\ GetDenom(trace) /= NullId +IsPrefixedDenomTrace(trace) == GetPort(trace) /= NullId /\ GetChannel(trace) /= NullId /\ GetDenom(trace) /= NullId + +ExtendDenomTrace(port, channel, trace) == + IF GetPort(trace) = NullId /\ GetChannel(trace) = NullId + THEN + [ + port |-> port, + channel |-> channel, + denom |-> trace.denom + ] + ELSE + NullDenomTrace + + +DENOM == INSTANCE denom +DenomTypeOK == DENOM!DenomTypeOK + + +============================================================================= +\* Modification History +\* Last modified Thu Nov 05 16:41:47 CET 2020 by andrey +\* Created Thu Nov 05 13:22:40 CET 2020 by andrey diff --git a/applications/transfer/keeper/relay_model/denom_record2.tla b/applications/transfer/keeper/relay_model/denom_record2.tla new file mode 100644 index 0000000000..a49d6c98de --- /dev/null +++ b/applications/transfer/keeper/relay_model/denom_record2.tla @@ -0,0 +1,114 @@ +-------------------------- MODULE denom_record2 ---------------------------- + +(** + The implementation of denomination traces that allows one- or two-step sequences + Represented via records +*) + +EXTENDS identifiers + +CONSTANT + Denoms + +MaxDenomLength == 5 + +DenomPrefixes == [ + port: Identifiers, + channel: Identifiers +] + +NullDenomPrefix == [ + port |-> NullId, + channel |-> 
NullId +] + +MakeDenomPrefix(port, channel) == [ + port |-> port, + channel |-> channel +] + +IsValidDenomPrefix(prefix) == + /\ prefix.port /= NullId + /\ prefix.channel /= NullId + +DenomTraces == [ + prefix1: DenomPrefixes, \* the most recent prefix + prefix0: DenomPrefixes, \* the deepest prefix + denom: Denoms +] + +NullDenomTrace == [ + prefix1 |-> NullDenomPrefix, + prefix0 |-> NullDenomPrefix, + denom |-> NullId +] + + +TraceLen(trace) == + IF trace.prefix0 = NullDenomPrefix + THEN 1 + ELSE IF trace.prefix1 = NullDenomPrefix + THEN 3 + ELSE 5 + +LatestPrefix(trace) == + IF trace.prefix0 = NullDenomPrefix + THEN NullDenomPrefix + ELSE IF trace.prefix1 = NullDenomPrefix + THEN trace.prefix0 + ELSE trace.prefix1 + + +ExtendDenomTrace(port, channel, trace) == + IF trace.prefix0 = NullDenomPrefix + THEN [ + prefix1 |-> NullDenomPrefix, + prefix0 |-> MakeDenomPrefix(port, channel), + denom |-> trace.denom + ] + ELSE IF trace.prefix1 = NullDenomPrefix + THEN [ + prefix1 |-> MakeDenomPrefix(port, channel), + prefix0 |-> trace.prefix0, + denom |-> trace.denom + ] + ELSE NullDenomTrace \* can extend only for two steps + +ReduceDenomTrace(trace) == + IF trace.prefix1 /= NullDenomPrefix + THEN [ + prefix1 |-> NullDenomPrefix, + prefix0 |-> trace.prefix0, + denom |-> trace.denom + ] + ELSE IF trace.prefix0 /= NullDenomPrefix + THEN [ + prefix1 |-> NullDenomPrefix, + prefix0 |-> NullDenomPrefix, + denom |-> trace.denom + ] + ELSE NullDenomTrace \* cannot reduce further + +GetPort(trace) == LatestPrefix(trace).port +GetChannel(trace) == LatestPrefix(trace).channel +GetDenom(trace) == trace.denom + +IsValidDenomTrace(trace) == + /\ GetDenom(trace) /= NullId + /\ IF IsValidDenomPrefix(trace.prefix1) + THEN IsValidDenomPrefix(trace.prefix0) + ELSE + /\ trace.prefix1 = NullDenomPrefix + /\ (IsValidDenomPrefix(trace.prefix0) \/ trace.prefix0 = NullDenomPrefix) + +IsNativeDenomTrace(trace) == LatestPrefix(trace) = NullDenomPrefix /\ GetDenom(trace) /= NullId +IsPrefixedDenomTrace(trace) == LatestPrefix(trace) /= NullDenomPrefix /\ GetDenom(trace) /= NullId + +DENOM == INSTANCE denom +DenomTypeOK == DENOM!DenomTypeOK + + +============================================================================= +\* Modification History +\* Last modified Fri Dec 04 10:38:10 CET 2020 by andrey +\* Created Fri Dec 04 10:22:10 CET 2020 by andrey diff --git a/applications/transfer/keeper/relay_model/denom_sequence.tla b/applications/transfer/keeper/relay_model/denom_sequence.tla new file mode 100644 index 0000000000..29b5f4edf2 --- /dev/null +++ b/applications/transfer/keeper/relay_model/denom_sequence.tla @@ -0,0 +1,47 @@ +-------------------------- MODULE denom_sequence ---------------------------- + +(** + The implementation of denomination traces via sequences +*) + +EXTENDS Integers, Sequences, identifiers + +CONSTANT + Denoms, + MaxDenomLength + + +a <: b == a +AsAddress(seq) == seq <: Seq(STRING) + +UNROLL_DEFAULT_GenSeq == { AsAddress(<< >>) } +UNROLL_TIMES_GenSeq == 5 + +\* This produces denomination sequences up to the given bound +RECURSIVE GenSeq(_) +GenSeq(n) == + IF n = 0 THEN { AsAddress(<< >>) } + ELSE LET Shorter == GenSeq(n-1) IN + { Append(s,x): x \in Identifiers, s \in Shorter } \union Shorter + +DenomTraces == GenSeq(MaxDenomLength) + +ExtendDenomTrace(port, channel, denom) == AsAddress(<>) \o denom + +GetPort(trace) == trace[1] +GetChannel(trace) == trace[2] +GetDenom(trace) == SubSeq(trace, 3, Len(trace)) + +NullDenomTrace == AsAddress(<< >>) + +IsNativeDenomTrace(trace) == GetPort(trace) = NullId 
/\ GetChannel(trace) = NullId /\ GetDenom(trace) /= NullDenomTrace +IsPrefixedDenomTrace(trace) == GetPort(trace) /= NullId /\ GetChannel(trace) /= NullId /\ GetDenom(trace) /= NullDenomTrace + +DENOM == INSTANCE denom +DenomTypeOK == DENOM!DenomTypeOK + + +============================================================================= +\* Modification History +\* Last modified Thu Nov 05 15:29:21 CET 2020 by andrey +\* Created Thu Nov 05 13:22:40 CET 2020 by andrey diff --git a/applications/transfer/keeper/relay_model/identifiers.tla b/applications/transfer/keeper/relay_model/identifiers.tla new file mode 100644 index 0000000000..089f276d8c --- /dev/null +++ b/applications/transfer/keeper/relay_model/identifiers.tla @@ -0,0 +1,10 @@ +-------------------------- MODULE identifiers ---------------------------- + +CONSTANT + Identifiers, + NullId + +============================================================================= +\* Modification History +\* Last modified Thu Nov 05 13:23:12 CET 2020 by andrey +\* Created Thu Nov 05 13:22:40 CET 2020 by andrey diff --git a/applications/transfer/keeper/relay_model/relay.tla b/applications/transfer/keeper/relay_model/relay.tla new file mode 100644 index 0000000000..029df3d7c7 --- /dev/null +++ b/applications/transfer/keeper/relay_model/relay.tla @@ -0,0 +1,278 @@ +-------------------------- MODULE relay ---------------------------- +(** + * A primitive model for account arithmetics and token movement + * of the Cosmos SDK ICS20 Token Transfer + * We completely abstract away many details, + * and want to focus on a minimal spec useful for testing + * + * We also try to make the model modular in that it uses + * denomination traces and accounts via abstract interfaces, + * outlined in denom.tla and account.tla + *) + +EXTENDS Integers, FiniteSets, Sequences, identifiers, denom_record2, account_record + +CONSTANT + MaxAmount + +VARIABLE + error, + bank, + p, \* we want to start with generating single packets, + handler, + history, + count + +Amounts == 0..MaxAmount + +GetSourceEscrowAccount(packet) == MakeEscrowAccount(packet.sourcePort, packet.sourceChannel) +GetDestEscrowAccount(packet) == MakeEscrowAccount(packet.destPort, packet.destChannel) + +FungibleTokenPacketData == [ + sender: AccountIds, + receiver: AccountIds, + denomTrace: DenomTraces, + amount: Amounts +] + +Packets == [ + \* We abstract those packet fields away + \* sequence: uint64 + \* timeoutHeight: Height + \* timeoutTimestamp: uint64 + sourcePort: Identifiers, + sourceChannel: Identifiers, + destPort: Identifiers, + destChannel: Identifiers, + data: FungibleTokenPacketData +] + + +IsSource(packet) == + /\ GetPort(packet.data.denomTrace) = packet.sourcePort + /\ GetChannel(packet.data.denomTrace) = packet.sourceChannel + +\* This function models the port and channel checks that happen when the packet is sent +IsValidSendChannel(packet) == + /\ packet.sourcePort = "transfer" + /\ (packet.sourceChannel = "channel-0" \/ packet.sourceChannel = "channel-1") + /\ packet.destPort = "transfer" + /\ packet.destChannel = "channel-0" + +\* This function models the port and channel checks that happen when relay gets the packet +IsValidRecvChannel(packet) == + /\ packet.sourcePort = "transfer" + /\ packet.sourceChannel = "channel-0" + /\ packet.destPort = "transfer" + /\ (packet.destChannel = "channel-0" \/ packet.destChannel = "channel-1") + + +WellFormedPacket(packet) == + /\ packet.sourcePort /= NullId + /\ packet.sourceChannel /= NullId + /\ packet.destPort /= NullId + /\ packet.destChannel /= 
NullId + +BankWithAccount(abank, account, denom) == + IF <> \in DOMAIN abank + THEN abank + ELSE [x \in DOMAIN bank \union { <> } + |-> IF x = <> + THEN 0 + ELSE bank[x] ] + +IsKnownDenomTrace(trace) == + \E account \in Accounts : + <> \in DOMAIN bank + + +SendTransferPre(packet, pbank) == + LET data == packet.data + trace == data.denomTrace + sender == data.sender + amount == data.amount + escrow == GetSourceEscrowAccount(packet) + IN + /\ WellFormedPacket(packet) + /\ IsValidSendChannel(packet) + /\ IsNativeDenomTrace(trace) \/ (IsValidDenomTrace(trace) /\ IsKnownDenomTrace(trace)) + /\ data.sender /= NullId + /\ <> \in DOMAIN pbank + /\ \/ amount = 0 \* SendTrasfer actually allows for 0 amount + \/ <> \in DOMAIN pbank /\ bank[MakeAccount(sender), trace] >= amount + +SendTransferNext(packet) == + LET data == packet.data IN + LET denom == GetDenom(data.denomTrace) IN + LET amount == data.amount IN + LET sender == data.sender IN + LET escrow == GetSourceEscrowAccount(packet) IN + LET bankwithescrow == BankWithAccount(bank, escrow, data.denomTrace) IN + IF SendTransferPre(packet,bankwithescrow) + THEN + /\ error' = FALSE + \*/\ IBCsend(chain, packet) + /\ IF ~IsSource(packet) + \* This is how the check is encoded in ICS20 and the implementation. + \* The meaning is "IF denom = AsAddress(NativeDenom)" because of the following argument: + \* observe that due to the disjunction in SendTransferPre(packet), we have + \* ~IsSource(packet) /\ SendTransferPre(packet) => denom = AsAddress(NativeDenom) + THEN + \* tokens are from this chain + \* transfer tokens from sender into escrow account + bank' = [bankwithescrow EXCEPT ![MakeAccount(sender), data.denomTrace] = @ - amount, + ![escrow, data.denomTrace] = @ + amount] + ELSE + \* tokens are from other chain. We forward them. 
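+              \* (the vouchers are burnt on send; the refund path mints them back,
+              \* as modeled in OnTimeoutPacketNext)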
+ \* burn sender's money + bank' = [bankwithescrow EXCEPT ![MakeAccount(sender), data.denomTrace] = @ - amount] + ELSE + /\ error' = TRUE + /\ UNCHANGED bank + + +OnRecvPacketPre(packet) == + LET data == packet.data + trace == data.denomTrace + denom == GetDenom(trace) + amount == data.amount + IN + /\ WellFormedPacket(packet) + /\ IsValidRecvChannel(packet) + /\ IsValidDenomTrace(trace) + /\ amount > 0 + \* if there is no receiver account, it is created by the bank + /\ data.receiver /= NullId + /\ IsSource(packet) => + LET escrow == GetDestEscrowAccount(packet) IN + LET denomTrace == ReduceDenomTrace(trace) IN + /\ <> \in DOMAIN bank + /\ bank[escrow, denomTrace] >= amount + + +OnRecvPacketNext(packet) == + LET data == packet.data IN + LET trace == data.denomTrace IN + LET denom == GetDenom(trace) IN + LET amount == data.amount IN + LET receiver == data.receiver IN + /\ IF OnRecvPacketPre(packet) + THEN + \* This condition is necessary so that denomination traces do not exceed the maximum length + /\ (IsSource(packet) \/ TraceLen(trace) < MaxDenomLength) + /\ error' = FALSE + /\ IF IsSource(packet) + THEN + \* transfer from the escrow account to the receiver account + LET denomTrace == ReduceDenomTrace(trace) IN + LET escrow == GetDestEscrowAccount(packet) IN + LET bankwithreceiver == BankWithAccount(bank, MakeAccount(receiver), denomTrace) IN + bank' = [bankwithreceiver + EXCEPT ![MakeAccount(receiver), denomTrace] = @ + amount, + ![escrow, denomTrace] = @ - amount] + ELSE + \* create new tokens with new denomination and transfer it to the receiver account + LET denomTrace == ExtendDenomTrace(packet.destPort, packet.destChannel, trace) IN + LET bankwithreceiver == + BankWithAccount(bank, MakeAccount(receiver), denomTrace) IN + bank' = [bankwithreceiver + EXCEPT ![MakeAccount(receiver), denomTrace] = @ + amount] + ELSE + /\ error' = TRUE + /\ UNCHANGED bank + + +OnTimeoutPacketPre(packet) == + LET data == packet.data + trace == data.denomTrace + denom == GetDenom(trace) + amount == data.amount + IN + /\ WellFormedPacket(packet) + /\ IsValidSendChannel(packet) + /\ IsValidDenomTrace(trace) + /\ data.sender /= NullId + /\ ~IsSource(packet) => + LET escrow == GetSourceEscrowAccount(packet) + IN /\ <> \in DOMAIN bank + /\ bank[escrow, trace] >= amount + + +OnTimeoutPacketNext(packet) == + LET data == packet.data IN + LET trace == data.denomTrace IN + LET denom == GetDenom(data.denomTrace) IN + LET amount == data.amount IN + LET sender == data.sender IN + LET bankwithsender == BankWithAccount(bank, MakeAccount(sender), trace) IN + IF OnTimeoutPacketPre(packet) + THEN + /\ error' = FALSE + /\ IF ~IsSource(packet) + THEN + \* transfer from the escrow acount to the sender account + \* LET denomsuffix == SubSeq(denom, 3, Len(denom)) IN + LET escrow == GetSourceEscrowAccount(packet) IN + bank' = [bankwithsender + EXCEPT ![MakeAccount(sender), trace] = @ + amount, + ![escrow, trace] = @ - amount] + ELSE + \* mint back the money + bank' = [bankwithsender EXCEPT ![MakeAccount(sender), trace] = @ + amount] + + ELSE + /\ error' = TRUE + /\ UNCHANGED bank + + +OnAcknowledgementPacketResultNext(packet) == + IF WellFormedPacket(packet) + THEN + /\ error' = FALSE + /\ UNCHANGED bank + ELSE + /\ error' = TRUE + /\ UNCHANGED bank + + +OnAcknowledgementPacketErrorNext(packet) == + OnTimeoutPacketNext(packet) + +Init == + /\ p \in Packets + /\ bank = [ x \in {<>} |-> 0 ] + /\ count = 0 + /\ history = [ + n \in {0} |-> [ + error |-> FALSE, + packet |-> p, + handler |-> "", + bankBefore |-> bank, + bankAfter |-> 
bank + ] + ] + /\ error = FALSE + /\ handler = "" + +Next == + /\ p' \in Packets + /\ count'= count + 1 + /\ + \/ (SendTransferNext(p) /\ handler' = "SendTransfer") + \/ (OnRecvPacketNext(p) /\ handler' = "OnRecvPacket") + \/ (OnTimeoutPacketNext(p) /\ handler' = "OnTimeoutPacket") + \/ (OnAcknowledgementPacketResultNext(p) /\ handler' = "OnRecvAcknowledgementResult") + \/ (OnAcknowledgementPacketErrorNext(p) /\ handler' = "OnRecvAcknowledgementError") + /\ history' = [ n \in DOMAIN history \union {count'} |-> + IF n = count' THEN + [ packet |-> p, handler |-> handler', error |-> error', bankBefore |-> bank, bankAfter |-> bank' ] + ELSE history[n] + ] + +============================================================================= +\* Modification History +\* Last modified Wed Dec 2 10:15:45 CET 2020 by andrey +\* Last modified Fri Nov 20 12:37:38 CET 2020 by c +\* Last modified Thu Nov 05 20:56:37 CET 2020 by andrey +\* Last modified Fri Oct 30 21:52:38 CET 2020 by widder +\* Created Thu Oct 29 20:45:55 CET 2020 by andrey diff --git a/applications/transfer/keeper/relay_model/relay_tests.tla b/applications/transfer/keeper/relay_model/relay_tests.tla new file mode 100644 index 0000000000..7e7577526d --- /dev/null +++ b/applications/transfer/keeper/relay_model/relay_tests.tla @@ -0,0 +1,96 @@ +-------------------------- MODULE relay_tests ---------------------------- + +EXTENDS Integers, FiniteSets + +Identifiers == {"", "transfer", "channel-0", "channel-1", "cosmos-hub", "ethereum-hub", "bitcoin-hub"} +NullId == "" +MaxAmount == 5 +Denoms == {"", "atom", "eth", "btc" } +AccountIds == {"", "a1", "a2", "a3" } + +VARIABLES error, bank, p, count, history, handler + +INSTANCE relay + +\************************** Tests ****************************** + +\* Generic test for handler pass +TestHandlerPass(handlerName) == + \E s \in DOMAIN history : + /\ history[s].handler = handlerName + /\ history[s].error = FALSE + /\ history[s].packet.data.amount > 0 + +\* Generic test for handler fail +TestHandlerFail(handlerName) == + \E s \in DOMAIN history : + /\ history[s].handler = handlerName + /\ history[s].error = TRUE + /\ history[s].packet.data.amount > 0 + +TestSendTransferPass == TestHandlerPass("SendTransfer") +TestSendTransferPassInv == ~TestSendTransferPass + +TestSendTransferFail == TestHandlerFail("SendTransfer") +TestSendTransferFailInv == ~TestSendTransferFail + +TestOnRecvPacketPass == TestHandlerPass("OnRecvPacket") +TestOnRecvPacketPassInv == ~TestOnRecvPacketPass + +TestOnRecvPacketFail == TestHandlerFail("OnRecvPacket") +TestOnRecvPacketFailInv == ~TestOnRecvPacketFail + +TestOnTimeoutPass == TestHandlerPass("OnTimeoutPacket") +TestOnTimeoutPassInv == ~TestOnTimeoutPass + +TestOnTimeoutFail == TestHandlerFail("OnTimeoutPacket") +TestOnTimeoutFailInv == ~TestOnTimeoutFail + +TestOnRecvAcknowledgementResultPass == TestHandlerPass("OnRecvAcknowledgementResult") +TestOnRecvAcknowledgementResultPassInv == ~TestOnRecvAcknowledgementResultPass + +TestOnRecvAcknowledgementResultFail == TestHandlerFail("OnRecvAcknowledgementResult") +TestOnRecvAcknowledgementResultFailInv == ~TestOnRecvAcknowledgementResultFail + +TestOnRecvAcknowledgementErrorPass == TestHandlerPass("OnRecvAcknowledgementError") +TestOnRecvAcknowledgementErrorPassInv == ~TestOnRecvAcknowledgementErrorPass + +TestOnRecvAcknowledgementErrorFail == TestHandlerFail("OnRecvAcknowledgementError") +TestOnRecvAcknowledgementErrorFailInv == ~TestOnRecvAcknowledgementErrorFail + +Test5Packets == + count >= 5 + +Test5PacketsInv == 
~Test5Packets + +Test5Packets2Different == + /\ count >= 5 + /\ \E s1, s2 \in DOMAIN history : + history[s1].handler /= history[s2].handler + +Test5Packets2DifferentInv == ~Test5Packets2Different + +Test5PacketsAllDifferent == + /\ count >= 5 + /\ \A s1, s2 \in DOMAIN history : + s1 /= s2 => history[s1].handler /= history[s2].handler + +Test5PacketsAllDifferentInv == ~Test5PacketsAllDifferent + +Test5PacketsAllDifferentPass == + /\ Test5PacketsAllDifferent + /\ \A s \in DOMAIN history : + s > 0 => + /\ history[s].error = FALSE + /\ history[s].packet.data.amount > 0 + +Test5PacketsAllDifferentPassInv == ~Test5PacketsAllDifferentPass + +TestUnescrowTokens == + \E s \in DOMAIN history : + /\ IsSource(history[s].packet) + /\ history[s].handler = "OnRecvPacket" + /\ history[s].error = FALSE +TestUnescrowTokensInv == ~TestUnescrowTokens + +============================================================================= diff --git a/applications/transfer/keeper/relay_test.go b/applications/transfer/keeper/relay_test.go new file mode 100644 index 0000000000..89058ac295 --- /dev/null +++ b/applications/transfer/keeper/relay_test.go @@ -0,0 +1,392 @@ +package keeper_test + +import ( + "fmt" + + "github.com/cosmos/cosmos-sdk/simapp" + + sdk "github.com/cosmos/cosmos-sdk/types" + "github.com/cosmos/cosmos-sdk/x/ibc/applications/transfer/types" + clienttypes "github.com/cosmos/cosmos-sdk/x/ibc/core/02-client/types" + channeltypes "github.com/cosmos/cosmos-sdk/x/ibc/core/04-channel/types" + host "github.com/cosmos/cosmos-sdk/x/ibc/core/24-host" + "github.com/cosmos/cosmos-sdk/x/ibc/core/exported" + ibctesting "github.com/cosmos/cosmos-sdk/x/ibc/testing" +) + +// test sending from chainA to chainB using both coin that orignate on +// chainA and coin that orignate on chainB +func (suite *KeeperTestSuite) TestSendTransfer() { + var ( + amount sdk.Coin + channelA, channelB ibctesting.TestChannel + err error + ) + + testCases := []struct { + msg string + malleate func() + sendFromSource bool + expPass bool + }{ + {"successful transfer from source chain", + func() { + _, _, connA, connB := suite.coordinator.SetupClientConnections(suite.chainA, suite.chainB, exported.Tendermint) + channelA, channelB = suite.coordinator.CreateTransferChannels(suite.chainA, suite.chainB, connA, connB, channeltypes.UNORDERED) + amount = sdk.NewCoin(sdk.DefaultBondDenom, sdk.NewInt(100)) + }, true, true}, + {"successful transfer with coin from counterparty chain", + func() { + // send coin from chainA back to chainB + _, _, connA, connB := suite.coordinator.SetupClientConnections(suite.chainA, suite.chainB, exported.Tendermint) + channelA, channelB = suite.coordinator.CreateTransferChannels(suite.chainA, suite.chainB, connA, connB, channeltypes.UNORDERED) + amount = types.GetTransferCoin(channelA.PortID, channelA.ID, sdk.DefaultBondDenom, 100) + }, false, true}, + {"source channel not found", + func() { + // channel references wrong ID + _, _, connA, connB := suite.coordinator.SetupClientConnections(suite.chainA, suite.chainB, exported.Tendermint) + channelA, channelB = suite.coordinator.CreateTransferChannels(suite.chainA, suite.chainB, connA, connB, channeltypes.UNORDERED) + channelA.ID = ibctesting.InvalidID + amount = sdk.NewCoin(sdk.DefaultBondDenom, sdk.NewInt(100)) + }, true, false}, + {"next seq send not found", + func() { + _, _, connA, connB := suite.coordinator.SetupClientConnections(suite.chainA, suite.chainB, exported.Tendermint) + channelA = suite.chainA.NextTestChannel(connA, ibctesting.TransferPort) + channelB = 
suite.chainB.NextTestChannel(connB, ibctesting.TransferPort) + // manually create channel so next seq send is never set + suite.chainA.App.IBCKeeper.ChannelKeeper.SetChannel( + suite.chainA.GetContext(), + channelA.PortID, channelA.ID, + channeltypes.NewChannel(channeltypes.OPEN, channeltypes.ORDERED, channeltypes.NewCounterparty(channelB.PortID, channelB.ID), []string{connA.ID}, ibctesting.DefaultChannelVersion), + ) + suite.chainA.CreateChannelCapability(channelA.PortID, channelA.ID) + amount = sdk.NewCoin(sdk.DefaultBondDenom, sdk.NewInt(100)) + }, true, false}, + + // createOutgoingPacket tests + // - source chain + {"send coin failed", + func() { + _, _, connA, connB := suite.coordinator.SetupClientConnections(suite.chainA, suite.chainB, exported.Tendermint) + channelA, channelB = suite.coordinator.CreateTransferChannels(suite.chainA, suite.chainB, connA, connB, channeltypes.UNORDERED) + amount = sdk.NewCoin("randomdenom", sdk.NewInt(100)) + }, true, false}, + // - receiving chain + {"send from module account failed", + func() { + _, _, connA, connB := suite.coordinator.SetupClientConnections(suite.chainA, suite.chainB, exported.Tendermint) + channelA, channelB = suite.coordinator.CreateTransferChannels(suite.chainA, suite.chainB, connA, connB, channeltypes.UNORDERED) + amount = types.GetTransferCoin(channelA.PortID, channelA.ID, " randomdenom", 100) + }, false, false}, + {"channel capability not found", + func() { + _, _, connA, connB := suite.coordinator.SetupClientConnections(suite.chainA, suite.chainB, exported.Tendermint) + channelA, channelB = suite.coordinator.CreateTransferChannels(suite.chainA, suite.chainB, connA, connB, channeltypes.UNORDERED) + cap := suite.chainA.GetChannelCapability(channelA.PortID, channelA.ID) + + // Release channel capability + suite.chainA.App.ScopedTransferKeeper.ReleaseCapability(suite.chainA.GetContext(), cap) + amount = sdk.NewCoin(sdk.DefaultBondDenom, sdk.NewInt(100)) + }, true, false}, + } + + for _, tc := range testCases { + tc := tc + + suite.Run(fmt.Sprintf("Case %s", tc.msg), func() { + suite.SetupTest() // reset + + tc.malleate() + + if !tc.sendFromSource { + // send coin from chainB to chainA + coinFromBToA := sdk.NewCoin(sdk.DefaultBondDenom, sdk.NewInt(100)) + transferMsg := types.NewMsgTransfer(channelB.PortID, channelB.ID, coinFromBToA, suite.chainB.SenderAccount.GetAddress(), suite.chainA.SenderAccount.GetAddress().String(), clienttypes.NewHeight(0, 110), 0) + err = suite.coordinator.SendMsg(suite.chainB, suite.chainA, channelA.ClientID, transferMsg) + suite.Require().NoError(err) // message committed + + // receive coin on chainA from chainB + fungibleTokenPacket := types.NewFungibleTokenPacketData(coinFromBToA.Denom, coinFromBToA.Amount.Uint64(), suite.chainB.SenderAccount.GetAddress().String(), suite.chainA.SenderAccount.GetAddress().String()) + packet := channeltypes.NewPacket(fungibleTokenPacket.GetBytes(), 1, channelB.PortID, channelB.ID, channelA.PortID, channelA.ID, clienttypes.NewHeight(0, 110), 0) + + // get proof of packet commitment from chainB + packetKey := host.PacketCommitmentKey(packet.GetSourcePort(), packet.GetSourceChannel(), packet.GetSequence()) + proof, proofHeight := suite.chainB.QueryProof(packetKey) + + recvMsg := channeltypes.NewMsgRecvPacket(packet, proof, proofHeight, suite.chainA.SenderAccount.GetAddress()) + err = suite.coordinator.SendMsg(suite.chainA, suite.chainB, channelB.ClientID, recvMsg) + suite.Require().NoError(err) // message committed + } + + err = 
suite.chainA.App.TransferKeeper.SendTransfer( + suite.chainA.GetContext(), channelA.PortID, channelA.ID, amount, + suite.chainA.SenderAccount.GetAddress(), suite.chainB.SenderAccount.GetAddress().String(), clienttypes.NewHeight(0, 110), 0, + ) + + if tc.expPass { + suite.Require().NoError(err) + } else { + suite.Require().Error(err) + } + }) + } +} + +// test receiving coin on chainB with coin that orignate on chainA and +// coin that orignated on chainB (source). The bulk of the testing occurs +// in the test case for loop since setup is intensive for all cases. The +// malleate function allows for testing invalid cases. +func (suite *KeeperTestSuite) TestOnRecvPacket() { + var ( + channelA, channelB ibctesting.TestChannel + trace types.DenomTrace + amount sdk.Int + receiver string + ) + + testCases := []struct { + msg string + malleate func() + recvIsSource bool // the receiving chain is the source of the coin originally + expPass bool + }{ + {"success receive on source chain", func() {}, true, true}, + {"success receive with coin from another chain as source", func() {}, false, true}, + {"empty coin", func() { + trace = types.DenomTrace{} + amount = sdk.ZeroInt() + }, true, false}, + {"invalid receiver address", func() { + receiver = "gaia1scqhwpgsmr6vmztaa7suurfl52my6nd2kmrudl" + }, true, false}, + + // onRecvPacket + // - coin from chain chainA + {"failure: mint zero coin", func() { + amount = sdk.ZeroInt() + }, false, false}, + + // - coin being sent back to original chain (chainB) + {"tries to unescrow more tokens than allowed", func() { + amount = sdk.NewInt(1000000) + }, true, false}, + } + + for _, tc := range testCases { + tc := tc + + suite.Run(fmt.Sprintf("Case %s", tc.msg), func() { + suite.SetupTest() // reset + + clientA, clientB, connA, connB := suite.coordinator.SetupClientConnections(suite.chainA, suite.chainB, exported.Tendermint) + channelA, channelB = suite.coordinator.CreateTransferChannels(suite.chainA, suite.chainB, connA, connB, channeltypes.UNORDERED) + receiver = suite.chainB.SenderAccount.GetAddress().String() // must be explicitly changed in malleate + + amount = sdk.NewInt(100) // must be explicitly changed in malleate + seq := uint64(1) + + if tc.recvIsSource { + // send coin from chainB to chainA, receive them, acknowledge them, and send back to chainB + coinFromBToA := sdk.NewCoin(sdk.DefaultBondDenom, sdk.NewInt(100)) + transferMsg := types.NewMsgTransfer(channelB.PortID, channelB.ID, coinFromBToA, suite.chainB.SenderAccount.GetAddress(), suite.chainA.SenderAccount.GetAddress().String(), clienttypes.NewHeight(0, 110), 0) + err := suite.coordinator.SendMsg(suite.chainB, suite.chainA, channelA.ClientID, transferMsg) + suite.Require().NoError(err) // message committed + + // relay send packet + fungibleTokenPacket := types.NewFungibleTokenPacketData(coinFromBToA.Denom, coinFromBToA.Amount.Uint64(), suite.chainB.SenderAccount.GetAddress().String(), suite.chainA.SenderAccount.GetAddress().String()) + packet := channeltypes.NewPacket(fungibleTokenPacket.GetBytes(), 1, channelB.PortID, channelB.ID, channelA.PortID, channelA.ID, clienttypes.NewHeight(0, 110), 0) + ack := channeltypes.NewResultAcknowledgement([]byte{byte(1)}) + err = suite.coordinator.RelayPacket(suite.chainB, suite.chainA, clientB, clientA, packet, ack.GetBytes()) + suite.Require().NoError(err) // relay committed + + seq++ + + // NOTE: trace must be explicitly changed in malleate to test invalid cases + trace = types.ParseDenomTrace(types.GetPrefixedDenom(channelA.PortID, channelA.ID, 
sdk.DefaultBondDenom))
+			} else {
+				trace = types.ParseDenomTrace(sdk.DefaultBondDenom)
+			}
+
+			// send coin from chainA to chainB
+			transferMsg := types.NewMsgTransfer(channelA.PortID, channelA.ID, sdk.NewCoin(trace.IBCDenom(), amount), suite.chainA.SenderAccount.GetAddress(), receiver, clienttypes.NewHeight(0, 110), 0)
+			err := suite.coordinator.SendMsg(suite.chainA, suite.chainB, channelB.ClientID, transferMsg)
+			suite.Require().NoError(err) // message committed
+
+			tc.malleate()
+
+			data := types.NewFungibleTokenPacketData(trace.GetFullDenomPath(), amount.Uint64(), suite.chainA.SenderAccount.GetAddress().String(), receiver)
+			packet := channeltypes.NewPacket(data.GetBytes(), seq, channelA.PortID, channelA.ID, channelB.PortID, channelB.ID, clienttypes.NewHeight(0, 100), 0)
+
+			err = suite.chainB.App.TransferKeeper.OnRecvPacket(suite.chainB.GetContext(), packet, data)
+
+			if tc.expPass {
+				suite.Require().NoError(err)
+			} else {
+				suite.Require().Error(err)
+			}
+		})
+	}
+}
+
+// TestOnAcknowledgementPacket tests that a successful acknowledgement is a no-op
+// and that a failure acknowledgement leads to a refund when attempting to send
+// from chainA to chainB. If the sender is the source chain, then the denomination
+// being refunded has no trace.
+func (suite *KeeperTestSuite) TestOnAcknowledgementPacket() {
+	var (
+		successAck = channeltypes.NewResultAcknowledgement([]byte{byte(1)})
+		failedAck  = channeltypes.NewErrorAcknowledgement("failed packet transfer")
+
+		channelA, channelB ibctesting.TestChannel
+		trace              types.DenomTrace
+		amount             sdk.Int
+	)
+
+	testCases := []struct {
+		msg      string
+		ack      channeltypes.Acknowledgement
+		malleate func()
+		success  bool // success of ack
+		expPass  bool
+	}{
+		{"success ack causes no-op", successAck, func() {
+			trace = types.ParseDenomTrace(types.GetPrefixedDenom(channelB.PortID, channelB.ID, sdk.DefaultBondDenom))
+		}, true, true},
+		{"successful refund from source chain", failedAck, func() {
+			escrow := types.GetEscrowAddress(channelA.PortID, channelA.ID)
+			trace = types.ParseDenomTrace(sdk.DefaultBondDenom)
+			coin := sdk.NewCoin(sdk.DefaultBondDenom, amount)
+
+			suite.Require().NoError(simapp.FundAccount(suite.chainA.App, suite.chainA.GetContext(), escrow, sdk.NewCoins(coin)))
+		}, false, true},
+		{"unsuccessful refund from source", failedAck,
+			func() {
+				trace = types.ParseDenomTrace(sdk.DefaultBondDenom)
+			}, false, false},
+		{"successful refund with coin from external chain", failedAck,
+			func() {
+				escrow := types.GetEscrowAddress(channelA.PortID, channelA.ID)
+				trace = types.ParseDenomTrace(types.GetPrefixedDenom(channelA.PortID, channelA.ID, sdk.DefaultBondDenom))
+				coin := sdk.NewCoin(trace.IBCDenom(), amount)
+
+				suite.Require().NoError(simapp.FundAccount(suite.chainA.App, suite.chainA.GetContext(), escrow, sdk.NewCoins(coin)))
+			}, false, true},
+	}
+
+	for _, tc := range testCases {
+		tc := tc
+
+		suite.Run(fmt.Sprintf("Case %s", tc.msg), func() {
+			suite.SetupTest() // reset
+			_, _, _, _, channelA, channelB = suite.coordinator.Setup(suite.chainA, suite.chainB, channeltypes.UNORDERED)
+			amount = sdk.NewInt(100) // must be explicitly changed
+
+			tc.malleate()
+
+			data := types.NewFungibleTokenPacketData(trace.GetFullDenomPath(), amount.Uint64(), suite.chainA.SenderAccount.GetAddress().String(), suite.chainB.SenderAccount.GetAddress().String())
+			packet := channeltypes.NewPacket(data.GetBytes(), 1, channelA.PortID, channelA.ID, channelB.PortID, channelB.ID, clienttypes.NewHeight(0, 100), 0)
+
+			preCoin := suite.chainA.App.BankKeeper.GetBalance(suite.chainA.GetContext(), suite.chainA.SenderAccount.GetAddress(), trace.IBCDenom())
+
+			err := suite.chainA.App.TransferKeeper.OnAcknowledgementPacket(suite.chainA.GetContext(), packet, data, tc.ack)
+			if tc.expPass {
+				suite.Require().NoError(err)
+				postCoin := suite.chainA.App.BankKeeper.GetBalance(suite.chainA.GetContext(), suite.chainA.SenderAccount.GetAddress(), trace.IBCDenom())
+				deltaAmount := postCoin.Amount.Sub(preCoin.Amount)
+
+				if tc.success {
+					suite.Require().Equal(int64(0), deltaAmount.Int64(), "successful ack changed balance")
+				} else {
+					suite.Require().Equal(amount, deltaAmount, "failed ack did not trigger refund")
+				}
+
+			} else {
+				suite.Require().Error(err)
+			}
+		})
+	}
+}
+
+// TestOnTimeoutPacket tests the private refundPacketToken function, since
+// OnTimeoutPacket is a simple wrapper over it. The actual timeout does not matter
+// since IBC core logic is not being tested. The test times out a send from chainA
+// to chainB, so the refunds occur on chainA.
+func (suite *KeeperTestSuite) TestOnTimeoutPacket() {
+	var (
+		channelA, channelB ibctesting.TestChannel
+		trace              types.DenomTrace
+		amount             sdk.Int
+		sender             string
+	)
+
+	testCases := []struct {
+		msg      string
+		malleate func()
+		expPass  bool
+	}{
+		{"successful timeout from sender as source chain",
+			func() {
+				escrow := types.GetEscrowAddress(channelA.PortID, channelA.ID)
+				trace = types.ParseDenomTrace(sdk.DefaultBondDenom)
+				coin := sdk.NewCoin(trace.IBCDenom(), amount)
+
+				suite.Require().NoError(simapp.FundAccount(suite.chainA.App, suite.chainA.GetContext(), escrow, sdk.NewCoins(coin)))
+			}, true},
+		{"successful timeout from external chain",
+			func() {
+				escrow := types.GetEscrowAddress(channelA.PortID, channelA.ID)
+				trace = types.ParseDenomTrace(types.GetPrefixedDenom(channelA.PortID, channelA.ID, sdk.DefaultBondDenom))
+				coin := sdk.NewCoin(trace.IBCDenom(), amount)
+
+				suite.Require().NoError(simapp.FundAccount(suite.chainA.App, suite.chainA.GetContext(), escrow, sdk.NewCoins(coin)))
+			}, true},
+		{"no balance for coin denom",
+			func() {
+				trace = types.ParseDenomTrace("bitcoin")
+			}, false},
+		{"unescrow failed",
+			func() {
+				trace = types.ParseDenomTrace(sdk.DefaultBondDenom)
+			}, false},
+		{"mint failed",
+			func() {
+				trace = types.ParseDenomTrace(types.GetPrefixedDenom(channelA.PortID, channelA.ID, sdk.DefaultBondDenom))
+				amount = sdk.OneInt()
+				sender = "invalid address"
+			}, false},
+	}
+
+	for _, tc := range testCases {
+		tc := tc
+
+		suite.Run(fmt.Sprintf("Case %s", tc.msg), func() {
+			suite.SetupTest() // reset
+
+			_, _, connA, connB := suite.coordinator.SetupClientConnections(suite.chainA, suite.chainB, exported.Tendermint)
+			channelA, channelB = suite.coordinator.CreateTransferChannels(suite.chainA, suite.chainB, connA, connB, channeltypes.UNORDERED)
+			amount = sdk.NewInt(100) // must be explicitly changed
+			sender = suite.chainA.SenderAccount.GetAddress().String()
+
+			tc.malleate()
+
+			data := types.NewFungibleTokenPacketData(trace.GetFullDenomPath(), amount.Uint64(), sender, suite.chainB.SenderAccount.GetAddress().String())
+			packet := channeltypes.NewPacket(data.GetBytes(), 1, channelA.PortID, channelA.ID, channelB.PortID, channelB.ID, clienttypes.NewHeight(0, 100), 0)
+
+			preCoin := suite.chainA.App.BankKeeper.GetBalance(suite.chainA.GetContext(), suite.chainA.SenderAccount.GetAddress(), trace.IBCDenom())
+
+			err := suite.chainA.App.TransferKeeper.OnTimeoutPacket(suite.chainA.GetContext(), packet, data)
+
+			postCoin := 
suite.chainA.App.BankKeeper.GetBalance(suite.chainA.GetContext(), suite.chainA.SenderAccount.GetAddress(), trace.IBCDenom()) + deltaAmount := postCoin.Amount.Sub(preCoin.Amount) + + if tc.expPass { + suite.Require().NoError(err) + suite.Require().Equal(amount.Int64(), deltaAmount.Int64(), "successful timeout did not trigger refund") + } else { + suite.Require().Error(err) + } + }) + } +} diff --git a/applications/transfer/module.go b/applications/transfer/module.go new file mode 100644 index 0000000000..25290d69a6 --- /dev/null +++ b/applications/transfer/module.go @@ -0,0 +1,438 @@ +package transfer + +import ( + "context" + "encoding/json" + "fmt" + "math" + "math/rand" + + "github.com/grpc-ecosystem/grpc-gateway/runtime" + + "github.com/gorilla/mux" + "github.com/spf13/cobra" + + abci "github.com/tendermint/tendermint/abci/types" + + "github.com/cosmos/cosmos-sdk/client" + "github.com/cosmos/cosmos-sdk/codec" + codectypes "github.com/cosmos/cosmos-sdk/codec/types" + sdk "github.com/cosmos/cosmos-sdk/types" + sdkerrors "github.com/cosmos/cosmos-sdk/types/errors" + "github.com/cosmos/cosmos-sdk/types/module" + simtypes "github.com/cosmos/cosmos-sdk/types/simulation" + capabilitytypes "github.com/cosmos/cosmos-sdk/x/capability/types" + "github.com/cosmos/cosmos-sdk/x/ibc/applications/transfer/client/cli" + "github.com/cosmos/cosmos-sdk/x/ibc/applications/transfer/keeper" + "github.com/cosmos/cosmos-sdk/x/ibc/applications/transfer/simulation" + "github.com/cosmos/cosmos-sdk/x/ibc/applications/transfer/types" + channeltypes "github.com/cosmos/cosmos-sdk/x/ibc/core/04-channel/types" + porttypes "github.com/cosmos/cosmos-sdk/x/ibc/core/05-port/types" + host "github.com/cosmos/cosmos-sdk/x/ibc/core/24-host" +) + +var ( + _ module.AppModule = AppModule{} + _ porttypes.IBCModule = AppModule{} + _ module.AppModuleBasic = AppModuleBasic{} +) + +// AppModuleBasic is the IBC Transfer AppModuleBasic +type AppModuleBasic struct{} + +// Name implements AppModuleBasic interface +func (AppModuleBasic) Name() string { + return types.ModuleName +} + +// RegisterLegacyAminoCodec implements AppModuleBasic interface +func (AppModuleBasic) RegisterLegacyAminoCodec(cdc *codec.LegacyAmino) { + types.RegisterLegacyAminoCodec(cdc) +} + +// RegisterInterfaces registers module concrete types into protobuf Any. +func (AppModuleBasic) RegisterInterfaces(registry codectypes.InterfaceRegistry) { + types.RegisterInterfaces(registry) +} + +// DefaultGenesis returns default genesis state as raw bytes for the ibc +// transfer module. +func (AppModuleBasic) DefaultGenesis(cdc codec.JSONMarshaler) json.RawMessage { + return cdc.MustMarshalJSON(types.DefaultGenesisState()) +} + +// ValidateGenesis performs genesis state validation for the ibc transfer module. +func (AppModuleBasic) ValidateGenesis(cdc codec.JSONMarshaler, config client.TxEncodingConfig, bz json.RawMessage) error { + var gs types.GenesisState + if err := cdc.UnmarshalJSON(bz, &gs); err != nil { + return fmt.Errorf("failed to unmarshal %s genesis state: %w", types.ModuleName, err) + } + + return gs.Validate() +} + +// RegisterRESTRoutes implements AppModuleBasic interface +func (AppModuleBasic) RegisterRESTRoutes(clientCtx client.Context, rtr *mux.Router) { +} + +// RegisterGRPCGatewayRoutes registers the gRPC Gateway routes for the ibc-transfer module. 
+func (AppModuleBasic) RegisterGRPCGatewayRoutes(clientCtx client.Context, mux *runtime.ServeMux) { + types.RegisterQueryHandlerClient(context.Background(), mux, types.NewQueryClient(clientCtx)) +} + +// GetTxCmd implements AppModuleBasic interface +func (AppModuleBasic) GetTxCmd() *cobra.Command { + return cli.NewTxCmd() +} + +// GetQueryCmd implements AppModuleBasic interface +func (AppModuleBasic) GetQueryCmd() *cobra.Command { + return cli.GetQueryCmd() +} + +// AppModule represents the AppModule for this module +type AppModule struct { + AppModuleBasic + keeper keeper.Keeper +} + +// NewAppModule creates a new 20-transfer module +func NewAppModule(k keeper.Keeper) AppModule { + return AppModule{ + keeper: k, + } +} + +// RegisterInvariants implements the AppModule interface +func (AppModule) RegisterInvariants(ir sdk.InvariantRegistry) { + // TODO +} + +// Route implements the AppModule interface +func (am AppModule) Route() sdk.Route { + return sdk.NewRoute(types.RouterKey, NewHandler(am.keeper)) +} + +// QuerierRoute implements the AppModule interface +func (AppModule) QuerierRoute() string { + return types.QuerierRoute +} + +// LegacyQuerierHandler implements the AppModule interface +func (am AppModule) LegacyQuerierHandler(*codec.LegacyAmino) sdk.Querier { + return nil +} + +// RegisterServices registers module services. +func (am AppModule) RegisterServices(cfg module.Configurator) { + types.RegisterMsgServer(cfg.MsgServer(), am.keeper) + types.RegisterQueryServer(cfg.QueryServer(), am.keeper) +} + +// InitGenesis performs genesis initialization for the ibc-transfer module. It returns +// no validator updates. +func (am AppModule) InitGenesis(ctx sdk.Context, cdc codec.JSONMarshaler, data json.RawMessage) []abci.ValidatorUpdate { + var genesisState types.GenesisState + cdc.MustUnmarshalJSON(data, &genesisState) + am.keeper.InitGenesis(ctx, genesisState) + return []abci.ValidatorUpdate{} +} + +// ExportGenesis returns the exported genesis state as raw bytes for the ibc-transfer +// module. +func (am AppModule) ExportGenesis(ctx sdk.Context, cdc codec.JSONMarshaler) json.RawMessage { + gs := am.keeper.ExportGenesis(ctx) + return cdc.MustMarshalJSON(gs) +} + +// ConsensusVersion implements AppModule/ConsensusVersion. +func (AppModule) ConsensusVersion() uint64 { return 1 } + +// BeginBlock implements the AppModule interface +func (am AppModule) BeginBlock(ctx sdk.Context, req abci.RequestBeginBlock) { +} + +// EndBlock implements the AppModule interface +func (am AppModule) EndBlock(ctx sdk.Context, req abci.RequestEndBlock) []abci.ValidatorUpdate { + return []abci.ValidatorUpdate{} +} + +//____________________________________________________________________________ + +// AppModuleSimulation functions + +// GenerateGenesisState creates a randomized GenState of the transfer module. +func (AppModule) GenerateGenesisState(simState *module.SimulationState) { + simulation.RandomizedGenState(simState) +} + +// ProposalContents doesn't return any content functions for governance proposals. +func (AppModule) ProposalContents(_ module.SimulationState) []simtypes.WeightedProposalContent { + return nil +} + +// RandomizedParams creates randomized ibc-transfer param changes for the simulator. 
+func (AppModule) RandomizedParams(r *rand.Rand) []simtypes.ParamChange { + return simulation.ParamChanges(r) +} + +// RegisterStoreDecoder registers a decoder for transfer module's types +func (am AppModule) RegisterStoreDecoder(sdr sdk.StoreDecoderRegistry) { + sdr[types.StoreKey] = simulation.NewDecodeStore(am.keeper) +} + +// WeightedOperations returns the all the transfer module operations with their respective weights. +func (am AppModule) WeightedOperations(_ module.SimulationState) []simtypes.WeightedOperation { + return nil +} + +//____________________________________________________________________________ + +// ValidateTransferChannelParams does validation of a newly created transfer channel. A transfer +// channel must be UNORDERED, use the correct port (by default 'transfer'), and use the current +// supported version. Only 2^32 channels are allowed to be created. +func ValidateTransferChannelParams( + ctx sdk.Context, + keeper keeper.Keeper, + order channeltypes.Order, + portID string, + channelID string, + version string, +) error { + // NOTE: for escrow address security only 2^32 channels are allowed to be created + // Issue: https://github.com/cosmos/cosmos-sdk/issues/7737 + channelSequence, err := channeltypes.ParseChannelSequence(channelID) + if err != nil { + return err + } + if channelSequence > uint64(math.MaxUint32) { + return sdkerrors.Wrapf(types.ErrMaxTransferChannels, "channel sequence %d is greater than max allowed transfer channels %d", channelSequence, uint64(math.MaxUint32)) + } + if order != channeltypes.UNORDERED { + return sdkerrors.Wrapf(channeltypes.ErrInvalidChannelOrdering, "expected %s channel, got %s ", channeltypes.UNORDERED, order) + } + + // Require portID is the portID transfer module is bound to + boundPort := keeper.GetPort(ctx) + if boundPort != portID { + return sdkerrors.Wrapf(porttypes.ErrInvalidPort, "invalid port: %s, expected %s", portID, boundPort) + } + + if version != types.Version { + return sdkerrors.Wrapf(types.ErrInvalidVersion, "got %s, expected %s", version, types.Version) + } + return nil +} + +// OnChanOpenInit implements the IBCModule interface +func (am AppModule) OnChanOpenInit( + ctx sdk.Context, + order channeltypes.Order, + connectionHops []string, + portID string, + channelID string, + chanCap *capabilitytypes.Capability, + counterparty channeltypes.Counterparty, + version string, +) error { + if err := ValidateTransferChannelParams(ctx, am.keeper, order, portID, channelID, version); err != nil { + return err + } + + // Claim channel capability passed back by IBC module + if err := am.keeper.ClaimCapability(ctx, chanCap, host.ChannelCapabilityPath(portID, channelID)); err != nil { + return err + } + + return nil +} + +// OnChanOpenTry implements the IBCModule interface +func (am AppModule) OnChanOpenTry( + ctx sdk.Context, + order channeltypes.Order, + connectionHops []string, + portID, + channelID string, + chanCap *capabilitytypes.Capability, + counterparty channeltypes.Counterparty, + version, + counterpartyVersion string, +) error { + if err := ValidateTransferChannelParams(ctx, am.keeper, order, portID, channelID, version); err != nil { + return err + } + + if counterpartyVersion != types.Version { + return sdkerrors.Wrapf(types.ErrInvalidVersion, "invalid counterparty version: got: %s, expected %s", counterpartyVersion, types.Version) + } + + // Module may have already claimed capability in OnChanOpenInit in the case of crossing hellos + // (ie chainA and chainB both call ChanOpenInit before one of them calls 
ChanOpenTry) + // If module can already authenticate the capability then module already owns it so we don't need to claim + // Otherwise, module does not have channel capability and we must claim it from IBC + if !am.keeper.AuthenticateCapability(ctx, chanCap, host.ChannelCapabilityPath(portID, channelID)) { + // Only claim channel capability passed back by IBC module if we do not already own it + if err := am.keeper.ClaimCapability(ctx, chanCap, host.ChannelCapabilityPath(portID, channelID)); err != nil { + return err + } + } + + return nil +} + +// OnChanOpenAck implements the IBCModule interface +func (am AppModule) OnChanOpenAck( + ctx sdk.Context, + portID, + channelID string, + counterpartyVersion string, +) error { + if counterpartyVersion != types.Version { + return sdkerrors.Wrapf(types.ErrInvalidVersion, "invalid counterparty version: %s, expected %s", counterpartyVersion, types.Version) + } + return nil +} + +// OnChanOpenConfirm implements the IBCModule interface +func (am AppModule) OnChanOpenConfirm( + ctx sdk.Context, + portID, + channelID string, +) error { + return nil +} + +// OnChanCloseInit implements the IBCModule interface +func (am AppModule) OnChanCloseInit( + ctx sdk.Context, + portID, + channelID string, +) error { + // Disallow user-initiated channel closing for transfer channels + return sdkerrors.Wrap(sdkerrors.ErrInvalidRequest, "user cannot close channel") +} + +// OnChanCloseConfirm implements the IBCModule interface +func (am AppModule) OnChanCloseConfirm( + ctx sdk.Context, + portID, + channelID string, +) error { + return nil +} + +// OnRecvPacket implements the IBCModule interface +func (am AppModule) OnRecvPacket( + ctx sdk.Context, + packet channeltypes.Packet, +) (*sdk.Result, []byte, error) { + var data types.FungibleTokenPacketData + if err := types.ModuleCdc.UnmarshalJSON(packet.GetData(), &data); err != nil { + return nil, nil, sdkerrors.Wrapf(sdkerrors.ErrUnknownRequest, "cannot unmarshal ICS-20 transfer packet data: %s", err.Error()) + } + + acknowledgement := channeltypes.NewResultAcknowledgement([]byte{byte(1)}) + + err := am.keeper.OnRecvPacket(ctx, packet, data) + if err != nil { + acknowledgement = channeltypes.NewErrorAcknowledgement(err.Error()) + } + + ctx.EventManager().EmitEvent( + sdk.NewEvent( + types.EventTypePacket, + sdk.NewAttribute(sdk.AttributeKeyModule, types.ModuleName), + sdk.NewAttribute(types.AttributeKeyReceiver, data.Receiver), + sdk.NewAttribute(types.AttributeKeyDenom, data.Denom), + sdk.NewAttribute(types.AttributeKeyAmount, fmt.Sprintf("%d", data.Amount)), + sdk.NewAttribute(types.AttributeKeyAckSuccess, fmt.Sprintf("%t", err != nil)), + ), + ) + + // NOTE: acknowledgement will be written synchronously during IBC handler execution. 
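+	// The acknowledgement (success or error) is returned to the core IBC
+	// message handler along with the events emitted above; the handler is
+	// responsible for writing the acknowledgement bytes to state.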
+ return &sdk.Result{ + Events: ctx.EventManager().Events().ToABCIEvents(), + }, acknowledgement.GetBytes(), nil +} + +// OnAcknowledgementPacket implements the IBCModule interface +func (am AppModule) OnAcknowledgementPacket( + ctx sdk.Context, + packet channeltypes.Packet, + acknowledgement []byte, +) (*sdk.Result, error) { + var ack channeltypes.Acknowledgement + if err := types.ModuleCdc.UnmarshalJSON(acknowledgement, &ack); err != nil { + return nil, sdkerrors.Wrapf(sdkerrors.ErrUnknownRequest, "cannot unmarshal ICS-20 transfer packet acknowledgement: %v", err) + } + var data types.FungibleTokenPacketData + if err := types.ModuleCdc.UnmarshalJSON(packet.GetData(), &data); err != nil { + return nil, sdkerrors.Wrapf(sdkerrors.ErrUnknownRequest, "cannot unmarshal ICS-20 transfer packet data: %s", err.Error()) + } + + if err := am.keeper.OnAcknowledgementPacket(ctx, packet, data, ack); err != nil { + return nil, err + } + + ctx.EventManager().EmitEvent( + sdk.NewEvent( + types.EventTypePacket, + sdk.NewAttribute(sdk.AttributeKeyModule, types.ModuleName), + sdk.NewAttribute(types.AttributeKeyReceiver, data.Receiver), + sdk.NewAttribute(types.AttributeKeyDenom, data.Denom), + sdk.NewAttribute(types.AttributeKeyAmount, fmt.Sprintf("%d", data.Amount)), + sdk.NewAttribute(types.AttributeKeyAck, ack.String()), + ), + ) + + switch resp := ack.Response.(type) { + case *channeltypes.Acknowledgement_Result: + ctx.EventManager().EmitEvent( + sdk.NewEvent( + types.EventTypePacket, + sdk.NewAttribute(types.AttributeKeyAckSuccess, string(resp.Result)), + ), + ) + case *channeltypes.Acknowledgement_Error: + ctx.EventManager().EmitEvent( + sdk.NewEvent( + types.EventTypePacket, + sdk.NewAttribute(types.AttributeKeyAckError, resp.Error), + ), + ) + } + + return &sdk.Result{ + Events: ctx.EventManager().Events().ToABCIEvents(), + }, nil +} + +// OnTimeoutPacket implements the IBCModule interface +func (am AppModule) OnTimeoutPacket( + ctx sdk.Context, + packet channeltypes.Packet, +) (*sdk.Result, error) { + var data types.FungibleTokenPacketData + if err := types.ModuleCdc.UnmarshalJSON(packet.GetData(), &data); err != nil { + return nil, sdkerrors.Wrapf(sdkerrors.ErrUnknownRequest, "cannot unmarshal ICS-20 transfer packet data: %s", err.Error()) + } + // refund tokens + if err := am.keeper.OnTimeoutPacket(ctx, packet, data); err != nil { + return nil, err + } + + ctx.EventManager().EmitEvent( + sdk.NewEvent( + types.EventTypeTimeout, + sdk.NewAttribute(sdk.AttributeKeyModule, types.ModuleName), + sdk.NewAttribute(types.AttributeKeyRefundReceiver, data.Sender), + sdk.NewAttribute(types.AttributeKeyRefundDenom, data.Denom), + sdk.NewAttribute(types.AttributeKeyRefundAmount, fmt.Sprintf("%d", data.Amount)), + ), + ) + + return &sdk.Result{ + Events: ctx.EventManager().Events().ToABCIEvents(), + }, nil +} diff --git a/applications/transfer/module_test.go b/applications/transfer/module_test.go new file mode 100644 index 0000000000..d2acfb4043 --- /dev/null +++ b/applications/transfer/module_test.go @@ -0,0 +1,246 @@ +package transfer_test + +import ( + "math" + + capabilitytypes "github.com/cosmos/cosmos-sdk/x/capability/types" + "github.com/cosmos/cosmos-sdk/x/ibc/applications/transfer/types" + channeltypes "github.com/cosmos/cosmos-sdk/x/ibc/core/04-channel/types" + host "github.com/cosmos/cosmos-sdk/x/ibc/core/24-host" + "github.com/cosmos/cosmos-sdk/x/ibc/core/exported" + ibctesting "github.com/cosmos/cosmos-sdk/x/ibc/testing" +) + +func (suite *TransferTestSuite) TestOnChanOpenInit() { + var ( + channel 
*channeltypes.Channel + testChannel ibctesting.TestChannel + connA *ibctesting.TestConnection + chanCap *capabilitytypes.Capability + ) + + testCases := []struct { + name string + malleate func() + expPass bool + }{ + + { + "success", func() {}, true, + }, + { + "max channels reached", func() { + testChannel.ID = channeltypes.FormatChannelIdentifier(math.MaxUint32 + 1) + }, false, + }, + { + "invalid order - ORDERED", func() { + channel.Ordering = channeltypes.ORDERED + }, false, + }, + { + "invalid port ID", func() { + testChannel = suite.chainA.NextTestChannel(connA, ibctesting.MockPort) + }, false, + }, + { + "invalid version", func() { + channel.Version = "version" + }, false, + }, + { + "capability already claimed", func() { + err := suite.chainA.App.ScopedTransferKeeper.ClaimCapability(suite.chainA.GetContext(), chanCap, host.ChannelCapabilityPath(testChannel.PortID, testChannel.ID)) + suite.Require().NoError(err) + }, false, + }, + } + + for _, tc := range testCases { + tc := tc + + suite.Run(tc.name, func() { + suite.SetupTest() // reset + + _, _, connA, _ = suite.coordinator.SetupClientConnections(suite.chainA, suite.chainB, exported.Tendermint) + testChannel = suite.chainA.NextTestChannel(connA, ibctesting.TransferPort) + counterparty := channeltypes.NewCounterparty(testChannel.PortID, testChannel.ID) + channel = &channeltypes.Channel{ + State: channeltypes.INIT, + Ordering: channeltypes.UNORDERED, + Counterparty: counterparty, + ConnectionHops: []string{connA.ID}, + Version: types.Version, + } + + module, _, err := suite.chainA.App.IBCKeeper.PortKeeper.LookupModuleByPort(suite.chainA.GetContext(), ibctesting.TransferPort) + suite.Require().NoError(err) + + chanCap, err = suite.chainA.App.ScopedIBCKeeper.NewCapability(suite.chainA.GetContext(), host.ChannelCapabilityPath(ibctesting.TransferPort, testChannel.ID)) + suite.Require().NoError(err) + + cbs, ok := suite.chainA.App.IBCKeeper.Router.GetRoute(module) + suite.Require().True(ok) + + tc.malleate() // explicitly change fields in channel and testChannel + + err = cbs.OnChanOpenInit(suite.chainA.GetContext(), channel.Ordering, channel.GetConnectionHops(), + testChannel.PortID, testChannel.ID, chanCap, channel.Counterparty, channel.GetVersion(), + ) + + if tc.expPass { + suite.Require().NoError(err) + } else { + suite.Require().Error(err) + } + + }) + } +} + +func (suite *TransferTestSuite) TestOnChanOpenTry() { + var ( + channel *channeltypes.Channel + testChannel ibctesting.TestChannel + connA *ibctesting.TestConnection + chanCap *capabilitytypes.Capability + counterpartyVersion string + ) + + testCases := []struct { + name string + malleate func() + expPass bool + }{ + + { + "success", func() {}, true, + }, + { + "max channels reached", func() { + testChannel.ID = channeltypes.FormatChannelIdentifier(math.MaxUint32 + 1) + }, false, + }, + { + "capability already claimed in INIT should pass", func() { + err := suite.chainA.App.ScopedTransferKeeper.ClaimCapability(suite.chainA.GetContext(), chanCap, host.ChannelCapabilityPath(testChannel.PortID, testChannel.ID)) + suite.Require().NoError(err) + }, true, + }, + { + "invalid order - ORDERED", func() { + channel.Ordering = channeltypes.ORDERED + }, false, + }, + { + "invalid port ID", func() { + testChannel = suite.chainA.NextTestChannel(connA, ibctesting.MockPort) + }, false, + }, + { + "invalid version", func() { + channel.Version = "version" + }, false, + }, + { + "invalid counterparty version", func() { + counterpartyVersion = "version" + }, false, + }, + } + + for _, tc := 
range testCases { + tc := tc + + suite.Run(tc.name, func() { + suite.SetupTest() // reset + + _, _, connA, _ = suite.coordinator.SetupClientConnections(suite.chainA, suite.chainB, exported.Tendermint) + testChannel = suite.chainA.NextTestChannel(connA, ibctesting.TransferPort) + counterparty := channeltypes.NewCounterparty(testChannel.PortID, testChannel.ID) + channel = &channeltypes.Channel{ + State: channeltypes.TRYOPEN, + Ordering: channeltypes.UNORDERED, + Counterparty: counterparty, + ConnectionHops: []string{connA.ID}, + Version: types.Version, + } + counterpartyVersion = types.Version + + module, _, err := suite.chainA.App.IBCKeeper.PortKeeper.LookupModuleByPort(suite.chainA.GetContext(), ibctesting.TransferPort) + suite.Require().NoError(err) + + chanCap, err = suite.chainA.App.ScopedIBCKeeper.NewCapability(suite.chainA.GetContext(), host.ChannelCapabilityPath(ibctesting.TransferPort, testChannel.ID)) + suite.Require().NoError(err) + + cbs, ok := suite.chainA.App.IBCKeeper.Router.GetRoute(module) + suite.Require().True(ok) + + tc.malleate() // explicitly change fields in channel and testChannel + + err = cbs.OnChanOpenTry(suite.chainA.GetContext(), channel.Ordering, channel.GetConnectionHops(), + testChannel.PortID, testChannel.ID, chanCap, channel.Counterparty, channel.GetVersion(), counterpartyVersion, + ) + + if tc.expPass { + suite.Require().NoError(err) + } else { + suite.Require().Error(err) + } + + }) + } +} + +func (suite *TransferTestSuite) TestOnChanOpenAck() { + var ( + testChannel ibctesting.TestChannel + connA *ibctesting.TestConnection + counterpartyVersion string + ) + + testCases := []struct { + name string + malleate func() + expPass bool + }{ + + { + "success", func() {}, true, + }, + { + "invalid counterparty version", func() { + counterpartyVersion = "version" + }, false, + }, + } + + for _, tc := range testCases { + tc := tc + + suite.Run(tc.name, func() { + suite.SetupTest() // reset + + _, _, connA, _ = suite.coordinator.SetupClientConnections(suite.chainA, suite.chainB, exported.Tendermint) + testChannel = suite.chainA.NextTestChannel(connA, ibctesting.TransferPort) + counterpartyVersion = types.Version + + module, _, err := suite.chainA.App.IBCKeeper.PortKeeper.LookupModuleByPort(suite.chainA.GetContext(), ibctesting.TransferPort) + suite.Require().NoError(err) + + cbs, ok := suite.chainA.App.IBCKeeper.Router.GetRoute(module) + suite.Require().True(ok) + + tc.malleate() // explicitly change fields in channel and testChannel + + err = cbs.OnChanOpenAck(suite.chainA.GetContext(), testChannel.PortID, testChannel.ID, counterpartyVersion) + + if tc.expPass { + suite.Require().NoError(err) + } else { + suite.Require().Error(err) + } + + }) + } +} diff --git a/applications/transfer/simulation/decoder.go b/applications/transfer/simulation/decoder.go new file mode 100644 index 0000000000..df78345038 --- /dev/null +++ b/applications/transfer/simulation/decoder.go @@ -0,0 +1,33 @@ +package simulation + +import ( + "bytes" + "fmt" + + "github.com/cosmos/cosmos-sdk/types/kv" + "github.com/cosmos/cosmos-sdk/x/ibc/applications/transfer/types" +) + +// TransferUnmarshaler defines the expected encoding store functions. +type TransferUnmarshaler interface { + MustUnmarshalDenomTrace([]byte) types.DenomTrace +} + +// NewDecodeStore returns a decoder function closure that unmarshals the KVPair's +// Value to the corresponding DenomTrace type. 
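+// Keys prefixed with PortKey (0x01) are rendered as raw port ID strings, keys
+// prefixed with DenomTraceKey (0x02) are unmarshalled into DenomTrace values
+// and printed via IBCDenom, and any other key prefix causes a panic.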
+func NewDecodeStore(cdc TransferUnmarshaler) func(kvA, kvB kv.Pair) string { + return func(kvA, kvB kv.Pair) string { + switch { + case bytes.Equal(kvA.Key[:1], types.PortKey): + return fmt.Sprintf("Port A: %s\nPort B: %s", string(kvA.Value), string(kvB.Value)) + + case bytes.Equal(kvA.Key[:1], types.DenomTraceKey): + denomTraceA := cdc.MustUnmarshalDenomTrace(kvA.Value) + denomTraceB := cdc.MustUnmarshalDenomTrace(kvB.Value) + return fmt.Sprintf("DenomTrace A: %s\nDenomTrace B: %s", denomTraceA.IBCDenom(), denomTraceB.IBCDenom()) + + default: + panic(fmt.Sprintf("invalid %s key prefix %X", types.ModuleName, kvA.Key[:1])) + } + } +} diff --git a/applications/transfer/simulation/decoder_test.go b/applications/transfer/simulation/decoder_test.go new file mode 100644 index 0000000000..729a067e02 --- /dev/null +++ b/applications/transfer/simulation/decoder_test.go @@ -0,0 +1,59 @@ +package simulation_test + +import ( + "fmt" + "testing" + + "github.com/stretchr/testify/require" + + "github.com/cosmos/cosmos-sdk/simapp" + "github.com/cosmos/cosmos-sdk/types/kv" + "github.com/cosmos/cosmos-sdk/x/ibc/applications/transfer/simulation" + "github.com/cosmos/cosmos-sdk/x/ibc/applications/transfer/types" +) + +func TestDecodeStore(t *testing.T) { + app := simapp.Setup(false) + dec := simulation.NewDecodeStore(app.TransferKeeper) + + trace := types.DenomTrace{ + BaseDenom: "uatom", + Path: "transfer/channelToA", + } + + kvPairs := kv.Pairs{ + Pairs: []kv.Pair{ + { + Key: types.PortKey, + Value: []byte(types.PortID), + }, + { + Key: types.DenomTraceKey, + Value: app.TransferKeeper.MustMarshalDenomTrace(trace), + }, + { + Key: []byte{0x99}, + Value: []byte{0x99}, + }, + }, + } + tests := []struct { + name string + expectedLog string + }{ + {"PortID", fmt.Sprintf("Port A: %s\nPort B: %s", types.PortID, types.PortID)}, + {"DenomTrace", fmt.Sprintf("DenomTrace A: %s\nDenomTrace B: %s", trace.IBCDenom(), trace.IBCDenom())}, + {"other", ""}, + } + + for i, tt := range tests { + i, tt := i, tt + t.Run(tt.name, func(t *testing.T) { + if i == len(tests)-1 { + require.Panics(t, func() { dec(kvPairs.Pairs[i], kvPairs.Pairs[i]) }, tt.name) + } else { + require.Equal(t, tt.expectedLog, dec(kvPairs.Pairs[i], kvPairs.Pairs[i]), tt.name) + } + }) + } +} diff --git a/applications/transfer/simulation/genesis.go b/applications/transfer/simulation/genesis.go new file mode 100644 index 0000000000..a51bce9f47 --- /dev/null +++ b/applications/transfer/simulation/genesis.go @@ -0,0 +1,54 @@ +package simulation + +import ( + "encoding/json" + "fmt" + "math/rand" + "strings" + + "github.com/cosmos/cosmos-sdk/types/module" + simtypes "github.com/cosmos/cosmos-sdk/types/simulation" + "github.com/cosmos/cosmos-sdk/x/ibc/applications/transfer/types" +) + +// Simulation parameter constants +const port = "port_id" + +// RadomEnabled randomized send or receive enabled param with 75% prob of being true. +func RadomEnabled(r *rand.Rand) bool { + return r.Int63n(101) <= 75 +} + +// RandomizedGenState generates a random GenesisState for transfer. 
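+// The port identifier is a random lowercase string of length 20, the
+// send/receive enabled parameters are each drawn with roughly 75% probability
+// of being true (see RadomEnabled), and no denomination traces are generated.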
+func RandomizedGenState(simState *module.SimulationState) { + var portID string + simState.AppParams.GetOrGenerate( + simState.Cdc, port, &portID, simState.Rand, + func(r *rand.Rand) { portID = strings.ToLower(simtypes.RandStringOfLength(r, 20)) }, + ) + + var sendEnabled bool + simState.AppParams.GetOrGenerate( + simState.Cdc, string(types.KeySendEnabled), &sendEnabled, simState.Rand, + func(r *rand.Rand) { sendEnabled = RadomEnabled(r) }, + ) + + var receiveEnabled bool + simState.AppParams.GetOrGenerate( + simState.Cdc, string(types.KeyReceiveEnabled), &receiveEnabled, simState.Rand, + func(r *rand.Rand) { receiveEnabled = RadomEnabled(r) }, + ) + + transferGenesis := types.GenesisState{ + PortId: portID, + DenomTraces: types.Traces{}, + Params: types.NewParams(sendEnabled, receiveEnabled), + } + + bz, err := json.MarshalIndent(&transferGenesis, "", " ") + if err != nil { + panic(err) + } + fmt.Printf("Selected randomly generated %s parameters:\n%s\n", types.ModuleName, bz) + simState.GenState[types.ModuleName] = simState.Cdc.MustMarshalJSON(&transferGenesis) +} diff --git a/applications/transfer/simulation/genesis_test.go b/applications/transfer/simulation/genesis_test.go new file mode 100644 index 0000000000..12791d7445 --- /dev/null +++ b/applications/transfer/simulation/genesis_test.go @@ -0,0 +1,74 @@ +package simulation_test + +import ( + "encoding/json" + "math/rand" + "testing" + + "github.com/stretchr/testify/require" + + "github.com/cosmos/cosmos-sdk/codec" + codectypes "github.com/cosmos/cosmos-sdk/codec/types" + "github.com/cosmos/cosmos-sdk/types/module" + simtypes "github.com/cosmos/cosmos-sdk/types/simulation" + "github.com/cosmos/cosmos-sdk/x/ibc/applications/transfer/simulation" + "github.com/cosmos/cosmos-sdk/x/ibc/applications/transfer/types" +) + +// TestRandomizedGenState tests the normal scenario of applying RandomizedGenState. +// Abonormal scenarios are not tested here. +func TestRandomizedGenState(t *testing.T) { + interfaceRegistry := codectypes.NewInterfaceRegistry() + cdc := codec.NewProtoCodec(interfaceRegistry) + + s := rand.NewSource(1) + r := rand.New(s) + + simState := module.SimulationState{ + AppParams: make(simtypes.AppParams), + Cdc: cdc, + Rand: r, + NumBonded: 3, + Accounts: simtypes.RandomAccounts(r, 3), + InitialStake: 1000, + GenState: make(map[string]json.RawMessage), + } + + simulation.RandomizedGenState(&simState) + + var ibcTransferGenesis types.GenesisState + simState.Cdc.MustUnmarshalJSON(simState.GenState[types.ModuleName], &ibcTransferGenesis) + + require.Equal(t, "euzxpfgkqegqiqwixnku", ibcTransferGenesis.PortId) + require.True(t, ibcTransferGenesis.Params.SendEnabled) + require.True(t, ibcTransferGenesis.Params.ReceiveEnabled) + require.Len(t, ibcTransferGenesis.DenomTraces, 0) + +} + +// TestRandomizedGenState tests abnormal scenarios of applying RandomizedGenState. 
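+// Each SimulationState below is deliberately under-initialized so that
+// RandomizedGenState panics: first with an entirely empty state, then with a
+// nil GenState map.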
+func TestRandomizedGenState1(t *testing.T) { + interfaceRegistry := codectypes.NewInterfaceRegistry() + cdc := codec.NewProtoCodec(interfaceRegistry) + + s := rand.NewSource(1) + r := rand.New(s) + // all these tests will panic + tests := []struct { + simState module.SimulationState + panicMsg string + }{ + { // panic => reason: incomplete initialization of the simState + module.SimulationState{}, "invalid memory address or nil pointer dereference"}, + { // panic => reason: incomplete initialization of the simState + module.SimulationState{ + AppParams: make(simtypes.AppParams), + Cdc: cdc, + Rand: r, + }, "assignment to entry in nil map"}, + } + + for _, tt := range tests { + require.Panicsf(t, func() { simulation.RandomizedGenState(&tt.simState) }, tt.panicMsg) + } +} diff --git a/applications/transfer/simulation/params.go b/applications/transfer/simulation/params.go new file mode 100644 index 0000000000..67c61f514e --- /dev/null +++ b/applications/transfer/simulation/params.go @@ -0,0 +1,32 @@ +package simulation + +import ( + "fmt" + "math/rand" + + gogotypes "github.com/gogo/protobuf/types" + + "github.com/cosmos/cosmos-sdk/x/simulation" + + simtypes "github.com/cosmos/cosmos-sdk/types/simulation" + "github.com/cosmos/cosmos-sdk/x/ibc/applications/transfer/types" +) + +// ParamChanges defines the parameters that can be modified by param change proposals +// on the simulation +func ParamChanges(r *rand.Rand) []simtypes.ParamChange { + return []simtypes.ParamChange{ + simulation.NewSimParamChange(types.ModuleName, string(types.KeySendEnabled), + func(r *rand.Rand) string { + sendEnabled := RadomEnabled(r) + return fmt.Sprintf("%s", types.ModuleCdc.MustMarshalJSON(&gogotypes.BoolValue{Value: sendEnabled})) + }, + ), + simulation.NewSimParamChange(types.ModuleName, string(types.KeyReceiveEnabled), + func(r *rand.Rand) string { + receiveEnabled := RadomEnabled(r) + return fmt.Sprintf("%s", types.ModuleCdc.MustMarshalJSON(&gogotypes.BoolValue{Value: receiveEnabled})) + }, + ), + } +} diff --git a/applications/transfer/simulation/params_test.go b/applications/transfer/simulation/params_test.go new file mode 100644 index 0000000000..a692d4328e --- /dev/null +++ b/applications/transfer/simulation/params_test.go @@ -0,0 +1,36 @@ +package simulation_test + +import ( + "math/rand" + "testing" + + "github.com/stretchr/testify/require" + + "github.com/cosmos/cosmos-sdk/x/ibc/applications/transfer/simulation" +) + +func TestParamChanges(t *testing.T) { + s := rand.NewSource(1) + r := rand.New(s) + + expected := []struct { + composedKey string + key string + simValue string + subspace string + }{ + {"transfer/SendEnabled", "SendEnabled", "false", "transfer"}, + {"transfer/ReceiveEnabled", "ReceiveEnabled", "true", "transfer"}, + } + + paramChanges := simulation.ParamChanges(r) + + require.Len(t, paramChanges, 2) + + for i, p := range paramChanges { + require.Equal(t, expected[i].composedKey, p.ComposedKey()) + require.Equal(t, expected[i].key, p.Key()) + require.Equal(t, expected[i].simValue, p.SimValue()(r), p.Key()) + require.Equal(t, expected[i].subspace, p.Subspace()) + } +} diff --git a/applications/transfer/spec/01_concepts.md b/applications/transfer/spec/01_concepts.md new file mode 100644 index 0000000000..96f05f12a7 --- /dev/null +++ b/applications/transfer/spec/01_concepts.md @@ -0,0 +1,117 @@ + + +# Concepts + +## Acknowledgements + +ICS20 uses the recommended acknowledgement format as specified by [ICS 
04](https://github.com/cosmos/ics/tree/master/spec/ics-004-channel-and-packet-semantics#acknowledgement-envelope). + +A successful receive of a transfer packet will result in a Result Acknowledgement being written +with the value `[]byte(byte(1))` in the `Response` field. + +An unsuccessful receive of a transfer packet will result in an Error Acknowledgement being written +with the error message in the `Response` field. + +## Denomination Trace + +The denomination trace corresponds to the information that allows a token to be traced back to its +origin chain. It contains a sequence of port and channel identifiers ordered from the most recent to +the oldest in the timeline of transfers. + +This information is included on the token denomination field in the form of a hash to prevent an +unbounded denomination length. For example, the token `transfer/channelToA/uatom` will be displayed +as `ibc/7F1D3FCF4AE79E1554D670D1AD949A9BA4E4A3C76C63093E17E446A46061A7A2`. + +Each send to any chain other than the one it was previously received from is a movement forwards in +the token's timeline. This causes trace to be added to the token's history and the destination port +and destination channel to be prefixed to the denomination. In these instances the sender chain is +acting as the "source zone". When the token is sent back to the chain it previously received from, the +prefix is removed. This is a backwards movement in the token's timeline and the sender chain is +acting as the "sink zone". + +It is strongly recommended to read the full details of [ADR 001: Coin Source Tracing](./../../../../../docs/architecture/adr-001-coin-source-tracing.md) to understand the implications and context of the IBC token representations. + +### UX suggestions for clients + +For clients (wallets, exchanges, applications, block explorers, etc) that want to display the source of the token, it is recommended to use the following +alternatives for each of the cases below: + +#### Direct connection + +If the denomination trace contains a single identifier prefix pair (as in the example above), then +the easiest way to retrieve the chain and light client identifier is to map the trace information +directly. In summary, this requires querying the channel from the denomination trace identifiers, +and then the counterparty client state using the counterparty port and channel identifiers from the +retrieved channel. + +A general pseudo algorithm would look like the following: + +1. Query the full denomination trace. +2. Query the channel with the `portID/channelID` pair, which corresponds to the first destination of the + token. +3. Query the client state using the identifiers pair. Note that this query will return a `"Not + Found"` response if the current chain is not connected to this channel. +4. Retrieve the the client identifier or chain identifier from the client state (eg: on + Tendermint clients) and store it locally. + +Using the gRPC gataway client service the steps above would be, with a given IBC token `ibc/7F1D3FCF4AE79E1554D670D1AD949A9BA4E4A3C76C63093E17E446A46061A7A2` stored on `chainB`: + +1. `GET /ibc_transfer/v1beta1/denom_traces/7F1D3FCF4AE79E1554D670D1AD949A9BA4E4A3C76C63093E17E446A46061A7A2` -> `{"path": "transfer/channelToA", "base_denom": "uatom"}` +2. `GET /ibc/channel/v1beta1/channels/channelToA/ports/transfer/client_state"` -> `{"client_id": "clientA", "chain-id": "chainA", ...}` +3. 
`GET /ibc/channel/v1beta1/channels/channelToA/ports/transfer"` -> `{"channel_id": "channelToA", port_id": "transfer", counterparty: {"channel_id": "channelToB", port_id": "transfer"}, ...}` +4. `GET /ibc/channel/v1beta1/channels/channelToB/ports/transfer/client_state" -> {"client_id": "clientB", "chain-id": "chainB", ...}` + +Then, the token transfer chain path for the `uatom` denomination would be: `chainA` -> `chainB`. + +### Multiple hops + +The multiple channel hops case applies when the token has passed through multiple chains between the original source and final destination chains. + +The IBC protocol doesn't know the topology of the overall network (i.e connections between chains and identifier names between them). For this reason, in the the multiple hops case, a particular chain in the timeline of the individual transfers can't query the chain and client identifiers of the other chains. + +Take for example the following sequence of transfers `A -> B -> C` for an IBC token, with a final prefix path (trace info) of `transfer/channelChainC/transfer/channelChainB`. What the paragraph above means is that is that even in the case that chain `C` is directly connected to chain `A`, querying the port and channel identifiers that chain `B` uses to connect to chain `A` (eg: `transfer/channelChainA`) can be completely different from the one that chain `C` uses to connect to chain `A` (eg: `transfer/channelToChainA`). + +Thus the proposed solution for clients that the IBC team recommends are the following: + +- **Connect to all chains**: Connecting to all the chains in the timeline would allow clients to + perform the queries outlined in the [direct connection](#direct-connection) section to each + relevant chain. By repeatedly following the port and channel denomination trace transfer timeline, + clients should always be able to find all the relevant identifiers. This comes at the tradeoff + that the client must connect to nodes on each of the chains in order to perform the queries. +- **Relayer as a Service (RaaS)**: A longer term solution is to use/create a relayer service that + could map the denomination trace to the chain path timeline for each token (i.e `origin chain -> + chain #1 -> ... -> chain #(n-1) -> final chain`). These services could provide merkle proofs in + order to allow clients to optionally verify the path timeline correctness for themselves by + running light clients. If the proofs are not verified, they should be considered as trusted third + parties services. Additionally, client would be advised in the future to use RaaS that support the + largest number of connections between chains in the ecosystem. Unfortunately, none of the existing + public relayers (in [Golang](https://github.com/cosmos/relayer) and + [Rust](https://github.com/informalsystems/ibc-rs)), provide this service to clients. + +::: tip +The only viable alternative for clients (at the time of writing) to tokens with multiple connection hops, is to connect to all chains directly and perform relevant queries to each of them in the sequence. +::: + +## Locked Funds + +In some [exceptional cases](./../../../../../docs/architecture/adr-026-ibc-client-recovery-mechanisms.md#exceptional-cases), a client state associated with a given channel cannot be updated. This causes that funds from fungible tokens in that channel will be permanently locked and thus can no longer be transferred. + +To mitigate this, a client update governance proposal can be submitted to update the frozen client +with a new valid header. 
Once the proposal passes the client state will be unfrozen and the funds +from the associated channels will then be unlocked. This mechanism only applies to clients that +allow updates via governance, such as Tendermint clients. + +In addition to this, it's important to mention that a token must be sent back along the exact route +that it took originally un order to return it to its original form on the source chain (eg: the +Cosmos Hub for the `uatom`). Sending a token back to the same chain across a different channel will +**not** move the token back across its timeline. If a channel in the chain history closes before the +token can be sent back across that channel, then the token will not be returnable to its original +form. + + +## Security Considerations + +For safety, no other module must be capable of minting tokens with the `ibc/` prefix. The IBC +transfer module needs a subset of the denomination space that only it can create tokens in. diff --git a/applications/transfer/spec/02_state.md b/applications/transfer/spec/02_state.md new file mode 100644 index 0000000000..9cab8d677f --- /dev/null +++ b/applications/transfer/spec/02_state.md @@ -0,0 +1,10 @@ + + +# State + +The transfer IBC application module keeps state of the port to which the module is binded and the denomination trace information as outlined in [ADR 01](./../../../../../docs/architecture/adr-001-coin-source-tracing.md). + +- `Port`: `0x01 -> ProtocolBuffer(string)` +- `DenomTrace`: `0x02 | []bytes(traceHash) -> ProtocolBuffer(DenomTrace)` diff --git a/applications/transfer/spec/03_state_transitions.md b/applications/transfer/spec/03_state_transitions.md new file mode 100644 index 0000000000..9090da5434 --- /dev/null +++ b/applications/transfer/spec/03_state_transitions.md @@ -0,0 +1,36 @@ + + +# State Transitions + +## Send Fungible Tokens + +A successful fungible token send has two state transitions depending if the +transfer is a movement forward or backwards in the token's timeline: + +1. Sender chain is the source chain, *i.e* a transfer to any chain other than the one it was previously received from is a movement forwards in the token's timeline. This results in the following state transitions: + +- The coins are transferred to an escrow address (i.e locked) on the sender chain +- The coins are transferred to the receiving chain through IBC TAO logic. + +2. Sender chain is the sink chain, *i.e* the token is sent back to the chain it previously received from. This is a backwards movement in the token's timeline. This results in the following state transitions: + +- The coins (vouchers) are burned on the sender chain +- The coins transferred to the receiving chain though IBC TAO logic. + +## Receive Fungible Tokens + +A successful fungible token receive has two state transitions depending if the +transfer is a movement forward or backwards in the token's timeline: + +1. Receiver chain is the source chain. This is a backwards movement in the token's timeline. This results in the following state transitions: + +- The leftmost port and channel identifier pair is removed from the token denomination prefix. +- The tokens are unescrowed and sent to the receiving address. + +2. Receiver chain is the sink chain. This is a movement forwards in the token's timeline. This results in the following state transitions: + +- Token vouchers are minted by prefixing the destination port and channel identifiers to the trace information. +- The receiving chain stores the new trace information in the store (if not set already). 
+- The vouchers are sent to the receiving address. diff --git a/applications/transfer/spec/04_messages.md b/applications/transfer/spec/04_messages.md new file mode 100644 index 0000000000..9da7673eb3 --- /dev/null +++ b/applications/transfer/spec/04_messages.md @@ -0,0 +1,40 @@ + + +# Messages + +## MsgTransfer + +A fungible token cross chain transfer is achieved by using the `MsgTransfer`: + +```go +type MsgTransfer struct { + SourcePort string + SourceChannel string + Token sdk.Coin + Sender string + Receiver string + TimeoutHeight ibcexported.Height + TimeoutTimestamp uint64 +} +``` + +This message is expected to fail if: + +- `SourcePort` is invalid (see 24-host naming requirements) +- `SourceChannel` is invalid (see 24-host naming requirements) +- `Token` is invalid (denom is invalid or amount is negative) +- `Token.Amount` is not positive +- `Sender` is empty +- `Receiver` is empty +- `TimeoutHeight` and `TimeoutTimestamp` are both zero +- `Token.Denom` is not a valid IBC denomination as per [ADR 001 - Coin Source Tracing](./../../../../../docs/architecture/adr-001-coin-source-tracing.md). + +This message will send a fungible token to the counterparty chain represented +by the counterparty Channel End connected to the Channel End with the identifiers +`SourcePort` and `SourceChannel`. + +The denomination provided for transfer should correspond to the same denomination +represented on this chain. The prefixes will be added as necessary upon by the +receiving chain. diff --git a/applications/transfer/spec/05_events.md b/applications/transfer/spec/05_events.md new file mode 100644 index 0000000000..51b49da460 --- /dev/null +++ b/applications/transfer/spec/05_events.md @@ -0,0 +1,44 @@ + + +# Events + +## MsgTransfer + +| Type | Attribute Key | Attribute Value | +|--------------|---------------|-----------------| +| ibc_transfer | sender | {sender} | +| ibc_transfer | receiver | {receiver} | +| message | action | transfer | +| message | module | transfer | + +## OnRecvPacket callback + +| Type | Attribute Key | Attribute Value | +|-----------------------|---------------|-----------------| +| fungible_token_packet | module | transfer | +| fungible_token_packet | receiver | {receiver} | +| fungible_token_packet | denom | {denom} | +| fungible_token_packet | amount | {amount} | +| fungible_token_packet | success | {ackSuccess} | +| denomination_trace | trace_hash | {hex_hash} | + +## OnAcknowledgePacket callback + +| Type | Attribute Key | Attribute Value | +|-----------------------|-----------------|-------------------| +| fungible_token_packet | module | transfer | +| fungible_token_packet | receiver | {receiver} | +| fungible_token_packet | denom | {denom} | +| fungible_token_packet | amount | {amount} | +| fungible_token_packet | success | error | {ack.Response} | + +## OnTimeoutPacket callback + +| Type | Attribute Key | Attribute Value | +|-----------------------|-----------------|-----------------| +| fungible_token_packet | module | transfer | +| fungible_token_packet | refund_receiver | {receiver} | +| fungible_token_packet | denom | {denom} | +| fungible_token_packet | amount | {amount} | diff --git a/applications/transfer/spec/06_metrics.md b/applications/transfer/spec/06_metrics.md new file mode 100644 index 0000000000..21bb51c0a1 --- /dev/null +++ b/applications/transfer/spec/06_metrics.md @@ -0,0 +1,14 @@ + + +# Metrics + +The transfer IBC application module exposes the following set of [metrics](./../../../../../docs/core/telemetry.md). 
+ +| Metric | Description | Unit | Type | +|:--------------------------------|:------------------------------------------------------------------------------------------|:----------------|:--------| +| `tx_msg_ibc_transfer` | The total amount of tokens transferred via IBC in a `MsgTransfer` (source or sink chain) | token | gauge | +| `ibc_transfer_packet_receive` | The total amount of tokens received in a `FungibleTokenPacketData` (source or sink chain) | token | gauge | +| `ibc_transfer_send` | Total number of IBC transfers sent from a chain (source or sink) | transfer | counter | +| `ibc_transfer_receive` | Total number of IBC transfers received to a chain (source or sink) | transfer | counter | diff --git a/applications/transfer/spec/07_params.md b/applications/transfer/spec/07_params.md new file mode 100644 index 0000000000..8d2b97c580 --- /dev/null +++ b/applications/transfer/spec/07_params.md @@ -0,0 +1,30 @@ + + +# Parameters + +The ibc-transfer module contains the following parameters: + +| Key | Type | Default Value | +|------------------|------|---------------| +| `SendEnabled` | bool | `true` | +| `ReceiveEnabled` | bool | `true` | + +## SendEnabled + +The transfers enabled parameter controls send cross-chain transfer capabilities for all fungible +tokens. + +To prevent a single token from being transferred from the chain, set the `SendEnabled` parameter to `true` and +then set the bank module's [`SendEnabled` parameter](./../../../../bank/spec/05_params.md#sendenabled) for +the denomination to `false`. + +## ReceiveEnabled + +The transfers enabled parameter controls receive cross-chain transfer capabilities for all fungible +tokens. + +To prevent a single token from being transferred to the chain, set the `ReceiveEnabled` parameter to `true` and +then set the bank module's [`SendEnabled` parameter](./../../../../bank/spec/05_params.md#sendenabled) for +the denomination to `false`. diff --git a/applications/transfer/spec/README.md b/applications/transfer/spec/README.md new file mode 100644 index 0000000000..5230fdde41 --- /dev/null +++ b/applications/transfer/spec/README.md @@ -0,0 +1,24 @@ + + +# `ibc-transfer` + +## Abstract + +This paper defines the implementation of the ICS20 protocol on the Cosmos SDK. + +For the general specification please refer to the [ICS20 Specification](https://github.com/cosmos/ics/tree/master/spec/ics-020-fungible-token-transfer). + +## Contents + +1. **[Concepts](01_concepts.md)** +2. **[State](02_state.md)** +3. **[State Transitions](03_state_transitions.md)** +4. **[Messages](04_messages.md)** +5. **[Events](05_events.md)** +6. **[Metrics](06_metrics.md)** +7. **[Parameters](07_params.md)** diff --git a/applications/transfer/types/codec.go b/applications/transfer/types/codec.go new file mode 100644 index 0000000000..24ad7e5a90 --- /dev/null +++ b/applications/transfer/types/codec.go @@ -0,0 +1,41 @@ +package types + +import ( + "github.com/cosmos/cosmos-sdk/codec" + codectypes "github.com/cosmos/cosmos-sdk/codec/types" + sdk "github.com/cosmos/cosmos-sdk/types" + "github.com/cosmos/cosmos-sdk/types/msgservice" +) + +// RegisterLegacyAminoCodec registers the necessary x/ibc transfer interfaces and concrete types +// on the provided LegacyAmino codec. These types are used for Amino JSON serialization. +func RegisterLegacyAminoCodec(cdc *codec.LegacyAmino) { + cdc.RegisterConcrete(&MsgTransfer{}, "cosmos-sdk/MsgTransfer", nil) +} + +// RegisterInterfaces register the ibc transfer module interfaces to protobuf +// Any. 
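+// MsgTransfer is registered as an sdk.Msg implementation and the transfer Msg
+// service descriptor is registered alongside it.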
+func RegisterInterfaces(registry codectypes.InterfaceRegistry) { + registry.RegisterImplementations((*sdk.Msg)(nil), &MsgTransfer{}) + + msgservice.RegisterMsgServiceDesc(registry, &_Msg_serviceDesc) +} + +var ( + amino = codec.NewLegacyAmino() + + // ModuleCdc references the global x/ibc-transfer module codec. Note, the codec + // should ONLY be used in certain instances of tests and for JSON encoding. + // + // The actual codec used for serialization should be provided to x/ibc transfer and + // defined at the application level. + ModuleCdc = codec.NewProtoCodec(codectypes.NewInterfaceRegistry()) + + // AminoCdc is a amino codec created to support amino json compatible msgs. + AminoCdc = codec.NewAminoCodec(amino) +) + +func init() { + RegisterLegacyAminoCodec(amino) + amino.Seal() +} diff --git a/applications/transfer/types/coin.go b/applications/transfer/types/coin.go new file mode 100644 index 0000000000..08ae9a8d32 --- /dev/null +++ b/applications/transfer/types/coin.go @@ -0,0 +1,48 @@ +package types + +import ( + "fmt" + "strings" + + sdk "github.com/cosmos/cosmos-sdk/types" +) + +// SenderChainIsSource returns false if the denomination originally came +// from the receiving chain and true otherwise. +func SenderChainIsSource(sourcePort, sourceChannel, denom string) bool { + // This is the prefix that would have been prefixed to the denomination + // on sender chain IF and only if the token originally came from the + // receiving chain. + + return !ReceiverChainIsSource(sourcePort, sourceChannel, denom) +} + +// ReceiverChainIsSource returns true if the denomination originally came +// from the receiving chain and false otherwise. +func ReceiverChainIsSource(sourcePort, sourceChannel, denom string) bool { + // The prefix passed in should contain the SourcePort and SourceChannel. + // If the receiver chain originally sent the token to the sender chain + // the denom will have the sender's SourcePort and SourceChannel as the + // prefix. + + voucherPrefix := GetDenomPrefix(sourcePort, sourceChannel) + return strings.HasPrefix(denom, voucherPrefix) + +} + +// GetDenomPrefix returns the receiving denomination prefix +func GetDenomPrefix(portID, channelID string) string { + return fmt.Sprintf("%s/%s/", portID, channelID) +} + +// GetPrefixedDenom returns the denomination with the portID and channelID prefixed +func GetPrefixedDenom(portID, channelID, baseDenom string) string { + return fmt.Sprintf("%s/%s/%s", portID, channelID, baseDenom) +} + +// GetTransferCoin creates a transfer coin with the port ID and channel ID +// prefixed to the base denom. 
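+// For example, GetTransferCoin("transfer", "channel-0", "uatom", 100) returns
+// a coin whose denomination is "ibc/" followed by the hex-encoded SHA-256 hash
+// of "transfer/channel-0/uatom".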
+func GetTransferCoin(portID, channelID, baseDenom string, amount int64) sdk.Coin { + denomTrace := ParseDenomTrace(GetPrefixedDenom(portID, channelID, baseDenom)) + return sdk.NewInt64Coin(denomTrace.IBCDenom(), amount) +} diff --git a/applications/transfer/types/errors.go b/applications/transfer/types/errors.go new file mode 100644 index 0000000000..07cba19491 --- /dev/null +++ b/applications/transfer/types/errors.go @@ -0,0 +1,17 @@ +package types + +import ( + sdkerrors "github.com/cosmos/cosmos-sdk/types/errors" +) + +// IBC channel sentinel errors +var ( + ErrInvalidPacketTimeout = sdkerrors.Register(ModuleName, 2, "invalid packet timeout") + ErrInvalidDenomForTransfer = sdkerrors.Register(ModuleName, 3, "invalid denomination for cross-chain transfer") + ErrInvalidVersion = sdkerrors.Register(ModuleName, 4, "invalid ICS20 version") + ErrInvalidAmount = sdkerrors.Register(ModuleName, 5, "invalid token amount") + ErrTraceNotFound = sdkerrors.Register(ModuleName, 6, "denomination trace not found") + ErrSendDisabled = sdkerrors.Register(ModuleName, 7, "fungible token transfers from this chain are disabled") + ErrReceiveDisabled = sdkerrors.Register(ModuleName, 8, "fungible token transfers to this chain are disabled") + ErrMaxTransferChannels = sdkerrors.Register(ModuleName, 9, "max transfer channels") +) diff --git a/applications/transfer/types/events.go b/applications/transfer/types/events.go new file mode 100644 index 0000000000..a3ed5b413c --- /dev/null +++ b/applications/transfer/types/events.go @@ -0,0 +1,21 @@ +package types + +// IBC transfer events +const ( + EventTypeTimeout = "timeout" + EventTypePacket = "fungible_token_packet" + EventTypeTransfer = "ibc_transfer" + EventTypeChannelClose = "channel_closed" + EventTypeDenomTrace = "denomination_trace" + + AttributeKeyReceiver = "receiver" + AttributeKeyDenom = "denom" + AttributeKeyAmount = "amount" + AttributeKeyRefundReceiver = "refund_receiver" + AttributeKeyRefundDenom = "refund_denom" + AttributeKeyRefundAmount = "refund_amount" + AttributeKeyAckSuccess = "success" + AttributeKeyAck = "acknowledgement" + AttributeKeyAckError = "error" + AttributeKeyTraceHash = "trace_hash" +) diff --git a/applications/transfer/types/expected_keepers.go b/applications/transfer/types/expected_keepers.go new file mode 100644 index 0000000000..284463350e --- /dev/null +++ b/applications/transfer/types/expected_keepers.go @@ -0,0 +1,48 @@ +package types + +import ( + sdk "github.com/cosmos/cosmos-sdk/types" + "github.com/cosmos/cosmos-sdk/x/auth/types" + capabilitytypes "github.com/cosmos/cosmos-sdk/x/capability/types" + connectiontypes "github.com/cosmos/cosmos-sdk/x/ibc/core/03-connection/types" + channeltypes "github.com/cosmos/cosmos-sdk/x/ibc/core/04-channel/types" + ibcexported "github.com/cosmos/cosmos-sdk/x/ibc/core/exported" +) + +// AccountKeeper defines the contract required for account APIs. 
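+// Only module account lookups are required here; the SDK x/auth keeper is
+// expected to satisfy this interface.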
+type AccountKeeper interface { + GetModuleAddress(name string) sdk.AccAddress + GetModuleAccount(ctx sdk.Context, name string) types.ModuleAccountI +} + +// BankKeeper defines the expected bank keeper +type BankKeeper interface { + SendCoins(ctx sdk.Context, fromAddr sdk.AccAddress, toAddr sdk.AccAddress, amt sdk.Coins) error + MintCoins(ctx sdk.Context, moduleName string, amt sdk.Coins) error + BurnCoins(ctx sdk.Context, moduleName string, amt sdk.Coins) error + SendCoinsFromModuleToAccount(ctx sdk.Context, senderModule string, recipientAddr sdk.AccAddress, amt sdk.Coins) error + SendCoinsFromAccountToModule(ctx sdk.Context, senderAddr sdk.AccAddress, recipientModule string, amt sdk.Coins) error +} + +// ChannelKeeper defines the expected IBC channel keeper +type ChannelKeeper interface { + GetChannel(ctx sdk.Context, srcPort, srcChan string) (channel channeltypes.Channel, found bool) + GetNextSequenceSend(ctx sdk.Context, portID, channelID string) (uint64, bool) + SendPacket(ctx sdk.Context, channelCap *capabilitytypes.Capability, packet ibcexported.PacketI) error + ChanCloseInit(ctx sdk.Context, portID, channelID string, chanCap *capabilitytypes.Capability) error +} + +// ClientKeeper defines the expected IBC client keeper +type ClientKeeper interface { + GetClientConsensusState(ctx sdk.Context, clientID string) (connection ibcexported.ConsensusState, found bool) +} + +// ConnectionKeeper defines the expected IBC connection keeper +type ConnectionKeeper interface { + GetConnection(ctx sdk.Context, connectionID string) (connection connectiontypes.ConnectionEnd, found bool) +} + +// PortKeeper defines the expected IBC port keeper +type PortKeeper interface { + BindPort(ctx sdk.Context, portID string) *capabilitytypes.Capability +} diff --git a/applications/transfer/types/genesis.go b/applications/transfer/types/genesis.go new file mode 100644 index 0000000000..682b04c4cf --- /dev/null +++ b/applications/transfer/types/genesis.go @@ -0,0 +1,35 @@ +package types + +import ( + host "github.com/cosmos/cosmos-sdk/x/ibc/core/24-host" +) + +// NewGenesisState creates a new ibc-transfer GenesisState instance. +func NewGenesisState(portID string, denomTraces Traces, params Params) *GenesisState { + return &GenesisState{ + PortId: portID, + DenomTraces: denomTraces, + Params: params, + } +} + +// DefaultGenesisState returns a GenesisState with "transfer" as the default PortID. +func DefaultGenesisState() *GenesisState { + return &GenesisState{ + PortId: PortID, + DenomTraces: Traces{}, + Params: DefaultParams(), + } +} + +// Validate performs basic genesis state validation returning an error upon any +// failure. +func (gs GenesisState) Validate() error { + if err := host.PortIdentifierValidator(gs.PortId); err != nil { + return err + } + if err := gs.DenomTraces.Validate(); err != nil { + return err + } + return gs.Params.Validate() +} diff --git a/applications/transfer/types/genesis.pb.go b/applications/transfer/types/genesis.pb.go new file mode 100644 index 0000000000..3ae0442f82 --- /dev/null +++ b/applications/transfer/types/genesis.pb.go @@ -0,0 +1,443 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: ibc/applications/transfer/v1/genesis.proto + +package types + +import ( + fmt "fmt" + _ "github.com/gogo/protobuf/gogoproto" + proto "github.com/gogo/protobuf/proto" + io "io" + math "math" + math_bits "math/bits" +) + +// Reference imports to suppress errors if they are not otherwise used. 
+var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +// GenesisState defines the ibc-transfer genesis state +type GenesisState struct { + PortId string `protobuf:"bytes,1,opt,name=port_id,json=portId,proto3" json:"port_id,omitempty" yaml:"port_id"` + DenomTraces Traces `protobuf:"bytes,2,rep,name=denom_traces,json=denomTraces,proto3,castrepeated=Traces" json:"denom_traces" yaml:"denom_traces"` + Params Params `protobuf:"bytes,3,opt,name=params,proto3" json:"params"` +} + +func (m *GenesisState) Reset() { *m = GenesisState{} } +func (m *GenesisState) String() string { return proto.CompactTextString(m) } +func (*GenesisState) ProtoMessage() {} +func (*GenesisState) Descriptor() ([]byte, []int) { + return fileDescriptor_a4f788affd5bea89, []int{0} +} +func (m *GenesisState) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *GenesisState) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_GenesisState.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *GenesisState) XXX_Merge(src proto.Message) { + xxx_messageInfo_GenesisState.Merge(m, src) +} +func (m *GenesisState) XXX_Size() int { + return m.Size() +} +func (m *GenesisState) XXX_DiscardUnknown() { + xxx_messageInfo_GenesisState.DiscardUnknown(m) +} + +var xxx_messageInfo_GenesisState proto.InternalMessageInfo + +func (m *GenesisState) GetPortId() string { + if m != nil { + return m.PortId + } + return "" +} + +func (m *GenesisState) GetDenomTraces() Traces { + if m != nil { + return m.DenomTraces + } + return nil +} + +func (m *GenesisState) GetParams() Params { + if m != nil { + return m.Params + } + return Params{} +} + +func init() { + proto.RegisterType((*GenesisState)(nil), "ibc.applications.transfer.v1.GenesisState") +} + +func init() { + proto.RegisterFile("ibc/applications/transfer/v1/genesis.proto", fileDescriptor_a4f788affd5bea89) +} + +var fileDescriptor_a4f788affd5bea89 = []byte{ + // 317 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0xd2, 0xca, 0x4c, 0x4a, 0xd6, + 0x4f, 0x2c, 0x28, 0xc8, 0xc9, 0x4c, 0x4e, 0x2c, 0xc9, 0xcc, 0xcf, 0x2b, 0xd6, 0x2f, 0x29, 0x4a, + 0xcc, 0x2b, 0x4e, 0x4b, 0x2d, 0xd2, 0x2f, 0x33, 0xd4, 0x4f, 0x4f, 0xcd, 0x4b, 0x2d, 0xce, 0x2c, + 0xd6, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0x92, 0xc9, 0x4c, 0x4a, 0xd6, 0x43, 0x56, 0xab, 0x07, + 0x53, 0xab, 0x57, 0x66, 0x28, 0x25, 0x92, 0x9e, 0x9f, 0x9e, 0x0f, 0x56, 0xa8, 0x0f, 0x62, 0x41, + 0xf4, 0x48, 0x69, 0xe3, 0x35, 0x1f, 0xae, 0x1f, 0xac, 0x58, 0xe9, 0x33, 0x23, 0x17, 0x8f, 0x3b, + 0xc4, 0xca, 0xe0, 0x92, 0xc4, 0x92, 0x54, 0x21, 0x6d, 0x2e, 0xf6, 0x82, 0xfc, 0xa2, 0x92, 0xf8, + 0xcc, 0x14, 0x09, 0x46, 0x05, 0x46, 0x0d, 0x4e, 0x27, 0xa1, 0x4f, 0xf7, 0xe4, 0xf9, 0x2a, 0x13, + 0x73, 0x73, 0xac, 0x94, 0xa0, 0x12, 0x4a, 0x41, 0x6c, 0x20, 0x96, 0x67, 0x8a, 0x50, 0x11, 0x17, + 0x4f, 0x4a, 0x6a, 0x5e, 0x7e, 0x6e, 0x7c, 0x49, 0x51, 0x62, 0x72, 0x6a, 0xb1, 0x04, 0x93, 0x02, + 0xb3, 0x06, 0xb7, 0x91, 0x86, 0x1e, 0x3e, 0x57, 0xeb, 0xb9, 0x80, 0x74, 0x84, 0x80, 0x34, 0x38, + 0xa9, 0x9e, 0xb8, 
0x27, 0xcf, 0xf0, 0xe9, 0x9e, 0xbc, 0x30, 0xc4, 0x7c, 0x64, 0xb3, 0x94, 0x56, + 0xdd, 0x97, 0x67, 0x03, 0xab, 0x2a, 0x0e, 0xe2, 0x4e, 0x81, 0x6b, 0x29, 0x16, 0x72, 0xe2, 0x62, + 0x2b, 0x48, 0x2c, 0x4a, 0xcc, 0x2d, 0x96, 0x60, 0x56, 0x60, 0xd4, 0xe0, 0x36, 0x52, 0xc1, 0x6f, + 0x5b, 0x00, 0x58, 0xad, 0x13, 0x0b, 0xc8, 0xa6, 0x20, 0xa8, 0x4e, 0xa7, 0x88, 0x13, 0x8f, 0xe4, + 0x18, 0x2f, 0x3c, 0x92, 0x63, 0x7c, 0xf0, 0x48, 0x8e, 0x71, 0xc2, 0x63, 0x39, 0x86, 0x0b, 0x8f, + 0xe5, 0x18, 0x6e, 0x3c, 0x96, 0x63, 0x88, 0xb2, 0x4b, 0xcf, 0x2c, 0xc9, 0x28, 0x4d, 0xd2, 0x4b, + 0xce, 0xcf, 0xd5, 0x4f, 0xce, 0x2f, 0xce, 0xcd, 0x2f, 0x86, 0x52, 0xba, 0xc5, 0x29, 0xd9, 0xfa, + 0x15, 0xfa, 0xb8, 0xc3, 0xb6, 0xa4, 0xb2, 0x20, 0xb5, 0x38, 0x89, 0x0d, 0x1c, 0xac, 0xc6, 0x80, + 0x00, 0x00, 0x00, 0xff, 0xff, 0xda, 0xbb, 0x81, 0x1e, 0xe5, 0x01, 0x00, 0x00, +} + +func (m *GenesisState) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *GenesisState) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *GenesisState) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.Params.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenesis(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + if len(m.DenomTraces) > 0 { + for iNdEx := len(m.DenomTraces) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.DenomTraces[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenesis(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + if len(m.PortId) > 0 { + i -= len(m.PortId) + copy(dAtA[i:], m.PortId) + i = encodeVarintGenesis(dAtA, i, uint64(len(m.PortId))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func encodeVarintGenesis(dAtA []byte, offset int, v uint64) int { + offset -= sovGenesis(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *GenesisState) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.PortId) + if l > 0 { + n += 1 + l + sovGenesis(uint64(l)) + } + if len(m.DenomTraces) > 0 { + for _, e := range m.DenomTraces { + l = e.Size() + n += 1 + l + sovGenesis(uint64(l)) + } + } + l = m.Params.Size() + n += 1 + l + sovGenesis(uint64(l)) + return n +} + +func sovGenesis(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozGenesis(x uint64) (n int) { + return sovGenesis(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *GenesisState) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenesis + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: GenesisState: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: GenesisState: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PortId", 
wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenesis + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenesis + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenesis + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.PortId = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DenomTraces", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenesis + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenesis + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenesis + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.DenomTraces = append(m.DenomTraces, DenomTrace{}) + if err := m.DenomTraces[len(m.DenomTraces)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Params", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenesis + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenesis + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenesis + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Params.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenesis(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenesis + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipGenesis(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenesis + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenesis + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenesis + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthGenesis + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupGenesis + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal 
wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthGenesis + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthGenesis = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowGenesis = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupGenesis = fmt.Errorf("proto: unexpected end of group") +) diff --git a/applications/transfer/types/genesis_test.go b/applications/transfer/types/genesis_test.go new file mode 100644 index 0000000000..a2aba58ca6 --- /dev/null +++ b/applications/transfer/types/genesis_test.go @@ -0,0 +1,47 @@ +package types_test + +import ( + "testing" + + "github.com/stretchr/testify/require" + + "github.com/cosmos/cosmos-sdk/x/ibc/applications/transfer/types" +) + +func TestValidateGenesis(t *testing.T) { + testCases := []struct { + name string + genState *types.GenesisState + expPass bool + }{ + { + name: "default", + genState: types.DefaultGenesisState(), + expPass: true, + }, + { + "valid genesis", + &types.GenesisState{ + PortId: "portidone", + }, + true, + }, + { + "invalid client", + &types.GenesisState{ + PortId: "(INVALIDPORT)", + }, + false, + }, + } + + for _, tc := range testCases { + tc := tc + err := tc.genState.Validate() + if tc.expPass { + require.NoError(t, err, tc.name) + } else { + require.Error(t, err, tc.name) + } + } +} diff --git a/applications/transfer/types/keys.go b/applications/transfer/types/keys.go new file mode 100644 index 0000000000..c156af3fd8 --- /dev/null +++ b/applications/transfer/types/keys.go @@ -0,0 +1,55 @@ +package types + +import ( + "crypto/sha256" + "fmt" + + sdk "github.com/cosmos/cosmos-sdk/types" +) + +const ( + // ModuleName defines the IBC transfer name + ModuleName = "transfer" + + // Version defines the current version the IBC tranfer + // module supports + Version = "ics20-1" + + // PortID is the default port id that transfer module binds to + PortID = "transfer" + + // StoreKey is the store key string for IBC transfer + StoreKey = ModuleName + + // RouterKey is the message route for IBC transfer + RouterKey = ModuleName + + // QuerierRoute is the querier route for IBC transfer + QuerierRoute = ModuleName + + // DenomPrefix is the prefix used for internal SDK coin representation. + DenomPrefix = "ibc" +) + +var ( + // PortKey defines the key to store the port ID in store + PortKey = []byte{0x01} + // DenomTraceKey defines the key to store the denomination trace info in store + DenomTraceKey = []byte{0x02} +) + +// GetEscrowAddress returns the escrow address for the specified channel. +// The escrow address follows the format as outlined in ADR 028: +// https://github.com/cosmos/cosmos-sdk/blob/master/docs/architecture/adr-028-public-key-addresses.md +func GetEscrowAddress(portID, channelID string) sdk.AccAddress { + // a slash is used to create domain separation between port and channel identifiers to + // prevent address collisions between escrow addresses created for different channels + contents := fmt.Sprintf("%s/%s", portID, channelID) + + // ADR 028 AddressHash construction + preImage := []byte(Version) + preImage = append(preImage, 0) + preImage = append(preImage, contents...) 
+ hash := sha256.Sum256(preImage) + return hash[:20] +} diff --git a/applications/transfer/types/keys_test.go b/applications/transfer/types/keys_test.go new file mode 100644 index 0000000000..9ab3314c2e --- /dev/null +++ b/applications/transfer/types/keys_test.go @@ -0,0 +1,24 @@ +package types_test + +import ( + "testing" + + "github.com/stretchr/testify/require" + + "github.com/cosmos/cosmos-sdk/x/ibc/applications/transfer/types" +) + +// Test that there is domain separation between the port id and the channel id otherwise an +// escrow address may overlap with another channel end +func TestGetEscrowAddress(t *testing.T) { + var ( + port1 = "transfer" + channel1 = "channel" + port2 = "transfercha" + channel2 = "nnel" + ) + + escrow1 := types.GetEscrowAddress(port1, channel1) + escrow2 := types.GetEscrowAddress(port2, channel2) + require.NotEqual(t, escrow1, escrow2) +} diff --git a/applications/transfer/types/msgs.go b/applications/transfer/types/msgs.go new file mode 100644 index 0000000000..cf2293213a --- /dev/null +++ b/applications/transfer/types/msgs.go @@ -0,0 +1,85 @@ +package types + +import ( + "strings" + + sdk "github.com/cosmos/cosmos-sdk/types" + sdkerrors "github.com/cosmos/cosmos-sdk/types/errors" + clienttypes "github.com/cosmos/cosmos-sdk/x/ibc/core/02-client/types" + host "github.com/cosmos/cosmos-sdk/x/ibc/core/24-host" +) + +// msg types +const ( + TypeMsgTransfer = "transfer" +) + +// NewMsgTransfer creates a new MsgTransfer instance +//nolint:interfacer +func NewMsgTransfer( + sourcePort, sourceChannel string, + token sdk.Coin, sender sdk.AccAddress, receiver string, + timeoutHeight clienttypes.Height, timeoutTimestamp uint64, +) *MsgTransfer { + return &MsgTransfer{ + SourcePort: sourcePort, + SourceChannel: sourceChannel, + Token: token, + Sender: sender.String(), + Receiver: receiver, + TimeoutHeight: timeoutHeight, + TimeoutTimestamp: timeoutTimestamp, + } +} + +// Route implements sdk.Msg +func (MsgTransfer) Route() string { + return RouterKey +} + +// Type implements sdk.Msg +func (MsgTransfer) Type() string { + return TypeMsgTransfer +} + +// ValidateBasic performs a basic check of the MsgTransfer fields. +// NOTE: timeout height or timestamp values can be 0 to disable the timeout. +// NOTE: The recipient addresses format is not validated as the format defined by +// the chain is not known to IBC. +func (msg MsgTransfer) ValidateBasic() error { + if err := host.PortIdentifierValidator(msg.SourcePort); err != nil { + return sdkerrors.Wrap(err, "invalid source port ID") + } + if err := host.ChannelIdentifierValidator(msg.SourceChannel); err != nil { + return sdkerrors.Wrap(err, "invalid source channel ID") + } + if !msg.Token.IsValid() { + return sdkerrors.Wrap(sdkerrors.ErrInvalidCoins, msg.Token.String()) + } + if !msg.Token.IsPositive() { + return sdkerrors.Wrap(sdkerrors.ErrInsufficientFunds, msg.Token.String()) + } + // NOTE: sender format must be validated as it is required by the GetSigners function. + _, err := sdk.AccAddressFromBech32(msg.Sender) + if err != nil { + return sdkerrors.Wrapf(sdkerrors.ErrInvalidAddress, "string could not be parsed as address: %v", err) + } + if strings.TrimSpace(msg.Receiver) == "" { + return sdkerrors.Wrap(sdkerrors.ErrInvalidAddress, "missing recipient address") + } + return ValidateIBCDenom(msg.Token.Denom) +} + +// GetSignBytes implements sdk.Msg. 
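+// The sign bytes are the canonical sorted-key Amino JSON encoding of the message; +// TestMsgTransferGetSignBytes shows the expected output.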
+func (msg MsgTransfer) GetSignBytes() []byte { + return sdk.MustSortJSON(AminoCdc.MustMarshalJSON(&msg)) +} + +// GetSigners implements sdk.Msg +func (msg MsgTransfer) GetSigners() []sdk.AccAddress { + valAddr, err := sdk.AccAddressFromBech32(msg.Sender) + if err != nil { + panic(err) + } + return []sdk.AccAddress{valAddr} +} diff --git a/applications/transfer/types/msgs_test.go b/applications/transfer/types/msgs_test.go new file mode 100644 index 0000000000..1fc70c543b --- /dev/null +++ b/applications/transfer/types/msgs_test.go @@ -0,0 +1,103 @@ +package types + +import ( + "fmt" + "testing" + + "github.com/stretchr/testify/require" + + "github.com/cosmos/cosmos-sdk/crypto/keys/secp256k1" + sdk "github.com/cosmos/cosmos-sdk/types" + clienttypes "github.com/cosmos/cosmos-sdk/x/ibc/core/02-client/types" +) + +// define constants used for testing +const ( + validPort = "testportid" + invalidPort = "(invalidport1)" + invalidShortPort = "p" + invalidLongPort = "invalidlongportinvalidlongportinvalidlongportinvalidlongportinvalid" + + validChannel = "testchannel" + invalidChannel = "(invalidchannel1)" + invalidShortChannel = "invalid" + invalidLongChannel = "invalidlongchannelinvalidlongchannelinvalidlongchannelinvalidlongchannel" +) + +var ( + addr1 = sdk.AccAddress(secp256k1.GenPrivKey().PubKey().Address()) + addr2 = sdk.AccAddress("testaddr2").String() + emptyAddr sdk.AccAddress + + coin = sdk.NewCoin("atom", sdk.NewInt(100)) + ibcCoin = sdk.NewCoin("ibc/7F1D3FCF4AE79E1554D670D1AD949A9BA4E4A3C76C63093E17E446A46061A7A2", sdk.NewInt(100)) + invalidIBCCoin = sdk.NewCoin("notibc/7F1D3FCF4AE79E1554D670D1AD949A9BA4E4A3C76C63093E17E446A46061A7A2", sdk.NewInt(100)) + invalidDenomCoin = sdk.Coin{Denom: "0atom", Amount: sdk.NewInt(100)} + zeroCoin = sdk.Coin{Denom: "atoms", Amount: sdk.NewInt(0)} + + timeoutHeight = clienttypes.NewHeight(0, 10) +) + +// TestMsgTransferRoute tests Route for MsgTransfer +func TestMsgTransferRoute(t *testing.T) { + msg := NewMsgTransfer(validPort, validChannel, coin, addr1, addr2, timeoutHeight, 0) + + require.Equal(t, RouterKey, msg.Route()) +} + +// TestMsgTransferType tests Type for MsgTransfer +func TestMsgTransferType(t *testing.T) { + msg := NewMsgTransfer(validPort, validChannel, coin, addr1, addr2, timeoutHeight, 0) + + require.Equal(t, "transfer", msg.Type()) +} + +func TestMsgTransferGetSignBytes(t *testing.T) { + msg := NewMsgTransfer(validPort, validChannel, coin, addr1, addr2, timeoutHeight, 0) + expected := fmt.Sprintf(`{"type":"cosmos-sdk/MsgTransfer","value":{"receiver":"%s","sender":"%s","source_channel":"testchannel","source_port":"testportid","timeout_height":{"revision_height":"10"},"token":{"amount":"100","denom":"atom"}}}`, addr2, addr1) + require.NotPanics(t, func() { + res := msg.GetSignBytes() + require.Equal(t, expected, string(res)) + }) +} + +// TestMsgTransferValidation tests ValidateBasic for MsgTransfer +func TestMsgTransferValidation(t *testing.T) { + testCases := []struct { + name string + msg *MsgTransfer + expPass bool + }{ + {"valid msg with base denom", NewMsgTransfer(validPort, validChannel, coin, addr1, addr2, timeoutHeight, 0), true}, + {"valid msg with trace hash", NewMsgTransfer(validPort, validChannel, ibcCoin, addr1, addr2, timeoutHeight, 0), true}, + {"invalid ibc denom", NewMsgTransfer(validPort, validChannel, invalidIBCCoin, addr1, addr2, timeoutHeight, 0), false}, + {"too short port id", NewMsgTransfer(invalidShortPort, validChannel, coin, addr1, addr2, timeoutHeight, 0), false}, + {"too long port id", 
NewMsgTransfer(invalidLongPort, validChannel, coin, addr1, addr2, timeoutHeight, 0), false}, + {"port id contains non-alpha", NewMsgTransfer(invalidPort, validChannel, coin, addr1, addr2, timeoutHeight, 0), false}, + {"too short channel id", NewMsgTransfer(validPort, invalidShortChannel, coin, addr1, addr2, timeoutHeight, 0), false}, + {"too long channel id", NewMsgTransfer(validPort, invalidLongChannel, coin, addr1, addr2, timeoutHeight, 0), false}, + {"channel id contains non-alpha", NewMsgTransfer(validPort, invalidChannel, coin, addr1, addr2, timeoutHeight, 0), false}, + {"invalid denom", NewMsgTransfer(validPort, validChannel, invalidDenomCoin, addr1, addr2, timeoutHeight, 0), false}, + {"zero coin", NewMsgTransfer(validPort, validChannel, zeroCoin, addr1, addr2, timeoutHeight, 0), false}, + {"missing sender address", NewMsgTransfer(validPort, validChannel, coin, emptyAddr, addr2, timeoutHeight, 0), false}, + {"missing recipient address", NewMsgTransfer(validPort, validChannel, coin, addr1, "", timeoutHeight, 0), false}, + {"empty coin", NewMsgTransfer(validPort, validChannel, sdk.Coin{}, addr1, addr2, timeoutHeight, 0), false}, + } + + for i, tc := range testCases { + err := tc.msg.ValidateBasic() + if tc.expPass { + require.NoError(t, err, "valid test case %d failed: %s", i, tc.name) + } else { + require.Error(t, err, "invalid test case %d passed: %s", i, tc.name) + } + } +} + +// TestMsgTransferGetSigners tests GetSigners for MsgTransfer +func TestMsgTransferGetSigners(t *testing.T) { + msg := NewMsgTransfer(validPort, validChannel, coin, addr1, addr2, timeoutHeight, 0) + res := msg.GetSigners() + + require.Equal(t, []sdk.AccAddress{addr1}, res) +} diff --git a/applications/transfer/types/packet.go b/applications/transfer/types/packet.go new file mode 100644 index 0000000000..d726577f6f --- /dev/null +++ b/applications/transfer/types/packet.go @@ -0,0 +1,56 @@ +package types + +import ( + "strings" + "time" + + sdk "github.com/cosmos/cosmos-sdk/types" + sdkerrors "github.com/cosmos/cosmos-sdk/types/errors" +) + +var ( + // DefaultRelativePacketTimeoutHeight is the default packet timeout height (in blocks) relative + // to the current block height of the counterparty chain provided by the client state. The + // timeout is disabled when set to 0. + DefaultRelativePacketTimeoutHeight = "0-1000" + + // DefaultRelativePacketTimeoutTimestamp is the default packet timeout timestamp (in nanoseconds) + // relative to the current block timestamp of the counterparty chain provided by the client + // state. The timeout is disabled when set to 0. The default is currently set to a 10 minute + // timeout. + DefaultRelativePacketTimeoutTimestamp = uint64((time.Duration(10) * time.Minute).Nanoseconds()) +) + +// NewFungibleTokenPacketData constructs a new FungibleTokenPacketData instance +func NewFungibleTokenPacketData( + denom string, amount uint64, + sender, receiver string, +) FungibleTokenPacketData { + return FungibleTokenPacketData{ + Denom: denom, + Amount: amount, + Sender: sender, + Receiver: receiver, + } +} + +// ValidateBasic is used for validating the token transfer. +// NOTE: The address formats are not validated as the sender and recipient can have different +// formats defined by their corresponding chains that are not known to IBC.
+func (ftpd FungibleTokenPacketData) ValidateBasic() error { + if ftpd.Amount == 0 { + return sdkerrors.Wrap(ErrInvalidAmount, "amount cannot be 0") + } + if strings.TrimSpace(ftpd.Sender) == "" { + return sdkerrors.Wrap(sdkerrors.ErrInvalidAddress, "sender address cannot be blank") + } + if strings.TrimSpace(ftpd.Receiver) == "" { + return sdkerrors.Wrap(sdkerrors.ErrInvalidAddress, "receiver address cannot be blank") + } + return ValidatePrefixedDenom(ftpd.Denom) +} + +// GetBytes is a helper for serialising +func (ftpd FungibleTokenPacketData) GetBytes() []byte { + return sdk.MustSortJSON(ModuleCdc.MustMarshalJSON(&ftpd)) +} diff --git a/applications/transfer/types/packet_test.go b/applications/transfer/types/packet_test.go new file mode 100644 index 0000000000..1edcb093d3 --- /dev/null +++ b/applications/transfer/types/packet_test.go @@ -0,0 +1,36 @@ +package types + +import ( + "testing" + + "github.com/stretchr/testify/require" +) + +const ( + denom = "transfer/gaiachannel/atom" + amount = uint64(100) +) + +// TestFungibleTokenPacketDataValidateBasic tests ValidateBasic for FungibleTokenPacketData +func TestFungibleTokenPacketDataValidateBasic(t *testing.T) { + testCases := []struct { + name string + packetData FungibleTokenPacketData + expPass bool + }{ + {"valid packet", NewFungibleTokenPacketData(denom, amount, addr1.String(), addr2), true}, + {"invalid denom", NewFungibleTokenPacketData("", amount, addr1.String(), addr2), false}, + {"invalid amount", NewFungibleTokenPacketData(denom, 0, addr1.String(), addr2), false}, + {"missing sender address", NewFungibleTokenPacketData(denom, amount, emptyAddr.String(), addr2), false}, + {"missing recipient address", NewFungibleTokenPacketData(denom, amount, addr1.String(), emptyAddr.String()), false}, + } + + for i, tc := range testCases { + err := tc.packetData.ValidateBasic() + if tc.expPass { + require.NoError(t, err, "valid test case %d failed: %v", i, err) + } else { + require.Error(t, err, "invalid test case %d passed: %s", i, tc.name) + } + } +} diff --git a/applications/transfer/types/params.go b/applications/transfer/types/params.go new file mode 100644 index 0000000000..4ecdfab77e --- /dev/null +++ b/applications/transfer/types/params.go @@ -0,0 +1,65 @@ +package types + +import ( + "fmt" + + paramtypes "github.com/cosmos/cosmos-sdk/x/params/types" +) + +const ( + // DefaultSendEnabled enabled + DefaultSendEnabled = true + // DefaultReceiveEnabled enabled + DefaultReceiveEnabled = true +) + +var ( + // KeySendEnabled is store's key for SendEnabled Params + KeySendEnabled = []byte("SendEnabled") + // KeyReceiveEnabled is store's key for ReceiveEnabled Params + KeyReceiveEnabled = []byte("ReceiveEnabled") +) + +// ParamKeyTable type declaration for parameters +func ParamKeyTable() paramtypes.KeyTable { + return paramtypes.NewKeyTable().RegisterParamSet(&Params{}) +} + +// NewParams creates a new parameter configuration for the ibc transfer module +func NewParams(enableSend, enableReceive bool) Params { + return Params{ + SendEnabled: enableSend, + ReceiveEnabled: enableReceive, + } +} + +// DefaultParams is the default parameter configuration for the ibc-transfer module +func DefaultParams() Params { + return NewParams(DefaultSendEnabled, DefaultReceiveEnabled) +} + +// Validate all ibc-transfer module parameters +func (p Params) Validate() error { + if err := validateEnabled(p.SendEnabled); err != nil { + return err + } + + return validateEnabled(p.ReceiveEnabled) +} + +// ParamSetPairs implements params.ParamSet +func (p *Params) 
ParamSetPairs() paramtypes.ParamSetPairs { + return paramtypes.ParamSetPairs{ + paramtypes.NewParamSetPair(KeySendEnabled, p.SendEnabled, validateEnabled), + paramtypes.NewParamSetPair(KeyReceiveEnabled, p.ReceiveEnabled, validateEnabled), + } +} + +func validateEnabled(i interface{}) error { + _, ok := i.(bool) + if !ok { + return fmt.Errorf("invalid parameter type: %T", i) + } + + return nil +} diff --git a/applications/transfer/types/params_test.go b/applications/transfer/types/params_test.go new file mode 100644 index 0000000000..825efb825c --- /dev/null +++ b/applications/transfer/types/params_test.go @@ -0,0 +1,12 @@ +package types + +import ( + "testing" + + "github.com/stretchr/testify/require" +) + +func TestValidateParams(t *testing.T) { + require.NoError(t, DefaultParams().Validate()) + require.NoError(t, NewParams(true, false).Validate()) +} diff --git a/applications/transfer/types/query.pb.go b/applications/transfer/types/query.pb.go new file mode 100644 index 0000000000..1c1d692951 --- /dev/null +++ b/applications/transfer/types/query.pb.go @@ -0,0 +1,1418 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: ibc/applications/transfer/v1/query.proto + +package types + +import ( + context "context" + fmt "fmt" + query "github.com/cosmos/cosmos-sdk/types/query" + _ "github.com/gogo/protobuf/gogoproto" + grpc1 "github.com/gogo/protobuf/grpc" + proto "github.com/gogo/protobuf/proto" + _ "google.golang.org/genproto/googleapis/api/annotations" + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" + io "io" + math "math" + math_bits "math/bits" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +// QueryDenomTraceRequest is the request type for the Query/DenomTrace RPC +// method +type QueryDenomTraceRequest struct { + // hash (in hex format) of the denomination trace information. 
+ Hash string `protobuf:"bytes,1,opt,name=hash,proto3" json:"hash,omitempty"` +} + +func (m *QueryDenomTraceRequest) Reset() { *m = QueryDenomTraceRequest{} } +func (m *QueryDenomTraceRequest) String() string { return proto.CompactTextString(m) } +func (*QueryDenomTraceRequest) ProtoMessage() {} +func (*QueryDenomTraceRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_a638e2800a01538c, []int{0} +} +func (m *QueryDenomTraceRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *QueryDenomTraceRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_QueryDenomTraceRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *QueryDenomTraceRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_QueryDenomTraceRequest.Merge(m, src) +} +func (m *QueryDenomTraceRequest) XXX_Size() int { + return m.Size() +} +func (m *QueryDenomTraceRequest) XXX_DiscardUnknown() { + xxx_messageInfo_QueryDenomTraceRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_QueryDenomTraceRequest proto.InternalMessageInfo + +func (m *QueryDenomTraceRequest) GetHash() string { + if m != nil { + return m.Hash + } + return "" +} + +// QueryDenomTraceResponse is the response type for the Query/DenomTrace RPC +// method. +type QueryDenomTraceResponse struct { + // denom_trace returns the requested denomination trace information. + DenomTrace *DenomTrace `protobuf:"bytes,1,opt,name=denom_trace,json=denomTrace,proto3" json:"denom_trace,omitempty"` +} + +func (m *QueryDenomTraceResponse) Reset() { *m = QueryDenomTraceResponse{} } +func (m *QueryDenomTraceResponse) String() string { return proto.CompactTextString(m) } +func (*QueryDenomTraceResponse) ProtoMessage() {} +func (*QueryDenomTraceResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_a638e2800a01538c, []int{1} +} +func (m *QueryDenomTraceResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *QueryDenomTraceResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_QueryDenomTraceResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *QueryDenomTraceResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_QueryDenomTraceResponse.Merge(m, src) +} +func (m *QueryDenomTraceResponse) XXX_Size() int { + return m.Size() +} +func (m *QueryDenomTraceResponse) XXX_DiscardUnknown() { + xxx_messageInfo_QueryDenomTraceResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_QueryDenomTraceResponse proto.InternalMessageInfo + +func (m *QueryDenomTraceResponse) GetDenomTrace() *DenomTrace { + if m != nil { + return m.DenomTrace + } + return nil +} + +// QueryConnectionsRequest is the request type for the Query/DenomTraces RPC +// method +type QueryDenomTracesRequest struct { + // pagination defines an optional pagination for the request. 
+ Pagination *query.PageRequest `protobuf:"bytes,1,opt,name=pagination,proto3" json:"pagination,omitempty"` +} + +func (m *QueryDenomTracesRequest) Reset() { *m = QueryDenomTracesRequest{} } +func (m *QueryDenomTracesRequest) String() string { return proto.CompactTextString(m) } +func (*QueryDenomTracesRequest) ProtoMessage() {} +func (*QueryDenomTracesRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_a638e2800a01538c, []int{2} +} +func (m *QueryDenomTracesRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *QueryDenomTracesRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_QueryDenomTracesRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *QueryDenomTracesRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_QueryDenomTracesRequest.Merge(m, src) +} +func (m *QueryDenomTracesRequest) XXX_Size() int { + return m.Size() +} +func (m *QueryDenomTracesRequest) XXX_DiscardUnknown() { + xxx_messageInfo_QueryDenomTracesRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_QueryDenomTracesRequest proto.InternalMessageInfo + +func (m *QueryDenomTracesRequest) GetPagination() *query.PageRequest { + if m != nil { + return m.Pagination + } + return nil +} + +// QueryConnectionsResponse is the response type for the Query/DenomTraces RPC +// method. +type QueryDenomTracesResponse struct { + // denom_traces returns all denominations trace information. + DenomTraces Traces `protobuf:"bytes,1,rep,name=denom_traces,json=denomTraces,proto3,castrepeated=Traces" json:"denom_traces"` + // pagination defines the pagination in the response. + Pagination *query.PageResponse `protobuf:"bytes,2,opt,name=pagination,proto3" json:"pagination,omitempty"` +} + +func (m *QueryDenomTracesResponse) Reset() { *m = QueryDenomTracesResponse{} } +func (m *QueryDenomTracesResponse) String() string { return proto.CompactTextString(m) } +func (*QueryDenomTracesResponse) ProtoMessage() {} +func (*QueryDenomTracesResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_a638e2800a01538c, []int{3} +} +func (m *QueryDenomTracesResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *QueryDenomTracesResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_QueryDenomTracesResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *QueryDenomTracesResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_QueryDenomTracesResponse.Merge(m, src) +} +func (m *QueryDenomTracesResponse) XXX_Size() int { + return m.Size() +} +func (m *QueryDenomTracesResponse) XXX_DiscardUnknown() { + xxx_messageInfo_QueryDenomTracesResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_QueryDenomTracesResponse proto.InternalMessageInfo + +func (m *QueryDenomTracesResponse) GetDenomTraces() Traces { + if m != nil { + return m.DenomTraces + } + return nil +} + +func (m *QueryDenomTracesResponse) GetPagination() *query.PageResponse { + if m != nil { + return m.Pagination + } + return nil +} + +// QueryParamsRequest is the request type for the Query/Params RPC method. 
+type QueryParamsRequest struct { +} + +func (m *QueryParamsRequest) Reset() { *m = QueryParamsRequest{} } +func (m *QueryParamsRequest) String() string { return proto.CompactTextString(m) } +func (*QueryParamsRequest) ProtoMessage() {} +func (*QueryParamsRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_a638e2800a01538c, []int{4} +} +func (m *QueryParamsRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *QueryParamsRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_QueryParamsRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *QueryParamsRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_QueryParamsRequest.Merge(m, src) +} +func (m *QueryParamsRequest) XXX_Size() int { + return m.Size() +} +func (m *QueryParamsRequest) XXX_DiscardUnknown() { + xxx_messageInfo_QueryParamsRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_QueryParamsRequest proto.InternalMessageInfo + +// QueryParamsResponse is the response type for the Query/Params RPC method. +type QueryParamsResponse struct { + // params defines the parameters of the module. + Params *Params `protobuf:"bytes,1,opt,name=params,proto3" json:"params,omitempty"` +} + +func (m *QueryParamsResponse) Reset() { *m = QueryParamsResponse{} } +func (m *QueryParamsResponse) String() string { return proto.CompactTextString(m) } +func (*QueryParamsResponse) ProtoMessage() {} +func (*QueryParamsResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_a638e2800a01538c, []int{5} +} +func (m *QueryParamsResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *QueryParamsResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_QueryParamsResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *QueryParamsResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_QueryParamsResponse.Merge(m, src) +} +func (m *QueryParamsResponse) XXX_Size() int { + return m.Size() +} +func (m *QueryParamsResponse) XXX_DiscardUnknown() { + xxx_messageInfo_QueryParamsResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_QueryParamsResponse proto.InternalMessageInfo + +func (m *QueryParamsResponse) GetParams() *Params { + if m != nil { + return m.Params + } + return nil +} + +func init() { + proto.RegisterType((*QueryDenomTraceRequest)(nil), "ibc.applications.transfer.v1.QueryDenomTraceRequest") + proto.RegisterType((*QueryDenomTraceResponse)(nil), "ibc.applications.transfer.v1.QueryDenomTraceResponse") + proto.RegisterType((*QueryDenomTracesRequest)(nil), "ibc.applications.transfer.v1.QueryDenomTracesRequest") + proto.RegisterType((*QueryDenomTracesResponse)(nil), "ibc.applications.transfer.v1.QueryDenomTracesResponse") + proto.RegisterType((*QueryParamsRequest)(nil), "ibc.applications.transfer.v1.QueryParamsRequest") + proto.RegisterType((*QueryParamsResponse)(nil), "ibc.applications.transfer.v1.QueryParamsResponse") +} + +func init() { + proto.RegisterFile("ibc/applications/transfer/v1/query.proto", fileDescriptor_a638e2800a01538c) +} + +var fileDescriptor_a638e2800a01538c = []byte{ + // 528 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x54, 0x3f, 0x6f, 0xd3, 0x40, + 
0x14, 0xcf, 0x95, 0x12, 0x89, 0x17, 0xc4, 0x70, 0x54, 0x10, 0x59, 0x95, 0x5b, 0x59, 0x08, 0x02, + 0x85, 0x3b, 0x5c, 0xa0, 0x30, 0xa0, 0x0e, 0x15, 0x02, 0xb1, 0x95, 0xc0, 0x80, 0x60, 0x40, 0x67, + 0xe7, 0x70, 0x2c, 0x1a, 0x9f, 0xeb, 0xbb, 0x44, 0x54, 0x88, 0x85, 0x4f, 0x80, 0xc4, 0x8e, 0x98, + 0xd9, 0x19, 0xd8, 0x18, 0x3b, 0x56, 0x62, 0x61, 0x02, 0x94, 0xf0, 0x41, 0x90, 0xef, 0xce, 0x8d, + 0xa3, 0x20, 0x13, 0x4f, 0x39, 0x5d, 0xde, 0xef, 0xfd, 0xfe, 0xbc, 0xe7, 0x83, 0x4e, 0x1c, 0x84, + 0x94, 0xa5, 0xe9, 0x5e, 0x1c, 0x32, 0x15, 0x8b, 0x44, 0x52, 0x95, 0xb1, 0x44, 0xbe, 0xe4, 0x19, + 0x1d, 0xf9, 0x74, 0x7f, 0xc8, 0xb3, 0x03, 0x92, 0x66, 0x42, 0x09, 0xbc, 0x1a, 0x07, 0x21, 0x29, + 0x57, 0x92, 0xa2, 0x92, 0x8c, 0x7c, 0x67, 0x25, 0x12, 0x91, 0xd0, 0x85, 0x34, 0x3f, 0x19, 0x8c, + 0x73, 0x25, 0x14, 0x72, 0x20, 0x24, 0x0d, 0x98, 0xe4, 0xa6, 0x19, 0x1d, 0xf9, 0x01, 0x57, 0xcc, + 0xa7, 0x29, 0x8b, 0xe2, 0x44, 0x37, 0xb2, 0xb5, 0x1b, 0x95, 0x4a, 0x8e, 0xb9, 0x4c, 0xf1, 0x6a, + 0x24, 0x44, 0xb4, 0xc7, 0x29, 0x4b, 0x63, 0xca, 0x92, 0x44, 0x28, 0x2b, 0x49, 0xff, 0xeb, 0x5d, + 0x85, 0x73, 0x8f, 0x72, 0xb2, 0x7b, 0x3c, 0x11, 0x83, 0x27, 0x19, 0x0b, 0x79, 0x97, 0xef, 0x0f, + 0xb9, 0x54, 0x18, 0xc3, 0x72, 0x9f, 0xc9, 0x7e, 0x1b, 0xad, 0xa3, 0xce, 0xa9, 0xae, 0x3e, 0x7b, + 0x3d, 0x38, 0x3f, 0x57, 0x2d, 0x53, 0x91, 0x48, 0x8e, 0x1f, 0x42, 0xab, 0x97, 0xdf, 0xbe, 0x50, + 0xf9, 0xb5, 0x46, 0xb5, 0x36, 0x3b, 0xa4, 0x2a, 0x09, 0x52, 0x6a, 0x03, 0xbd, 0xe3, 0xb3, 0xc7, + 0xe6, 0x58, 0x64, 0x21, 0xea, 0x3e, 0xc0, 0x34, 0x0d, 0x4b, 0x72, 0x91, 0x98, 0xe8, 0x48, 0x1e, + 0x1d, 0x31, 0x73, 0xb0, 0xd1, 0x91, 0x5d, 0x16, 0x15, 0x86, 0xba, 0x25, 0xa4, 0xf7, 0x0d, 0x41, + 0x7b, 0x9e, 0xc3, 0x5a, 0x79, 0x0e, 0xa7, 0x4b, 0x56, 0x64, 0x1b, 0xad, 0x9f, 0xa8, 0xe3, 0x65, + 0xe7, 0xcc, 0xe1, 0xcf, 0xb5, 0xc6, 0xe7, 0x5f, 0x6b, 0x4d, 0xdb, 0xb7, 0x35, 0xf5, 0x26, 0xf1, + 0x83, 0x19, 0x07, 0x4b, 0xda, 0xc1, 0xa5, 0xff, 0x3a, 0x30, 0xca, 0x66, 0x2c, 0xac, 0x00, 0xd6, + 0x0e, 0x76, 0x59, 0xc6, 0x06, 0x45, 0x40, 0xde, 0x63, 0x38, 0x3b, 0x73, 0x6b, 0x2d, 0xdd, 0x85, + 0x66, 0xaa, 0x6f, 0x6c, 0x66, 0x17, 0xaa, 0xcd, 0x58, 0xb4, 0xc5, 0x6c, 0x7e, 0x5c, 0x86, 0x93, + 0xba, 0x2b, 0xfe, 0x8a, 0x00, 0xa6, 0x4e, 0xf1, 0xcd, 0xea, 0x36, 0xff, 0xde, 0x2c, 0xe7, 0x56, + 0x4d, 0x94, 0xf1, 0xe0, 0x6d, 0xbf, 0xfb, 0xfe, 0xe7, 0xc3, 0xd2, 0x1d, 0xbc, 0x45, 0xab, 0xd6, + 0xdf, 0x7c, 0x32, 0xe5, 0xf9, 0xd1, 0x37, 0xf9, 0xee, 0xbe, 0xc5, 0x5f, 0x10, 0xb4, 0x4a, 0xe3, + 0xc6, 0xf5, 0x64, 0x14, 0x09, 0x3b, 0x5b, 0x75, 0x61, 0x56, 0xfe, 0x6d, 0x2d, 0xdf, 0xc7, 0xb4, + 0xa6, 0x7c, 0xfc, 0x09, 0x41, 0xd3, 0x0c, 0x04, 0x5f, 0x5f, 0x80, 0x7b, 0x66, 0x1f, 0x1c, 0xbf, + 0x06, 0xc2, 0x0a, 0xf5, 0xb5, 0xd0, 0x0d, 0x7c, 0x79, 0x01, 0xa1, 0x66, 0x41, 0x76, 0x9e, 0x1e, + 0x8e, 0x5d, 0x74, 0x34, 0x76, 0xd1, 0xef, 0xb1, 0x8b, 0xde, 0x4f, 0xdc, 0xc6, 0xd1, 0xc4, 0x6d, + 0xfc, 0x98, 0xb8, 0x8d, 0x67, 0xdb, 0x51, 0xac, 0xfa, 0xc3, 0x80, 0x84, 0x62, 0x40, 0xed, 0x0b, + 0x67, 0x7e, 0xae, 0xc9, 0xde, 0x2b, 0xfa, 0xba, 0x82, 0x42, 0x1d, 0xa4, 0x5c, 0x06, 0x4d, 0xfd, + 0x4c, 0xdd, 0xf8, 0x1b, 0x00, 0x00, 0xff, 0xff, 0xb2, 0x7f, 0xfe, 0xbd, 0x7d, 0x05, 0x00, 0x00, +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConn + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion4 + +// QueryClient is the client API for Query service. 
+// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. +type QueryClient interface { + // DenomTrace queries a denomination trace information. + DenomTrace(ctx context.Context, in *QueryDenomTraceRequest, opts ...grpc.CallOption) (*QueryDenomTraceResponse, error) + // DenomTraces queries all denomination traces. + DenomTraces(ctx context.Context, in *QueryDenomTracesRequest, opts ...grpc.CallOption) (*QueryDenomTracesResponse, error) + // Params queries all parameters of the ibc-transfer module. + Params(ctx context.Context, in *QueryParamsRequest, opts ...grpc.CallOption) (*QueryParamsResponse, error) +} + +type queryClient struct { + cc grpc1.ClientConn +} + +func NewQueryClient(cc grpc1.ClientConn) QueryClient { + return &queryClient{cc} +} + +func (c *queryClient) DenomTrace(ctx context.Context, in *QueryDenomTraceRequest, opts ...grpc.CallOption) (*QueryDenomTraceResponse, error) { + out := new(QueryDenomTraceResponse) + err := c.cc.Invoke(ctx, "/ibc.applications.transfer.v1.Query/DenomTrace", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *queryClient) DenomTraces(ctx context.Context, in *QueryDenomTracesRequest, opts ...grpc.CallOption) (*QueryDenomTracesResponse, error) { + out := new(QueryDenomTracesResponse) + err := c.cc.Invoke(ctx, "/ibc.applications.transfer.v1.Query/DenomTraces", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *queryClient) Params(ctx context.Context, in *QueryParamsRequest, opts ...grpc.CallOption) (*QueryParamsResponse, error) { + out := new(QueryParamsResponse) + err := c.cc.Invoke(ctx, "/ibc.applications.transfer.v1.Query/Params", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// QueryServer is the server API for Query service. +type QueryServer interface { + // DenomTrace queries a denomination trace information. + DenomTrace(context.Context, *QueryDenomTraceRequest) (*QueryDenomTraceResponse, error) + // DenomTraces queries all denomination traces. + DenomTraces(context.Context, *QueryDenomTracesRequest) (*QueryDenomTracesResponse, error) + // Params queries all parameters of the ibc-transfer module. + Params(context.Context, *QueryParamsRequest) (*QueryParamsResponse, error) +} + +// UnimplementedQueryServer can be embedded to have forward compatible implementations. 
+type UnimplementedQueryServer struct { +} + +func (*UnimplementedQueryServer) DenomTrace(ctx context.Context, req *QueryDenomTraceRequest) (*QueryDenomTraceResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method DenomTrace not implemented") +} +func (*UnimplementedQueryServer) DenomTraces(ctx context.Context, req *QueryDenomTracesRequest) (*QueryDenomTracesResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method DenomTraces not implemented") +} +func (*UnimplementedQueryServer) Params(ctx context.Context, req *QueryParamsRequest) (*QueryParamsResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method Params not implemented") +} + +func RegisterQueryServer(s grpc1.Server, srv QueryServer) { + s.RegisterService(&_Query_serviceDesc, srv) +} + +func _Query_DenomTrace_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(QueryDenomTraceRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(QueryServer).DenomTrace(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/ibc.applications.transfer.v1.Query/DenomTrace", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(QueryServer).DenomTrace(ctx, req.(*QueryDenomTraceRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Query_DenomTraces_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(QueryDenomTracesRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(QueryServer).DenomTraces(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/ibc.applications.transfer.v1.Query/DenomTraces", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(QueryServer).DenomTraces(ctx, req.(*QueryDenomTracesRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Query_Params_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(QueryParamsRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(QueryServer).Params(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/ibc.applications.transfer.v1.Query/Params", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(QueryServer).Params(ctx, req.(*QueryParamsRequest)) + } + return interceptor(ctx, in, info, handler) +} + +var _Query_serviceDesc = grpc.ServiceDesc{ + ServiceName: "ibc.applications.transfer.v1.Query", + HandlerType: (*QueryServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "DenomTrace", + Handler: _Query_DenomTrace_Handler, + }, + { + MethodName: "DenomTraces", + Handler: _Query_DenomTraces_Handler, + }, + { + MethodName: "Params", + Handler: _Query_Params_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "ibc/applications/transfer/v1/query.proto", +} + +func (m *QueryDenomTraceRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *QueryDenomTraceRequest) MarshalTo(dAtA []byte) (int, error) { + size := 
m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *QueryDenomTraceRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Hash) > 0 { + i -= len(m.Hash) + copy(dAtA[i:], m.Hash) + i = encodeVarintQuery(dAtA, i, uint64(len(m.Hash))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *QueryDenomTraceResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *QueryDenomTraceResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *QueryDenomTraceResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.DenomTrace != nil { + { + size, err := m.DenomTrace.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintQuery(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *QueryDenomTracesRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *QueryDenomTracesRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *QueryDenomTracesRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Pagination != nil { + { + size, err := m.Pagination.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintQuery(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *QueryDenomTracesResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *QueryDenomTracesResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *QueryDenomTracesResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Pagination != nil { + { + size, err := m.Pagination.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintQuery(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if len(m.DenomTraces) > 0 { + for iNdEx := len(m.DenomTraces) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.DenomTraces[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintQuery(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *QueryParamsRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *QueryParamsRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *QueryParamsRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + return len(dAtA) - i, nil +} + +func (m *QueryParamsResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = 
make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *QueryParamsResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *QueryParamsResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Params != nil { + { + size, err := m.Params.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintQuery(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func encodeVarintQuery(dAtA []byte, offset int, v uint64) int { + offset -= sovQuery(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *QueryDenomTraceRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Hash) + if l > 0 { + n += 1 + l + sovQuery(uint64(l)) + } + return n +} + +func (m *QueryDenomTraceResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.DenomTrace != nil { + l = m.DenomTrace.Size() + n += 1 + l + sovQuery(uint64(l)) + } + return n +} + +func (m *QueryDenomTracesRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Pagination != nil { + l = m.Pagination.Size() + n += 1 + l + sovQuery(uint64(l)) + } + return n +} + +func (m *QueryDenomTracesResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.DenomTraces) > 0 { + for _, e := range m.DenomTraces { + l = e.Size() + n += 1 + l + sovQuery(uint64(l)) + } + } + if m.Pagination != nil { + l = m.Pagination.Size() + n += 1 + l + sovQuery(uint64(l)) + } + return n +} + +func (m *QueryParamsRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + return n +} + +func (m *QueryParamsResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Params != nil { + l = m.Params.Size() + n += 1 + l + sovQuery(uint64(l)) + } + return n +} + +func sovQuery(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozQuery(x uint64) (n int) { + return sovQuery(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *QueryDenomTraceRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: QueryDenomTraceRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: QueryDenomTraceRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Hash", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return 
ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Hash = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipQuery(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthQuery + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *QueryDenomTraceResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: QueryDenomTraceResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: QueryDenomTraceResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DenomTrace", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.DenomTrace == nil { + m.DenomTrace = &DenomTrace{} + } + if err := m.DenomTrace.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipQuery(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthQuery + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *QueryDenomTracesRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: QueryDenomTracesRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: QueryDenomTracesRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Pagination", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Pagination == nil 
{ + m.Pagination = &query.PageRequest{} + } + if err := m.Pagination.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipQuery(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthQuery + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *QueryDenomTracesResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: QueryDenomTracesResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: QueryDenomTracesResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DenomTraces", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.DenomTraces = append(m.DenomTraces, DenomTrace{}) + if err := m.DenomTraces[len(m.DenomTraces)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Pagination", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Pagination == nil { + m.Pagination = &query.PageResponse{} + } + if err := m.Pagination.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipQuery(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthQuery + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *QueryParamsRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: QueryParamsRequest: wiretype 
end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: QueryParamsRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipQuery(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthQuery + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *QueryParamsResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: QueryParamsResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: QueryParamsResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Params", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Params == nil { + m.Params = &Params{} + } + if err := m.Params.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipQuery(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthQuery + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipQuery(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowQuery + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowQuery + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowQuery + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthQuery + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupQuery + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthQuery + } + if depth == 0 { + return 
iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthQuery = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowQuery = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupQuery = fmt.Errorf("proto: unexpected end of group") +) diff --git a/applications/transfer/types/query.pb.gw.go b/applications/transfer/types/query.pb.gw.go new file mode 100644 index 0000000000..007ed66820 --- /dev/null +++ b/applications/transfer/types/query.pb.gw.go @@ -0,0 +1,326 @@ +// Code generated by protoc-gen-grpc-gateway. DO NOT EDIT. +// source: ibc/applications/transfer/v1/query.proto + +/* +Package types is a reverse proxy. + +It translates gRPC into RESTful JSON APIs. +*/ +package types + +import ( + "context" + "io" + "net/http" + + "github.com/golang/protobuf/descriptor" + "github.com/golang/protobuf/proto" + "github.com/grpc-ecosystem/grpc-gateway/runtime" + "github.com/grpc-ecosystem/grpc-gateway/utilities" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/status" +) + +// Suppress "imported and not used" errors +var _ codes.Code +var _ io.Reader +var _ status.Status +var _ = runtime.String +var _ = utilities.NewDoubleArray +var _ = descriptor.ForMessage + +func request_Query_DenomTrace_0(ctx context.Context, marshaler runtime.Marshaler, client QueryClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq QueryDenomTraceRequest + var metadata runtime.ServerMetadata + + var ( + val string + ok bool + err error + _ = err + ) + + val, ok = pathParams["hash"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "hash") + } + + protoReq.Hash, err = runtime.String(val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "hash", err) + } + + msg, err := client.DenomTrace(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err + +} + +func local_request_Query_DenomTrace_0(ctx context.Context, marshaler runtime.Marshaler, server QueryServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq QueryDenomTraceRequest + var metadata runtime.ServerMetadata + + var ( + val string + ok bool + err error + _ = err + ) + + val, ok = pathParams["hash"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "hash") + } + + protoReq.Hash, err = runtime.String(val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "hash", err) + } + + msg, err := server.DenomTrace(ctx, &protoReq) + return msg, metadata, err + +} + +var ( + filter_Query_DenomTraces_0 = &utilities.DoubleArray{Encoding: map[string]int{}, Base: []int(nil), Check: []int(nil)} +) + +func request_Query_DenomTraces_0(ctx context.Context, marshaler runtime.Marshaler, client QueryClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq QueryDenomTracesRequest + var metadata runtime.ServerMetadata + + if err := req.ParseForm(); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_Query_DenomTraces_0); err != nil { + return nil, metadata, 
status.Errorf(codes.InvalidArgument, "%v", err) + } + + msg, err := client.DenomTraces(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err + +} + +func local_request_Query_DenomTraces_0(ctx context.Context, marshaler runtime.Marshaler, server QueryServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq QueryDenomTracesRequest + var metadata runtime.ServerMetadata + + if err := req.ParseForm(); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_Query_DenomTraces_0); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + msg, err := server.DenomTraces(ctx, &protoReq) + return msg, metadata, err + +} + +func request_Query_Params_0(ctx context.Context, marshaler runtime.Marshaler, client QueryClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq QueryParamsRequest + var metadata runtime.ServerMetadata + + msg, err := client.Params(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err + +} + +func local_request_Query_Params_0(ctx context.Context, marshaler runtime.Marshaler, server QueryServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq QueryParamsRequest + var metadata runtime.ServerMetadata + + msg, err := server.Params(ctx, &protoReq) + return msg, metadata, err + +} + +// RegisterQueryHandlerServer registers the http handlers for service Query to "mux". +// UnaryRPC :call QueryServer directly. +// StreamingRPC :currently unsupported pending https://github.com/grpc/grpc-go/issues/906. +// Note that using this registration option will cause many gRPC library features (such as grpc.SendHeader, etc) to stop working. Consider using RegisterQueryHandlerFromEndpoint instead. +func RegisterQueryHandlerServer(ctx context.Context, mux *runtime.ServeMux, server QueryServer) error { + + mux.Handle("GET", pattern_Query_DenomTrace_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := local_request_Query_DenomTrace_0(rctx, inboundMarshaler, server, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Query_DenomTrace_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
+ + }) + + mux.Handle("GET", pattern_Query_DenomTraces_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := local_request_Query_DenomTraces_0(rctx, inboundMarshaler, server, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Query_DenomTraces_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + + mux.Handle("GET", pattern_Query_Params_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := local_request_Query_Params_0(rctx, inboundMarshaler, server, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Query_Params_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + + return nil +} + +// RegisterQueryHandlerFromEndpoint is same as RegisterQueryHandler but +// automatically dials to "endpoint" and closes the connection when "ctx" gets done. +func RegisterQueryHandlerFromEndpoint(ctx context.Context, mux *runtime.ServeMux, endpoint string, opts []grpc.DialOption) (err error) { + conn, err := grpc.Dial(endpoint, opts...) + if err != nil { + return err + } + defer func() { + if err != nil { + if cerr := conn.Close(); cerr != nil { + grpclog.Infof("Failed to close conn to %s: %v", endpoint, cerr) + } + return + } + go func() { + <-ctx.Done() + if cerr := conn.Close(); cerr != nil { + grpclog.Infof("Failed to close conn to %s: %v", endpoint, cerr) + } + }() + }() + + return RegisterQueryHandler(ctx, mux, conn) +} + +// RegisterQueryHandler registers the http handlers for service Query to "mux". +// The handlers forward requests to the grpc endpoint over "conn". +func RegisterQueryHandler(ctx context.Context, mux *runtime.ServeMux, conn *grpc.ClientConn) error { + return RegisterQueryHandlerClient(ctx, mux, NewQueryClient(conn)) +} + +// RegisterQueryHandlerClient registers the http handlers for service Query +// to "mux". The handlers forward requests to the grpc endpoint over the given implementation of "QueryClient". +// Note: the gRPC framework executes interceptors within the gRPC handler. If the passed in "QueryClient" +// doesn't go through the normal gRPC flow (creating a gRPC client etc.) then it will be up to the passed in +// "QueryClient" to call the correct interceptors. 
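The registration helpers documented above expose the Query service over REST in three ways: in-process (RegisterQueryHandlerServer), via a freshly dialed connection (RegisterQueryHandlerFromEndpoint), or over an existing client connection (RegisterQueryHandler / RegisterQueryHandlerClient). A minimal sketch of the endpoint-dialing variant follows; the import alias/path, the gRPC address and the listen address are placeholders, not taken from this patch:

package main

import (
	"context"
	"net/http"

	"github.com/grpc-ecosystem/grpc-gateway/runtime"
	"google.golang.org/grpc"

	// Illustrative import path; adjust to the module path declared in go.mod.
	transfertypes "github.com/cosmos/ibc-go/applications/transfer/types"
)

func main() {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	mux := runtime.NewServeMux()
	// Dials the node's gRPC endpoint and mounts the GET routes declared by the
	// pattern_Query_* variables further down in this file.
	if err := transfertypes.RegisterQueryHandlerFromEndpoint(
		ctx, mux, "localhost:9090", []grpc.DialOption{grpc.WithInsecure()},
	); err != nil {
		panic(err)
	}
	// The ServeMux is a plain http.Handler that translates REST calls into gRPC.
	_ = http.ListenAndServe(":8080", mux)
}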
+func RegisterQueryHandlerClient(ctx context.Context, mux *runtime.ServeMux, client QueryClient) error { + + mux.Handle("GET", pattern_Query_DenomTrace_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_Query_DenomTrace_0(rctx, inboundMarshaler, client, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Query_DenomTrace_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + + mux.Handle("GET", pattern_Query_DenomTraces_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_Query_DenomTraces_0(rctx, inboundMarshaler, client, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Query_DenomTraces_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + + mux.Handle("GET", pattern_Query_Params_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_Query_Params_0(rctx, inboundMarshaler, client, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Query_Params_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
+ + }) + + return nil +} + +var ( + pattern_Query_DenomTrace_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3, 2, 4, 1, 0, 4, 1, 5, 5}, []string{"ibc", "applications", "transfer", "v1beta1", "denom_traces", "hash"}, "", runtime.AssumeColonVerbOpt(false))) + + pattern_Query_DenomTraces_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3, 2, 4}, []string{"ibc", "applications", "transfer", "v1beta1", "denom_traces"}, "", runtime.AssumeColonVerbOpt(false))) + + pattern_Query_Params_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3, 2, 4}, []string{"ibc", "applications", "transfer", "v1beta1", "params"}, "", runtime.AssumeColonVerbOpt(false))) +) + +var ( + forward_Query_DenomTrace_0 = runtime.ForwardResponseMessage + + forward_Query_DenomTraces_0 = runtime.ForwardResponseMessage + + forward_Query_Params_0 = runtime.ForwardResponseMessage +) diff --git a/applications/transfer/types/trace.go b/applications/transfer/types/trace.go new file mode 100644 index 0000000000..f45113efa3 --- /dev/null +++ b/applications/transfer/types/trace.go @@ -0,0 +1,203 @@ +package types + +import ( + "crypto/sha256" + "encoding/hex" + "fmt" + "sort" + "strings" + + tmbytes "github.com/tendermint/tendermint/libs/bytes" + tmtypes "github.com/tendermint/tendermint/types" + + sdk "github.com/cosmos/cosmos-sdk/types" + sdkerrors "github.com/cosmos/cosmos-sdk/types/errors" + host "github.com/cosmos/cosmos-sdk/x/ibc/core/24-host" +) + +// ParseDenomTrace parses a string with the ibc prefix (denom trace) and the base denomination +// into a DenomTrace type. +// +// Examples: +// +// - "portidone/channelidone/uatom" => DenomTrace{Path: "portidone/channelidone", BaseDenom: "uatom"} +// - "uatom" => DenomTrace{Path: "", BaseDenom: "uatom"} +func ParseDenomTrace(rawDenom string) DenomTrace { + denomSplit := strings.Split(rawDenom, "/") + + if denomSplit[0] == rawDenom { + return DenomTrace{ + Path: "", + BaseDenom: rawDenom, + } + } + + return DenomTrace{ + Path: strings.Join(denomSplit[:len(denomSplit)-1], "/"), + BaseDenom: denomSplit[len(denomSplit)-1], + } +} + +// Hash returns the hex bytes of the SHA256 hash of the DenomTrace fields using the following formula: +// +// hash = sha256(tracePath + "/" + baseDenom) +func (dt DenomTrace) Hash() tmbytes.HexBytes { + hash := sha256.Sum256([]byte(dt.GetFullDenomPath())) + return hash[:] +} + +// GetPrefix returns the receiving denomination prefix composed by the trace info and a separator. +func (dt DenomTrace) GetPrefix() string { + return dt.Path + "/" +} + +// IBCDenom a coin denomination for an ICS20 fungible token in the format +// 'ibc/{hash(tracePath + baseDenom)}'. If the trace is empty, it will return the base denomination. +func (dt DenomTrace) IBCDenom() string { + if dt.Path != "" { + return fmt.Sprintf("%s/%s", DenomPrefix, dt.Hash()) + } + return dt.BaseDenom +} + +// GetFullDenomPath returns the full denomination according to the ICS20 specification: +// tracePath + "/" + baseDenom +// If there exists no trace then the base denomination is returned. 
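The helpers above (ParseDenomTrace, Hash, GetPrefix, IBCDenom) and GetFullDenomPath below compose into the ICS20 denomination scheme: parse a raw packet denom, recover its trace path, and derive the on-chain 'ibc/{hash}' voucher denom. A minimal usage sketch; the import alias and path are illustrative, not part of this patch:

package main

import (
	"fmt"

	// Illustrative import path; adjust to the module path declared in go.mod.
	transfertypes "github.com/cosmos/ibc-go/applications/transfer/types"
)

func main() {
	// A voucher received over port "transfer" and channel "channel-0".
	trace := transfertypes.ParseDenomTrace("transfer/channel-0/uatom")

	fmt.Println(trace.Path)               // "transfer/channel-0"
	fmt.Println(trace.BaseDenom)          // "uatom"
	fmt.Println(trace.GetFullDenomPath()) // "transfer/channel-0/uatom"

	// On-chain representation: DenomPrefix + "/" + hex(SHA256(full denom path)).
	fmt.Println(trace.IBCDenom()) // "ibc/<64 hex chars>"

	// A denom with no trace is returned unchanged.
	fmt.Println(transfertypes.ParseDenomTrace("uatom").IBCDenom()) // "uatom"
}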
+func (dt DenomTrace) GetFullDenomPath() string { + if dt.Path == "" { + return dt.BaseDenom + } + return dt.GetPrefix() + dt.BaseDenom +} + +func validateTraceIdentifiers(identifiers []string) error { + if len(identifiers) == 0 || len(identifiers)%2 != 0 { + return fmt.Errorf("trace info must come in pairs of port and channel identifiers '{portID}/{channelID}', got the identifiers: %s", identifiers) + } + + // validate correctness of port and channel identifiers + for i := 0; i < len(identifiers); i += 2 { + if err := host.PortIdentifierValidator(identifiers[i]); err != nil { + return sdkerrors.Wrapf(err, "invalid port ID at position %d", i) + } + if err := host.ChannelIdentifierValidator(identifiers[i+1]); err != nil { + return sdkerrors.Wrapf(err, "invalid channel ID at position %d", i) + } + } + return nil +} + +// Validate performs a basic validation of the DenomTrace fields. +func (dt DenomTrace) Validate() error { + // empty trace is accepted when token lives on the original chain + switch { + case dt.Path == "" && dt.BaseDenom != "": + return nil + case strings.TrimSpace(dt.BaseDenom) == "": + return fmt.Errorf("base denomination cannot be blank") + } + + // NOTE: no base denomination validation + + identifiers := strings.Split(dt.Path, "/") + return validateTraceIdentifiers(identifiers) +} + +// Traces defines a wrapper type for a slice of DenomTrace. +type Traces []DenomTrace + +// Validate performs a basic validation of each denomination trace info. +func (t Traces) Validate() error { + seenTraces := make(map[string]bool) + for i, trace := range t { + hash := trace.Hash().String() + if seenTraces[hash] { + return fmt.Errorf("duplicated denomination trace with hash %s", trace.Hash()) + } + + if err := trace.Validate(); err != nil { + return sdkerrors.Wrapf(err, "failed denom trace %d validation", i) + } + seenTraces[hash] = true + } + return nil +} + +var _ sort.Interface = Traces{} + +// Len implements sort.Interface for Traces +func (t Traces) Len() int { return len(t) } + +// Less implements sort.Interface for Traces +func (t Traces) Less(i, j int) bool { return t[i].GetFullDenomPath() < t[j].GetFullDenomPath() } + +// Swap implements sort.Interface for Traces +func (t Traces) Swap(i, j int) { t[i], t[j] = t[j], t[i] } + +// Sort is a helper function to sort the set of denomination traces in-place +func (t Traces) Sort() Traces { + sort.Sort(t) + return t +} + +// ValidatePrefixedDenom checks that the denomination for an IBC fungible token packet denom is correctly prefixed. 
+// The function will return no error if the given string follows one of the two formats: +// +// - Prefixed denomination: '{portIDN}/{channelIDN}/.../{portID0}/{channelID0}/baseDenom' +// - Unprefixed denomination: 'baseDenom' +func ValidatePrefixedDenom(denom string) error { + denomSplit := strings.Split(denom, "/") + if denomSplit[0] == denom && strings.TrimSpace(denom) != "" { + // NOTE: no base denomination validation + return nil + } + + if strings.TrimSpace(denomSplit[len(denomSplit)-1]) == "" { + return sdkerrors.Wrap(ErrInvalidDenomForTransfer, "base denomination cannot be blank") + } + + identifiers := denomSplit[:len(denomSplit)-1] + return validateTraceIdentifiers(identifiers) +} + +// ValidateIBCDenom validates that the given denomination is either: +// +// - A valid base denomination (eg: 'uatom') +// - A valid fungible token representation (i.e 'ibc/{hash}') per ADR 001 https://github.com/cosmos/cosmos-sdk/blob/master/docs/architecture/adr-001-coin-source-tracing.md +func ValidateIBCDenom(denom string) error { + if err := sdk.ValidateDenom(denom); err != nil { + return err + } + + denomSplit := strings.SplitN(denom, "/", 2) + + switch { + case strings.TrimSpace(denom) == "", + len(denomSplit) == 1 && denomSplit[0] == DenomPrefix, + len(denomSplit) == 2 && (denomSplit[0] != DenomPrefix || strings.TrimSpace(denomSplit[1]) == ""): + return sdkerrors.Wrapf(ErrInvalidDenomForTransfer, "denomination should be prefixed with the format 'ibc/{hash(trace + \"/\" + %s)}'", denom) + + case denomSplit[0] == denom && strings.TrimSpace(denom) != "": + return nil + } + + if _, err := ParseHexHash(denomSplit[1]); err != nil { + return sdkerrors.Wrapf(err, "invalid denom trace hash %s", denomSplit[1]) + } + + return nil +} + +// ParseHexHash parses a hex hash in string format to bytes and validates its correctness. 
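ValidatePrefixedDenom and ValidateIBCDenom split the validation duties: the former checks the '{port}/{channel}/.../baseDenom' form carried in packets, the latter the hashed 'ibc/{hash}' form stored on chain. A small sketch of the accept/reject behaviour mirrored by the table tests further below; the import alias and path are illustrative:

package main

import (
	"fmt"

	// Illustrative import path; adjust to the module path declared in go.mod.
	transfertypes "github.com/cosmos/ibc-go/applications/transfer/types"
)

func main() {
	// Packet denominations: trace identifiers must come in port/channel pairs.
	for _, denom := range []string{
		"uatom",                    // nil: unprefixed base denom
		"transfer/channel-0/uatom", // nil: one port/channel pair
		"transfer/uatom",           // error: lone identifier, no channel
	} {
		fmt.Println(denom, "=>", transfertypes.ValidatePrefixedDenom(denom))
	}

	// On-chain denominations: either a base denom or 'ibc/{64-char SHA256 hex}'.
	fmt.Println(transfertypes.ValidateIBCDenom(
		"ibc/7F1D3FCF4AE79E1554D670D1AD949A9BA4E4A3C76C63093E17E446A46061A7A2")) // nil
	fmt.Println(transfertypes.ValidateIBCDenom("transfer/channel-0/uatom")) // error: unhashed trace
}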
+func ParseHexHash(hexHash string) (tmbytes.HexBytes, error) { + hash, err := hex.DecodeString(hexHash) + if err != nil { + return nil, err + } + + if err := tmtypes.ValidateHash(hash); err != nil { + return nil, err + } + + return hash, nil +} diff --git a/applications/transfer/types/trace_test.go b/applications/transfer/types/trace_test.go new file mode 100644 index 0000000000..f0868d5680 --- /dev/null +++ b/applications/transfer/types/trace_test.go @@ -0,0 +1,150 @@ +package types + +import ( + "testing" + + "github.com/stretchr/testify/require" +) + +func TestParseDenomTrace(t *testing.T) { + testCases := []struct { + name string + denom string + expTrace DenomTrace + }{ + {"empty denom", "", DenomTrace{}}, + {"base denom", "uatom", DenomTrace{BaseDenom: "uatom"}}, + {"trace info", "transfer/channelToA/uatom", DenomTrace{BaseDenom: "uatom", Path: "transfer/channelToA"}}, + {"incomplete path", "transfer/uatom", DenomTrace{BaseDenom: "uatom", Path: "transfer"}}, + {"invalid path (1)", "transfer//uatom", DenomTrace{BaseDenom: "uatom", Path: "transfer/"}}, + {"invalid path (2)", "transfer/channelToA/uatom/", DenomTrace{BaseDenom: "", Path: "transfer/channelToA/uatom"}}, + } + + for _, tc := range testCases { + trace := ParseDenomTrace(tc.denom) + require.Equal(t, tc.expTrace, trace, tc.name) + } +} + +func TestDenomTrace_IBCDenom(t *testing.T) { + testCases := []struct { + name string + trace DenomTrace + expDenom string + }{ + {"base denom", DenomTrace{BaseDenom: "uatom"}, "uatom"}, + {"trace info", DenomTrace{BaseDenom: "uatom", Path: "transfer/channelToA"}, "ibc/7F1D3FCF4AE79E1554D670D1AD949A9BA4E4A3C76C63093E17E446A46061A7A2"}, + } + + for _, tc := range testCases { + denom := tc.trace.IBCDenom() + require.Equal(t, tc.expDenom, denom, tc.name) + } +} + +func TestDenomTrace_Validate(t *testing.T) { + testCases := []struct { + name string + trace DenomTrace + expError bool + }{ + {"base denom only", DenomTrace{BaseDenom: "uatom"}, false}, + {"empty DenomTrace", DenomTrace{}, true}, + {"valid single trace info", DenomTrace{BaseDenom: "uatom", Path: "transfer/channelToA"}, false}, + {"valid multiple trace info", DenomTrace{BaseDenom: "uatom", Path: "transfer/channelToA/transfer/channelToB"}, false}, + {"single trace identifier", DenomTrace{BaseDenom: "uatom", Path: "transfer"}, true}, + {"invalid port ID", DenomTrace{BaseDenom: "uatom", Path: "(transfer)/channelToA"}, true}, + {"invalid channel ID", DenomTrace{BaseDenom: "uatom", Path: "transfer/(channelToA)"}, true}, + {"empty base denom with trace", DenomTrace{BaseDenom: "", Path: "transfer/channelToA"}, true}, + } + + for _, tc := range testCases { + err := tc.trace.Validate() + if tc.expError { + require.Error(t, err, tc.name) + continue + } + require.NoError(t, err, tc.name) + } +} + +func TestTraces_Validate(t *testing.T) { + testCases := []struct { + name string + traces Traces + expError bool + }{ + {"empty Traces", Traces{}, false}, + {"valid multiple trace info", Traces{{BaseDenom: "uatom", Path: "transfer/channelToA/transfer/channelToB"}}, false}, + { + "valid multiple trace info", + Traces{ + {BaseDenom: "uatom", Path: "transfer/channelToA/transfer/channelToB"}, + {BaseDenom: "uatom", Path: "transfer/channelToA/transfer/channelToB"}, + }, + true, + }, + {"empty base denom with trace", Traces{{BaseDenom: "", Path: "transfer/channelToA"}}, true}, + } + + for _, tc := range testCases { + err := tc.traces.Validate() + if tc.expError { + require.Error(t, err, tc.name) + continue + } + require.NoError(t, err, tc.name) + } +} + +func 
TestValidatePrefixedDenom(t *testing.T) { + testCases := []struct { + name string + denom string + expError bool + }{ + {"prefixed denom", "transfer/channelToA/uatom", false}, + {"base denom", "uatom", false}, + {"empty denom", "", true}, + {"empty prefix", "/uatom", true}, + {"empty identifiers", "//uatom", true}, + {"single trace identifier", "transfer/", true}, + {"invalid port ID", "(transfer)/channelToA/uatom", true}, + {"invalid channel ID", "transfer/(channelToA)/uatom", true}, + } + + for _, tc := range testCases { + err := ValidatePrefixedDenom(tc.denom) + if tc.expError { + require.Error(t, err, tc.name) + continue + } + require.NoError(t, err, tc.name) + } +} + +func TestValidateIBCDenom(t *testing.T) { + testCases := []struct { + name string + denom string + expError bool + }{ + {"denom with trace hash", "ibc/7F1D3FCF4AE79E1554D670D1AD949A9BA4E4A3C76C63093E17E446A46061A7A2", false}, + {"base denom", "uatom", false}, + {"empty denom", "", true}, + {"invalid prefixed denom", "transfer/channelToA/uatom", true}, + {"denom 'ibc'", "ibc", true}, + {"denom 'ibc/'", "ibc/", true}, + {"invald prefix", "notibc/7F1D3FCF4AE79E1554D670D1AD949A9BA4E4A3C76C63093E17E446A46061A7A2", true}, + {"invald hash", "ibc/!@#$!@#", true}, + } + + for _, tc := range testCases { + err := ValidateIBCDenom(tc.denom) + if tc.expError { + require.Error(t, err, tc.name) + continue + } + require.NoError(t, err, tc.name) + } +} diff --git a/applications/transfer/types/transfer.pb.go b/applications/transfer/types/transfer.pb.go new file mode 100644 index 0000000000..62734b85a4 --- /dev/null +++ b/applications/transfer/types/transfer.pb.go @@ -0,0 +1,909 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: ibc/applications/transfer/v1/transfer.proto + +package types + +import ( + fmt "fmt" + _ "github.com/gogo/protobuf/gogoproto" + proto "github.com/gogo/protobuf/proto" + io "io" + math "math" + math_bits "math/bits" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
+const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +// FungibleTokenPacketData defines a struct for the packet payload +// See FungibleTokenPacketData spec: +// https://github.com/cosmos/ics/tree/master/spec/ics-020-fungible-token-transfer#data-structures +type FungibleTokenPacketData struct { + // the token denomination to be transferred + Denom string `protobuf:"bytes,1,opt,name=denom,proto3" json:"denom,omitempty"` + // the token amount to be transferred + Amount uint64 `protobuf:"varint,2,opt,name=amount,proto3" json:"amount,omitempty"` + // the sender address + Sender string `protobuf:"bytes,3,opt,name=sender,proto3" json:"sender,omitempty"` + // the recipient address on the destination chain + Receiver string `protobuf:"bytes,4,opt,name=receiver,proto3" json:"receiver,omitempty"` +} + +func (m *FungibleTokenPacketData) Reset() { *m = FungibleTokenPacketData{} } +func (m *FungibleTokenPacketData) String() string { return proto.CompactTextString(m) } +func (*FungibleTokenPacketData) ProtoMessage() {} +func (*FungibleTokenPacketData) Descriptor() ([]byte, []int) { + return fileDescriptor_5041673e96e97901, []int{0} +} +func (m *FungibleTokenPacketData) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *FungibleTokenPacketData) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_FungibleTokenPacketData.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *FungibleTokenPacketData) XXX_Merge(src proto.Message) { + xxx_messageInfo_FungibleTokenPacketData.Merge(m, src) +} +func (m *FungibleTokenPacketData) XXX_Size() int { + return m.Size() +} +func (m *FungibleTokenPacketData) XXX_DiscardUnknown() { + xxx_messageInfo_FungibleTokenPacketData.DiscardUnknown(m) +} + +var xxx_messageInfo_FungibleTokenPacketData proto.InternalMessageInfo + +func (m *FungibleTokenPacketData) GetDenom() string { + if m != nil { + return m.Denom + } + return "" +} + +func (m *FungibleTokenPacketData) GetAmount() uint64 { + if m != nil { + return m.Amount + } + return 0 +} + +func (m *FungibleTokenPacketData) GetSender() string { + if m != nil { + return m.Sender + } + return "" +} + +func (m *FungibleTokenPacketData) GetReceiver() string { + if m != nil { + return m.Receiver + } + return "" +} + +// DenomTrace contains the base denomination for ICS20 fungible tokens and the +// source tracing information path. +type DenomTrace struct { + // path defines the chain of port/channel identifiers used for tracing the + // source of the fungible token. + Path string `protobuf:"bytes,1,opt,name=path,proto3" json:"path,omitempty"` + // base denomination of the relayed fungible token. 
+ BaseDenom string `protobuf:"bytes,2,opt,name=base_denom,json=baseDenom,proto3" json:"base_denom,omitempty"` +} + +func (m *DenomTrace) Reset() { *m = DenomTrace{} } +func (m *DenomTrace) String() string { return proto.CompactTextString(m) } +func (*DenomTrace) ProtoMessage() {} +func (*DenomTrace) Descriptor() ([]byte, []int) { + return fileDescriptor_5041673e96e97901, []int{1} +} +func (m *DenomTrace) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DenomTrace) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_DenomTrace.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *DenomTrace) XXX_Merge(src proto.Message) { + xxx_messageInfo_DenomTrace.Merge(m, src) +} +func (m *DenomTrace) XXX_Size() int { + return m.Size() +} +func (m *DenomTrace) XXX_DiscardUnknown() { + xxx_messageInfo_DenomTrace.DiscardUnknown(m) +} + +var xxx_messageInfo_DenomTrace proto.InternalMessageInfo + +func (m *DenomTrace) GetPath() string { + if m != nil { + return m.Path + } + return "" +} + +func (m *DenomTrace) GetBaseDenom() string { + if m != nil { + return m.BaseDenom + } + return "" +} + +// Params defines the set of IBC transfer parameters. +// NOTE: To prevent a single token from being transferred, set the +// TransfersEnabled parameter to true and then set the bank module's SendEnabled +// parameter for the denomination to false. +type Params struct { + // send_enabled enables or disables all cross-chain token transfers from this + // chain. + SendEnabled bool `protobuf:"varint,1,opt,name=send_enabled,json=sendEnabled,proto3" json:"send_enabled,omitempty" yaml:"send_enabled"` + // receive_enabled enables or disables all cross-chain token transfers to this + // chain. 
+ ReceiveEnabled bool `protobuf:"varint,2,opt,name=receive_enabled,json=receiveEnabled,proto3" json:"receive_enabled,omitempty" yaml:"receive_enabled"` +} + +func (m *Params) Reset() { *m = Params{} } +func (m *Params) String() string { return proto.CompactTextString(m) } +func (*Params) ProtoMessage() {} +func (*Params) Descriptor() ([]byte, []int) { + return fileDescriptor_5041673e96e97901, []int{2} +} +func (m *Params) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Params) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Params.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Params) XXX_Merge(src proto.Message) { + xxx_messageInfo_Params.Merge(m, src) +} +func (m *Params) XXX_Size() int { + return m.Size() +} +func (m *Params) XXX_DiscardUnknown() { + xxx_messageInfo_Params.DiscardUnknown(m) +} + +var xxx_messageInfo_Params proto.InternalMessageInfo + +func (m *Params) GetSendEnabled() bool { + if m != nil { + return m.SendEnabled + } + return false +} + +func (m *Params) GetReceiveEnabled() bool { + if m != nil { + return m.ReceiveEnabled + } + return false +} + +func init() { + proto.RegisterType((*FungibleTokenPacketData)(nil), "ibc.applications.transfer.v1.FungibleTokenPacketData") + proto.RegisterType((*DenomTrace)(nil), "ibc.applications.transfer.v1.DenomTrace") + proto.RegisterType((*Params)(nil), "ibc.applications.transfer.v1.Params") +} + +func init() { + proto.RegisterFile("ibc/applications/transfer/v1/transfer.proto", fileDescriptor_5041673e96e97901) +} + +var fileDescriptor_5041673e96e97901 = []byte{ + // 362 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x74, 0x91, 0x41, 0x6b, 0xe2, 0x40, + 0x14, 0xc7, 0x8d, 0xeb, 0x8a, 0xce, 0x2e, 0xbb, 0x30, 0x2b, 0x1a, 0x64, 0x1b, 0x25, 0x27, 0xa1, + 0x34, 0x41, 0x7a, 0xf3, 0xd0, 0x82, 0xb5, 0x3d, 0x4b, 0xf0, 0x50, 0x7a, 0x91, 0xc9, 0xe4, 0x35, + 0x06, 0x93, 0x99, 0x30, 0x33, 0x4a, 0xa5, 0x9f, 0xa0, 0xb7, 0x7e, 0xac, 0x1e, 0x3d, 0xf6, 0x24, + 0x45, 0xbf, 0x81, 0x9f, 0xa0, 0x64, 0x12, 0x82, 0x14, 0x7a, 0x9a, 0xf7, 0x7b, 0xef, 0xff, 0xff, + 0xcf, 0x83, 0x87, 0xce, 0x23, 0x9f, 0xba, 0x24, 0x4d, 0xe3, 0x88, 0x12, 0x15, 0x71, 0x26, 0x5d, + 0x25, 0x08, 0x93, 0x8f, 0x20, 0xdc, 0xf5, 0xb0, 0xac, 0x9d, 0x54, 0x70, 0xc5, 0xf1, 0xff, 0xc8, + 0xa7, 0xce, 0xa9, 0xd8, 0x29, 0x05, 0xeb, 0x61, 0xb7, 0x15, 0xf2, 0x90, 0x6b, 0xa1, 0x9b, 0x55, + 0xb9, 0xc7, 0x7e, 0x46, 0x9d, 0xbb, 0x15, 0x0b, 0x23, 0x3f, 0x86, 0x19, 0x5f, 0x02, 0x9b, 0x12, + 0xba, 0x04, 0x35, 0x21, 0x8a, 0xe0, 0x16, 0xfa, 0x19, 0x00, 0xe3, 0x89, 0x69, 0xf4, 0x8d, 0x41, + 0xd3, 0xcb, 0x01, 0xb7, 0x51, 0x9d, 0x24, 0x7c, 0xc5, 0x94, 0x59, 0xed, 0x1b, 0x83, 0x9a, 0x57, + 0x50, 0xd6, 0x97, 0xc0, 0x02, 0x10, 0xe6, 0x0f, 0x2d, 0x2f, 0x08, 0x77, 0x51, 0x43, 0x00, 0x85, + 0x68, 0x0d, 0xc2, 0xac, 0xe9, 0x49, 0xc9, 0xf6, 0x35, 0x42, 0x93, 0x2c, 0x74, 0x26, 0x08, 0x05, + 0x8c, 0x51, 0x2d, 0x25, 0x6a, 0x51, 0x7c, 0xa7, 0x6b, 0x7c, 0x86, 0x90, 0x4f, 0x24, 0xcc, 0xf3, + 0x45, 0xaa, 0x7a, 0xd2, 0xcc, 0x3a, 0xda, 0x67, 0xbf, 0x18, 0xa8, 0x3e, 0x25, 0x82, 0x24, 0x12, + 0x8f, 0xd0, 0xef, 0xec, 0xc7, 0x39, 0x30, 0xe2, 0xc7, 0x10, 0xe8, 0x94, 0xc6, 0xb8, 0x73, 0xdc, + 0xf5, 0xfe, 0x6d, 0x48, 0x12, 0x8f, 0xec, 0xd3, 0xa9, 0xed, 0xfd, 0xca, 0xf0, 0x36, 0x27, 0x7c, + 0x83, 0xfe, 0x16, 0x3b, 0x95, 0xf6, 0xaa, 0xb6, 0x77, 0x8f, 0xbb, 0x5e, 0x3b, 0xb7, 0x7f, 
0x11, + 0xd8, 0xde, 0x9f, 0xa2, 0x53, 0x84, 0x8c, 0xef, 0xdf, 0xf6, 0x96, 0xb1, 0xdd, 0x5b, 0xc6, 0xc7, + 0xde, 0x32, 0x5e, 0x0f, 0x56, 0x65, 0x7b, 0xb0, 0x2a, 0xef, 0x07, 0xab, 0xf2, 0x70, 0x15, 0x46, + 0x6a, 0xb1, 0xf2, 0x1d, 0xca, 0x13, 0x97, 0x72, 0x99, 0x70, 0x59, 0x3c, 0x17, 0x32, 0x58, 0xba, + 0x4f, 0xee, 0xf7, 0x37, 0x56, 0x9b, 0x14, 0xa4, 0x5f, 0xd7, 0xa7, 0xba, 0xfc, 0x0c, 0x00, 0x00, + 0xff, 0xff, 0x46, 0x73, 0x85, 0x0b, 0x0d, 0x02, 0x00, 0x00, +} + +func (m *FungibleTokenPacketData) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *FungibleTokenPacketData) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *FungibleTokenPacketData) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Receiver) > 0 { + i -= len(m.Receiver) + copy(dAtA[i:], m.Receiver) + i = encodeVarintTransfer(dAtA, i, uint64(len(m.Receiver))) + i-- + dAtA[i] = 0x22 + } + if len(m.Sender) > 0 { + i -= len(m.Sender) + copy(dAtA[i:], m.Sender) + i = encodeVarintTransfer(dAtA, i, uint64(len(m.Sender))) + i-- + dAtA[i] = 0x1a + } + if m.Amount != 0 { + i = encodeVarintTransfer(dAtA, i, uint64(m.Amount)) + i-- + dAtA[i] = 0x10 + } + if len(m.Denom) > 0 { + i -= len(m.Denom) + copy(dAtA[i:], m.Denom) + i = encodeVarintTransfer(dAtA, i, uint64(len(m.Denom))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *DenomTrace) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DenomTrace) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *DenomTrace) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.BaseDenom) > 0 { + i -= len(m.BaseDenom) + copy(dAtA[i:], m.BaseDenom) + i = encodeVarintTransfer(dAtA, i, uint64(len(m.BaseDenom))) + i-- + dAtA[i] = 0x12 + } + if len(m.Path) > 0 { + i -= len(m.Path) + copy(dAtA[i:], m.Path) + i = encodeVarintTransfer(dAtA, i, uint64(len(m.Path))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *Params) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Params) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Params) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.ReceiveEnabled { + i-- + if m.ReceiveEnabled { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x10 + } + if m.SendEnabled { + i-- + if m.SendEnabled { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func encodeVarintTransfer(dAtA []byte, offset int, v uint64) int { + offset -= sovTransfer(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *FungibleTokenPacketData) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Denom) + if l > 0 { + n += 1 + l + sovTransfer(uint64(l)) + } + if 
m.Amount != 0 { + n += 1 + sovTransfer(uint64(m.Amount)) + } + l = len(m.Sender) + if l > 0 { + n += 1 + l + sovTransfer(uint64(l)) + } + l = len(m.Receiver) + if l > 0 { + n += 1 + l + sovTransfer(uint64(l)) + } + return n +} + +func (m *DenomTrace) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Path) + if l > 0 { + n += 1 + l + sovTransfer(uint64(l)) + } + l = len(m.BaseDenom) + if l > 0 { + n += 1 + l + sovTransfer(uint64(l)) + } + return n +} + +func (m *Params) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.SendEnabled { + n += 2 + } + if m.ReceiveEnabled { + n += 2 + } + return n +} + +func sovTransfer(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozTransfer(x uint64) (n int) { + return sovTransfer(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *FungibleTokenPacketData) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTransfer + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: FungibleTokenPacketData: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: FungibleTokenPacketData: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Denom", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTransfer + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTransfer + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTransfer + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Denom = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Amount", wireType) + } + m.Amount = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTransfer + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Amount |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Sender", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTransfer + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTransfer + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTransfer + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Sender = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Receiver", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTransfer + } + if iNdEx >= l { + 
return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTransfer + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTransfer + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Receiver = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTransfer(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTransfer + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *DenomTrace) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTransfer + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DenomTrace: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DenomTrace: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Path", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTransfer + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTransfer + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTransfer + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Path = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field BaseDenom", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTransfer + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTransfer + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTransfer + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.BaseDenom = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTransfer(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTransfer + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Params) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTransfer + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + 
fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Params: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Params: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field SendEnabled", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTransfer + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.SendEnabled = bool(v != 0) + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ReceiveEnabled", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTransfer + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.ReceiveEnabled = bool(v != 0) + default: + iNdEx = preIndex + skippy, err := skipTransfer(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTransfer + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipTransfer(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowTransfer + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowTransfer + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowTransfer + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthTransfer + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupTransfer + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthTransfer + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthTransfer = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowTransfer = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupTransfer = fmt.Errorf("proto: unexpected end of group") +) diff --git a/applications/transfer/types/tx.pb.go b/applications/transfer/types/tx.pb.go new file mode 100644 index 0000000000..e3a630b427 --- /dev/null +++ b/applications/transfer/types/tx.pb.go @@ -0,0 +1,804 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. 
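The FungibleTokenPacketData Marshal/Unmarshal pair defined above round-trips the packet payload through the gogoproto wire format: fields are written in reverse tag order into a sized buffer and read back by varint-tagged field number. A minimal round-trip sketch; the import alias/path and the bech32 addresses are placeholders:

package main

import (
	"fmt"

	// Illustrative import path; adjust to the module path declared in go.mod.
	transfertypes "github.com/cosmos/ibc-go/applications/transfer/types"
)

func main() {
	data := transfertypes.FungibleTokenPacketData{
		Denom:    "transfer/channel-0/uatom",
		Amount:   100,
		Sender:   "cosmos1sender",   // placeholder address
		Receiver: "cosmos1receiver", // placeholder address
	}

	// Encode to protobuf bytes and decode into a fresh value.
	bz, err := data.Marshal()
	if err != nil {
		panic(err)
	}

	var decoded transfertypes.FungibleTokenPacketData
	if err := decoded.Unmarshal(bz); err != nil {
		panic(err)
	}
	fmt.Println(decoded.Denom, decoded.Amount) // transfer/channel-0/uatom 100
}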
+// source: ibc/applications/transfer/v1/tx.proto + +package types + +import ( + context "context" + fmt "fmt" + types "github.com/cosmos/cosmos-sdk/types" + types1 "github.com/cosmos/cosmos-sdk/x/ibc/core/02-client/types" + _ "github.com/gogo/protobuf/gogoproto" + grpc1 "github.com/gogo/protobuf/grpc" + proto "github.com/gogo/protobuf/proto" + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" + io "io" + math "math" + math_bits "math/bits" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +// MsgTransfer defines a msg to transfer fungible tokens (i.e Coins) between +// ICS20 enabled chains. See ICS Spec here: +// https://github.com/cosmos/ics/tree/master/spec/ics-020-fungible-token-transfer#data-structures +type MsgTransfer struct { + // the port on which the packet will be sent + SourcePort string `protobuf:"bytes,1,opt,name=source_port,json=sourcePort,proto3" json:"source_port,omitempty" yaml:"source_port"` + // the channel by which the packet will be sent + SourceChannel string `protobuf:"bytes,2,opt,name=source_channel,json=sourceChannel,proto3" json:"source_channel,omitempty" yaml:"source_channel"` + // the tokens to be transferred + Token types.Coin `protobuf:"bytes,3,opt,name=token,proto3" json:"token"` + // the sender address + Sender string `protobuf:"bytes,4,opt,name=sender,proto3" json:"sender,omitempty"` + // the recipient address on the destination chain + Receiver string `protobuf:"bytes,5,opt,name=receiver,proto3" json:"receiver,omitempty"` + // Timeout height relative to the current block height. + // The timeout is disabled when set to 0. + TimeoutHeight types1.Height `protobuf:"bytes,6,opt,name=timeout_height,json=timeoutHeight,proto3" json:"timeout_height" yaml:"timeout_height"` + // Timeout timestamp (in nanoseconds) relative to the current block timestamp. + // The timeout is disabled when set to 0. + TimeoutTimestamp uint64 `protobuf:"varint,7,opt,name=timeout_timestamp,json=timeoutTimestamp,proto3" json:"timeout_timestamp,omitempty" yaml:"timeout_timestamp"` +} + +func (m *MsgTransfer) Reset() { *m = MsgTransfer{} } +func (m *MsgTransfer) String() string { return proto.CompactTextString(m) } +func (*MsgTransfer) ProtoMessage() {} +func (*MsgTransfer) Descriptor() ([]byte, []int) { + return fileDescriptor_7401ed9bed2f8e09, []int{0} +} +func (m *MsgTransfer) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *MsgTransfer) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_MsgTransfer.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *MsgTransfer) XXX_Merge(src proto.Message) { + xxx_messageInfo_MsgTransfer.Merge(m, src) +} +func (m *MsgTransfer) XXX_Size() int { + return m.Size() +} +func (m *MsgTransfer) XXX_DiscardUnknown() { + xxx_messageInfo_MsgTransfer.DiscardUnknown(m) +} + +var xxx_messageInfo_MsgTransfer proto.InternalMessageInfo + +// MsgTransferResponse defines the Msg/Transfer response type. 
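MsgTransfer above carries everything a transfer submission needs: the source port and channel, a single sdk.Coin, sender and receiver addresses, and a height- or timestamp-based timeout (either one is disabled by setting it to zero). A minimal construction sketch; the transfer-types import path, the bech32 addresses and the timeout values are placeholders, while the 02-client types path matches the import block above:

package main

import (
	"fmt"

	sdk "github.com/cosmos/cosmos-sdk/types"
	clienttypes "github.com/cosmos/cosmos-sdk/x/ibc/core/02-client/types"

	// Illustrative import path; adjust to the module path declared in go.mod.
	transfertypes "github.com/cosmos/ibc-go/applications/transfer/types"
)

func main() {
	msg := &transfertypes.MsgTransfer{
		SourcePort:    "transfer",
		SourceChannel: "channel-0",
		Token:         sdk.NewCoin("uatom", sdk.NewInt(100)),
		Sender:        "cosmos1sender",   // placeholder address
		Receiver:      "cosmos1receiver", // placeholder address
		// Time out at revision 1, height 1000 (illustrative values).
		TimeoutHeight:    clienttypes.NewHeight(1, 1000),
		TimeoutTimestamp: 0, // zero disables the timestamp timeout
	}
	fmt.Println(msg.String())
}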
+type MsgTransferResponse struct { +} + +func (m *MsgTransferResponse) Reset() { *m = MsgTransferResponse{} } +func (m *MsgTransferResponse) String() string { return proto.CompactTextString(m) } +func (*MsgTransferResponse) ProtoMessage() {} +func (*MsgTransferResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_7401ed9bed2f8e09, []int{1} +} +func (m *MsgTransferResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *MsgTransferResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_MsgTransferResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *MsgTransferResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_MsgTransferResponse.Merge(m, src) +} +func (m *MsgTransferResponse) XXX_Size() int { + return m.Size() +} +func (m *MsgTransferResponse) XXX_DiscardUnknown() { + xxx_messageInfo_MsgTransferResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_MsgTransferResponse proto.InternalMessageInfo + +func init() { + proto.RegisterType((*MsgTransfer)(nil), "ibc.applications.transfer.v1.MsgTransfer") + proto.RegisterType((*MsgTransferResponse)(nil), "ibc.applications.transfer.v1.MsgTransferResponse") +} + +func init() { + proto.RegisterFile("ibc/applications/transfer/v1/tx.proto", fileDescriptor_7401ed9bed2f8e09) +} + +var fileDescriptor_7401ed9bed2f8e09 = []byte{ + // 488 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x93, 0x41, 0x6f, 0xd3, 0x30, + 0x14, 0xc7, 0x13, 0xd6, 0x95, 0xe2, 0x6a, 0x13, 0x18, 0x36, 0x65, 0xd5, 0x48, 0xaa, 0x48, 0x48, + 0xe5, 0x80, 0xad, 0x0c, 0x21, 0xa4, 0x1d, 0x10, 0xca, 0x2e, 0x70, 0x98, 0x84, 0xa2, 0x1d, 0x10, + 0x97, 0x91, 0x78, 0x26, 0xb1, 0xd6, 0xd8, 0x91, 0xed, 0x46, 0xdb, 0x37, 0xe0, 0xc8, 0x47, 0xd8, + 0x99, 0x4f, 0xb2, 0xe3, 0x8e, 0x9c, 0x2a, 0xd4, 0x5e, 0x38, 0xf7, 0x13, 0xa0, 0xc4, 0x6e, 0x69, + 0x0f, 0x20, 0x4e, 0xf1, 0x7b, 0xff, 0xdf, 0xf3, 0x5f, 0xcf, 0xef, 0x05, 0x3c, 0x63, 0x19, 0xc1, + 0x69, 0x55, 0x8d, 0x19, 0x49, 0x35, 0x13, 0x5c, 0x61, 0x2d, 0x53, 0xae, 0xbe, 0x50, 0x89, 0xeb, + 0x08, 0xeb, 0x2b, 0x54, 0x49, 0xa1, 0x05, 0x3c, 0x64, 0x19, 0x41, 0xeb, 0x18, 0x5a, 0x62, 0xa8, + 0x8e, 0x06, 0x4f, 0x72, 0x91, 0x8b, 0x16, 0xc4, 0xcd, 0xc9, 0xd4, 0x0c, 0x7c, 0x22, 0x54, 0x29, + 0x14, 0xce, 0x52, 0x45, 0x71, 0x1d, 0x65, 0x54, 0xa7, 0x11, 0x26, 0x82, 0x71, 0xab, 0x07, 0x8d, + 0x35, 0x11, 0x92, 0x62, 0x32, 0x66, 0x94, 0xeb, 0xc6, 0xd0, 0x9c, 0x0c, 0x10, 0x7e, 0xdf, 0x02, + 0xfd, 0x53, 0x95, 0x9f, 0x59, 0x27, 0xf8, 0x1a, 0xf4, 0x95, 0x98, 0x48, 0x42, 0xcf, 0x2b, 0x21, + 0xb5, 0xe7, 0x0e, 0xdd, 0xd1, 0x83, 0x78, 0x7f, 0x31, 0x0d, 0xe0, 0x75, 0x5a, 0x8e, 0x8f, 0xc3, + 0x35, 0x31, 0x4c, 0x80, 0x89, 0x3e, 0x08, 0xa9, 0xe1, 0x5b, 0xb0, 0x6b, 0x35, 0x52, 0xa4, 0x9c, + 0xd3, 0xb1, 0x77, 0xaf, 0xad, 0x3d, 0x58, 0x4c, 0x83, 0xbd, 0x8d, 0x5a, 0xab, 0x87, 0xc9, 0x8e, + 0x49, 0x9c, 0x98, 0x18, 0xbe, 0x02, 0xdb, 0x5a, 0x5c, 0x52, 0xee, 0x6d, 0x0d, 0xdd, 0x51, 0xff, + 0xe8, 0x00, 0x99, 0xde, 0x50, 0xd3, 0x1b, 0xb2, 0xbd, 0xa1, 0x13, 0xc1, 0x78, 0xdc, 0xb9, 0x9d, + 0x06, 0x4e, 0x62, 0x68, 0xb8, 0x0f, 0xba, 0x8a, 0xf2, 0x0b, 0x2a, 0xbd, 0x4e, 0x63, 0x98, 0xd8, + 0x08, 0x0e, 0x40, 0x4f, 0x52, 0x42, 0x59, 0x4d, 0xa5, 0xb7, 0xdd, 0x2a, 0xab, 0x18, 0x7e, 0x06, + 0xbb, 0x9a, 0x95, 0x54, 0x4c, 0xf4, 0x79, 0x41, 0x59, 0x5e, 0x68, 0xaf, 0xdb, 0x7a, 0x0e, 0x50, + 0x33, 0x83, 0xe6, 0xbd, 0x90, 0x7d, 
0xa5, 0x3a, 0x42, 0xef, 0x5a, 0x22, 0x7e, 0xda, 0x98, 0xfe, + 0x69, 0x66, 0xb3, 0x3e, 0x4c, 0x76, 0x6c, 0xc2, 0xd0, 0xf0, 0x3d, 0x78, 0xb4, 0x24, 0x9a, 0xaf, + 0xd2, 0x69, 0x59, 0x79, 0xf7, 0x87, 0xee, 0xa8, 0x13, 0x1f, 0x2e, 0xa6, 0x81, 0xb7, 0x79, 0xc9, + 0x0a, 0x09, 0x93, 0x87, 0x36, 0x77, 0xb6, 0x4c, 0x1d, 0xf7, 0xbe, 0xde, 0x04, 0xce, 0xaf, 0x9b, + 0xc0, 0x09, 0xf7, 0xc0, 0xe3, 0xb5, 0x59, 0x25, 0x54, 0x55, 0x82, 0x2b, 0x7a, 0x24, 0xc0, 0xd6, + 0xa9, 0xca, 0x61, 0x01, 0x7a, 0xab, 0x31, 0x3e, 0x47, 0xff, 0x5a, 0x26, 0xb4, 0x76, 0xcb, 0x20, + 0xfa, 0x6f, 0x74, 0x69, 0x18, 0x7f, 0xbc, 0x9d, 0xf9, 0xee, 0xdd, 0xcc, 0x77, 0x7f, 0xce, 0x7c, + 0xf7, 0xdb, 0xdc, 0x77, 0xee, 0xe6, 0xbe, 0xf3, 0x63, 0xee, 0x3b, 0x9f, 0xde, 0xe4, 0x4c, 0x17, + 0x93, 0x0c, 0x11, 0x51, 0x62, 0xbb, 0x9a, 0xe6, 0xf3, 0x42, 0x5d, 0x5c, 0xe2, 0x2b, 0xfc, 0xf7, + 0x3f, 0x41, 0x5f, 0x57, 0x54, 0x65, 0xdd, 0x76, 0x2b, 0x5f, 0xfe, 0x0e, 0x00, 0x00, 0xff, 0xff, + 0x26, 0x76, 0x5b, 0xfa, 0x33, 0x03, 0x00, 0x00, +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConn + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion4 + +// MsgClient is the client API for Msg service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. +type MsgClient interface { + // Transfer defines a rpc handler method for MsgTransfer. + Transfer(ctx context.Context, in *MsgTransfer, opts ...grpc.CallOption) (*MsgTransferResponse, error) +} + +type msgClient struct { + cc grpc1.ClientConn +} + +func NewMsgClient(cc grpc1.ClientConn) MsgClient { + return &msgClient{cc} +} + +func (c *msgClient) Transfer(ctx context.Context, in *MsgTransfer, opts ...grpc.CallOption) (*MsgTransferResponse, error) { + out := new(MsgTransferResponse) + err := c.cc.Invoke(ctx, "/ibc.applications.transfer.v1.Msg/Transfer", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// MsgServer is the server API for Msg service. +type MsgServer interface { + // Transfer defines a rpc handler method for MsgTransfer. + Transfer(context.Context, *MsgTransfer) (*MsgTransferResponse, error) +} + +// UnimplementedMsgServer can be embedded to have forward compatible implementations. 
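The Msg service above exposes a single RPC, Transfer. A server-side implementation embeds UnimplementedMsgServer (defined next) so that RPCs added to the service later degrade to codes.Unimplemented instead of breaking compilation. A minimal sketch with an illustrative handler, assuming the same placeholder import path as before:

package main

import (
	"context"
	"fmt"

	sdk "github.com/cosmos/cosmos-sdk/types"

	// Illustrative import path; adjust to the module path declared in go.mod.
	transfertypes "github.com/cosmos/ibc-go/applications/transfer/types"
)

// demoMsgServer embeds the forward-compatibility stub and overrides Transfer.
type demoMsgServer struct {
	transfertypes.UnimplementedMsgServer
}

// Compile-time check that the demo satisfies the generated interface.
var _ transfertypes.MsgServer = demoMsgServer{}

func (demoMsgServer) Transfer(_ context.Context, msg *transfertypes.MsgTransfer) (*transfertypes.MsgTransferResponse, error) {
	// A real handler would hand off to the transfer keeper; this only logs the request.
	fmt.Printf("transfer of %s over %s/%s\n", msg.Token, msg.SourcePort, msg.SourceChannel)
	return &transfertypes.MsgTransferResponse{}, nil
}

func main() {
	_, err := demoMsgServer{}.Transfer(context.Background(), &transfertypes.MsgTransfer{
		SourcePort:    "transfer",
		SourceChannel: "channel-0",
		Token:         sdk.NewCoin("uatom", sdk.NewInt(1)),
	})
	fmt.Println("err:", err)
}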
+type UnimplementedMsgServer struct { +} + +func (*UnimplementedMsgServer) Transfer(ctx context.Context, req *MsgTransfer) (*MsgTransferResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method Transfer not implemented") +} + +func RegisterMsgServer(s grpc1.Server, srv MsgServer) { + s.RegisterService(&_Msg_serviceDesc, srv) +} + +func _Msg_Transfer_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(MsgTransfer) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(MsgServer).Transfer(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/ibc.applications.transfer.v1.Msg/Transfer", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(MsgServer).Transfer(ctx, req.(*MsgTransfer)) + } + return interceptor(ctx, in, info, handler) +} + +var _Msg_serviceDesc = grpc.ServiceDesc{ + ServiceName: "ibc.applications.transfer.v1.Msg", + HandlerType: (*MsgServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "Transfer", + Handler: _Msg_Transfer_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "ibc/applications/transfer/v1/tx.proto", +} + +func (m *MsgTransfer) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *MsgTransfer) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *MsgTransfer) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.TimeoutTimestamp != 0 { + i = encodeVarintTx(dAtA, i, uint64(m.TimeoutTimestamp)) + i-- + dAtA[i] = 0x38 + } + { + size, err := m.TimeoutHeight.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTx(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x32 + if len(m.Receiver) > 0 { + i -= len(m.Receiver) + copy(dAtA[i:], m.Receiver) + i = encodeVarintTx(dAtA, i, uint64(len(m.Receiver))) + i-- + dAtA[i] = 0x2a + } + if len(m.Sender) > 0 { + i -= len(m.Sender) + copy(dAtA[i:], m.Sender) + i = encodeVarintTx(dAtA, i, uint64(len(m.Sender))) + i-- + dAtA[i] = 0x22 + } + { + size, err := m.Token.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTx(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + if len(m.SourceChannel) > 0 { + i -= len(m.SourceChannel) + copy(dAtA[i:], m.SourceChannel) + i = encodeVarintTx(dAtA, i, uint64(len(m.SourceChannel))) + i-- + dAtA[i] = 0x12 + } + if len(m.SourcePort) > 0 { + i -= len(m.SourcePort) + copy(dAtA[i:], m.SourcePort) + i = encodeVarintTx(dAtA, i, uint64(len(m.SourcePort))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *MsgTransferResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *MsgTransferResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *MsgTransferResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + return len(dAtA) - i, nil +} + +func encodeVarintTx(dAtA []byte, offset int, v uint64) int { + offset -= sovTx(v) + base 
:= offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *MsgTransfer) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.SourcePort) + if l > 0 { + n += 1 + l + sovTx(uint64(l)) + } + l = len(m.SourceChannel) + if l > 0 { + n += 1 + l + sovTx(uint64(l)) + } + l = m.Token.Size() + n += 1 + l + sovTx(uint64(l)) + l = len(m.Sender) + if l > 0 { + n += 1 + l + sovTx(uint64(l)) + } + l = len(m.Receiver) + if l > 0 { + n += 1 + l + sovTx(uint64(l)) + } + l = m.TimeoutHeight.Size() + n += 1 + l + sovTx(uint64(l)) + if m.TimeoutTimestamp != 0 { + n += 1 + sovTx(uint64(m.TimeoutTimestamp)) + } + return n +} + +func (m *MsgTransferResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + return n +} + +func sovTx(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozTx(x uint64) (n int) { + return sovTx(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *MsgTransfer) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTx + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MsgTransfer: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MsgTransfer: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SourcePort", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTx + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTx + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTx + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.SourcePort = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SourceChannel", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTx + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTx + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTx + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.SourceChannel = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Token", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTx + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTx + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTx + } + if postIndex > l { 
+ return io.ErrUnexpectedEOF + } + if err := m.Token.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Sender", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTx + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTx + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTx + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Sender = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Receiver", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTx + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTx + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTx + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Receiver = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TimeoutHeight", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTx + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTx + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTx + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.TimeoutHeight.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 7: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field TimeoutTimestamp", wireType) + } + m.TimeoutTimestamp = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTx + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.TimeoutTimestamp |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipTx(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTx + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *MsgTransferResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTx + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MsgTransferResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MsgTransferResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + 
switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipTx(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTx + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipTx(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowTx + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowTx + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowTx + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthTx + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupTx + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthTx + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthTx = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowTx = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupTx = fmt.Errorf("proto: unexpected end of group") +) diff --git a/apps/transfer/types/genesis.pb.go b/apps/transfer/types/genesis.pb.go new file mode 100644 index 0000000000..b19173d8f4 --- /dev/null +++ b/apps/transfer/types/genesis.pb.go @@ -0,0 +1,443 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: ibcgo/apps/transfer/v1/genesis.proto + +package types + +import ( + fmt "fmt" + _ "github.com/gogo/protobuf/gogoproto" + proto "github.com/gogo/protobuf/proto" + io "io" + math "math" + math_bits "math/bits" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
+const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +// GenesisState defines the ibc-transfer genesis state +type GenesisState struct { + PortId string `protobuf:"bytes,1,opt,name=port_id,json=portId,proto3" json:"port_id,omitempty" yaml:"port_id"` + DenomTraces Traces `protobuf:"bytes,2,rep,name=denom_traces,json=denomTraces,proto3,castrepeated=Traces" json:"denom_traces" yaml:"denom_traces"` + Params Params `protobuf:"bytes,3,opt,name=params,proto3" json:"params"` +} + +func (m *GenesisState) Reset() { *m = GenesisState{} } +func (m *GenesisState) String() string { return proto.CompactTextString(m) } +func (*GenesisState) ProtoMessage() {} +func (*GenesisState) Descriptor() ([]byte, []int) { + return fileDescriptor_19e19f3d07c11479, []int{0} +} +func (m *GenesisState) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *GenesisState) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_GenesisState.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *GenesisState) XXX_Merge(src proto.Message) { + xxx_messageInfo_GenesisState.Merge(m, src) +} +func (m *GenesisState) XXX_Size() int { + return m.Size() +} +func (m *GenesisState) XXX_DiscardUnknown() { + xxx_messageInfo_GenesisState.DiscardUnknown(m) +} + +var xxx_messageInfo_GenesisState proto.InternalMessageInfo + +func (m *GenesisState) GetPortId() string { + if m != nil { + return m.PortId + } + return "" +} + +func (m *GenesisState) GetDenomTraces() Traces { + if m != nil { + return m.DenomTraces + } + return nil +} + +func (m *GenesisState) GetParams() Params { + if m != nil { + return m.Params + } + return Params{} +} + +func init() { + proto.RegisterType((*GenesisState)(nil), "ibcgo.apps.transfer.v1.GenesisState") +} + +func init() { + proto.RegisterFile("ibcgo/apps/transfer/v1/genesis.proto", fileDescriptor_19e19f3d07c11479) +} + +var fileDescriptor_19e19f3d07c11479 = []byte{ + // 305 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x52, 0xc9, 0x4c, 0x4a, 0x4e, + 0xcf, 0xd7, 0x4f, 0x2c, 0x28, 0x28, 0xd6, 0x2f, 0x29, 0x4a, 0xcc, 0x2b, 0x4e, 0x4b, 0x2d, 0xd2, + 0x2f, 0x33, 0xd4, 0x4f, 0x4f, 0xcd, 0x4b, 0x2d, 0xce, 0x2c, 0xd6, 0x2b, 0x28, 0xca, 0x2f, 0xc9, + 0x17, 0x12, 0x03, 0xab, 0xd2, 0x03, 0xa9, 0xd2, 0x83, 0xa9, 0xd2, 0x2b, 0x33, 0x94, 0x52, 0xc5, + 0xa1, 0x1b, 0xae, 0x06, 0xac, 0x5d, 0x4a, 0x24, 0x3d, 0x3f, 0x3d, 0x1f, 0xcc, 0xd4, 0x07, 0xb1, + 0x20, 0xa2, 0x4a, 0xcf, 0x19, 0xb9, 0x78, 0xdc, 0x21, 0xd6, 0x04, 0x97, 0x24, 0x96, 0xa4, 0x0a, + 0x69, 0x73, 0xb1, 0x17, 0xe4, 0x17, 0x95, 0xc4, 0x67, 0xa6, 0x48, 0x30, 0x2a, 0x30, 0x6a, 0x70, + 0x3a, 0x09, 0x7d, 0xba, 0x27, 0xcf, 0x57, 0x99, 0x98, 0x9b, 0x63, 0xa5, 0x04, 0x95, 0x50, 0x0a, + 0x62, 0x03, 0xb1, 0x3c, 0x53, 0x84, 0x72, 0xb8, 0x78, 0x52, 0x52, 0xf3, 0xf2, 0x73, 0xe3, 0x4b, + 0x8a, 0x12, 0x93, 0x53, 0x8b, 0x25, 0x98, 0x14, 0x98, 0x35, 0xb8, 0x8d, 0x94, 0xf4, 0xb0, 0xbb, + 0x54, 0xcf, 0x05, 0xa4, 0x36, 0x04, 0xa4, 0xd4, 0x49, 0xf5, 0xc4, 0x3d, 0x79, 0x86, 0x4f, 0xf7, + 0xe4, 0x85, 0x21, 0x26, 0x23, 0x9b, 0xa2, 0xb4, 0xea, 0xbe, 0x3c, 0x1b, 0x58, 0x55, 0x71, 0x10, + 0x77, 0x0a, 0x5c, 0x4b, 0xb1, 0x90, 0x0d, 0x17, 0x5b, 0x41, 0x62, 0x51, 0x62, 0x6e, 0xb1, 0x04, + 0xb3, 0x02, 0xa3, 0x06, 0xb7, 0x91, 0x1c, 0x2e, 0x7b, 0x02, 0xc0, 0xaa, 0x9c, 0x58, 0x40, 0x76, + 0x04, 0x41, 0xf5, 0x38, 0xb9, 0x9d, 0x78, 0x24, 
0xc7, 0x78, 0xe1, 0x91, 0x1c, 0xe3, 0x83, 0x47, + 0x72, 0x8c, 0x13, 0x1e, 0xcb, 0x31, 0x5c, 0x78, 0x2c, 0xc7, 0x70, 0xe3, 0xb1, 0x1c, 0x43, 0x94, + 0x4e, 0x7a, 0x66, 0x49, 0x46, 0x69, 0x92, 0x5e, 0x72, 0x7e, 0xae, 0x7e, 0x72, 0x7e, 0x71, 0x6e, + 0x7e, 0xb1, 0x7e, 0x66, 0x52, 0xb2, 0x2e, 0x46, 0x98, 0x96, 0x54, 0x16, 0xa4, 0x16, 0x27, 0xb1, + 0x81, 0x03, 0xce, 0x18, 0x10, 0x00, 0x00, 0xff, 0xff, 0x67, 0xb9, 0x62, 0x19, 0xb5, 0x01, 0x00, + 0x00, +} + +func (m *GenesisState) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *GenesisState) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *GenesisState) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.Params.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenesis(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + if len(m.DenomTraces) > 0 { + for iNdEx := len(m.DenomTraces) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.DenomTraces[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenesis(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + if len(m.PortId) > 0 { + i -= len(m.PortId) + copy(dAtA[i:], m.PortId) + i = encodeVarintGenesis(dAtA, i, uint64(len(m.PortId))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func encodeVarintGenesis(dAtA []byte, offset int, v uint64) int { + offset -= sovGenesis(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *GenesisState) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.PortId) + if l > 0 { + n += 1 + l + sovGenesis(uint64(l)) + } + if len(m.DenomTraces) > 0 { + for _, e := range m.DenomTraces { + l = e.Size() + n += 1 + l + sovGenesis(uint64(l)) + } + } + l = m.Params.Size() + n += 1 + l + sovGenesis(uint64(l)) + return n +} + +func sovGenesis(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozGenesis(x uint64) (n int) { + return sovGenesis(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *GenesisState) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenesis + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: GenesisState: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: GenesisState: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PortId", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenesis + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenesis + } + postIndex := iNdEx 
+ intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenesis + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.PortId = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DenomTraces", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenesis + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenesis + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenesis + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.DenomTraces = append(m.DenomTraces, DenomTrace{}) + if err := m.DenomTraces[len(m.DenomTraces)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Params", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenesis + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenesis + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenesis + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Params.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenesis(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenesis + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipGenesis(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenesis + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenesis + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenesis + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthGenesis + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupGenesis + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthGenesis + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthGenesis = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowGenesis = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupGenesis = fmt.Errorf("proto: unexpected end of 
group") +) diff --git a/apps/transfer/types/query.pb.go b/apps/transfer/types/query.pb.go new file mode 100644 index 0000000000..3e365af18f --- /dev/null +++ b/apps/transfer/types/query.pb.go @@ -0,0 +1,1418 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: ibcgo/apps/transfer/v1/query.proto + +package types + +import ( + context "context" + fmt "fmt" + query "github.com/cosmos/cosmos-sdk/types/query" + _ "github.com/gogo/protobuf/gogoproto" + grpc1 "github.com/gogo/protobuf/grpc" + proto "github.com/gogo/protobuf/proto" + _ "google.golang.org/genproto/googleapis/api/annotations" + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" + io "io" + math "math" + math_bits "math/bits" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +// QueryDenomTraceRequest is the request type for the Query/DenomTrace RPC +// method +type QueryDenomTraceRequest struct { + // hash (in hex format) of the denomination trace information. + Hash string `protobuf:"bytes,1,opt,name=hash,proto3" json:"hash,omitempty"` +} + +func (m *QueryDenomTraceRequest) Reset() { *m = QueryDenomTraceRequest{} } +func (m *QueryDenomTraceRequest) String() string { return proto.CompactTextString(m) } +func (*QueryDenomTraceRequest) ProtoMessage() {} +func (*QueryDenomTraceRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_956e6703e65895ef, []int{0} +} +func (m *QueryDenomTraceRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *QueryDenomTraceRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_QueryDenomTraceRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *QueryDenomTraceRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_QueryDenomTraceRequest.Merge(m, src) +} +func (m *QueryDenomTraceRequest) XXX_Size() int { + return m.Size() +} +func (m *QueryDenomTraceRequest) XXX_DiscardUnknown() { + xxx_messageInfo_QueryDenomTraceRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_QueryDenomTraceRequest proto.InternalMessageInfo + +func (m *QueryDenomTraceRequest) GetHash() string { + if m != nil { + return m.Hash + } + return "" +} + +// QueryDenomTraceResponse is the response type for the Query/DenomTrace RPC +// method. +type QueryDenomTraceResponse struct { + // denom_trace returns the requested denomination trace information. 
+ DenomTrace *DenomTrace `protobuf:"bytes,1,opt,name=denom_trace,json=denomTrace,proto3" json:"denom_trace,omitempty"` +} + +func (m *QueryDenomTraceResponse) Reset() { *m = QueryDenomTraceResponse{} } +func (m *QueryDenomTraceResponse) String() string { return proto.CompactTextString(m) } +func (*QueryDenomTraceResponse) ProtoMessage() {} +func (*QueryDenomTraceResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_956e6703e65895ef, []int{1} +} +func (m *QueryDenomTraceResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *QueryDenomTraceResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_QueryDenomTraceResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *QueryDenomTraceResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_QueryDenomTraceResponse.Merge(m, src) +} +func (m *QueryDenomTraceResponse) XXX_Size() int { + return m.Size() +} +func (m *QueryDenomTraceResponse) XXX_DiscardUnknown() { + xxx_messageInfo_QueryDenomTraceResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_QueryDenomTraceResponse proto.InternalMessageInfo + +func (m *QueryDenomTraceResponse) GetDenomTrace() *DenomTrace { + if m != nil { + return m.DenomTrace + } + return nil +} + +// QueryConnectionsRequest is the request type for the Query/DenomTraces RPC +// method +type QueryDenomTracesRequest struct { + // pagination defines an optional pagination for the request. + Pagination *query.PageRequest `protobuf:"bytes,1,opt,name=pagination,proto3" json:"pagination,omitempty"` +} + +func (m *QueryDenomTracesRequest) Reset() { *m = QueryDenomTracesRequest{} } +func (m *QueryDenomTracesRequest) String() string { return proto.CompactTextString(m) } +func (*QueryDenomTracesRequest) ProtoMessage() {} +func (*QueryDenomTracesRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_956e6703e65895ef, []int{2} +} +func (m *QueryDenomTracesRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *QueryDenomTracesRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_QueryDenomTracesRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *QueryDenomTracesRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_QueryDenomTracesRequest.Merge(m, src) +} +func (m *QueryDenomTracesRequest) XXX_Size() int { + return m.Size() +} +func (m *QueryDenomTracesRequest) XXX_DiscardUnknown() { + xxx_messageInfo_QueryDenomTracesRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_QueryDenomTracesRequest proto.InternalMessageInfo + +func (m *QueryDenomTracesRequest) GetPagination() *query.PageRequest { + if m != nil { + return m.Pagination + } + return nil +} + +// QueryConnectionsResponse is the response type for the Query/DenomTraces RPC +// method. +type QueryDenomTracesResponse struct { + // denom_traces returns all denominations trace information. + DenomTraces Traces `protobuf:"bytes,1,rep,name=denom_traces,json=denomTraces,proto3,castrepeated=Traces" json:"denom_traces"` + // pagination defines the pagination in the response. 
+ Pagination *query.PageResponse `protobuf:"bytes,2,opt,name=pagination,proto3" json:"pagination,omitempty"` +} + +func (m *QueryDenomTracesResponse) Reset() { *m = QueryDenomTracesResponse{} } +func (m *QueryDenomTracesResponse) String() string { return proto.CompactTextString(m) } +func (*QueryDenomTracesResponse) ProtoMessage() {} +func (*QueryDenomTracesResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_956e6703e65895ef, []int{3} +} +func (m *QueryDenomTracesResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *QueryDenomTracesResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_QueryDenomTracesResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *QueryDenomTracesResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_QueryDenomTracesResponse.Merge(m, src) +} +func (m *QueryDenomTracesResponse) XXX_Size() int { + return m.Size() +} +func (m *QueryDenomTracesResponse) XXX_DiscardUnknown() { + xxx_messageInfo_QueryDenomTracesResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_QueryDenomTracesResponse proto.InternalMessageInfo + +func (m *QueryDenomTracesResponse) GetDenomTraces() Traces { + if m != nil { + return m.DenomTraces + } + return nil +} + +func (m *QueryDenomTracesResponse) GetPagination() *query.PageResponse { + if m != nil { + return m.Pagination + } + return nil +} + +// QueryParamsRequest is the request type for the Query/Params RPC method. +type QueryParamsRequest struct { +} + +func (m *QueryParamsRequest) Reset() { *m = QueryParamsRequest{} } +func (m *QueryParamsRequest) String() string { return proto.CompactTextString(m) } +func (*QueryParamsRequest) ProtoMessage() {} +func (*QueryParamsRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_956e6703e65895ef, []int{4} +} +func (m *QueryParamsRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *QueryParamsRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_QueryParamsRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *QueryParamsRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_QueryParamsRequest.Merge(m, src) +} +func (m *QueryParamsRequest) XXX_Size() int { + return m.Size() +} +func (m *QueryParamsRequest) XXX_DiscardUnknown() { + xxx_messageInfo_QueryParamsRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_QueryParamsRequest proto.InternalMessageInfo + +// QueryParamsResponse is the response type for the Query/Params RPC method. +type QueryParamsResponse struct { + // params defines the parameters of the module. 
+ Params *Params `protobuf:"bytes,1,opt,name=params,proto3" json:"params,omitempty"` +} + +func (m *QueryParamsResponse) Reset() { *m = QueryParamsResponse{} } +func (m *QueryParamsResponse) String() string { return proto.CompactTextString(m) } +func (*QueryParamsResponse) ProtoMessage() {} +func (*QueryParamsResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_956e6703e65895ef, []int{5} +} +func (m *QueryParamsResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *QueryParamsResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_QueryParamsResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *QueryParamsResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_QueryParamsResponse.Merge(m, src) +} +func (m *QueryParamsResponse) XXX_Size() int { + return m.Size() +} +func (m *QueryParamsResponse) XXX_DiscardUnknown() { + xxx_messageInfo_QueryParamsResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_QueryParamsResponse proto.InternalMessageInfo + +func (m *QueryParamsResponse) GetParams() *Params { + if m != nil { + return m.Params + } + return nil +} + +func init() { + proto.RegisterType((*QueryDenomTraceRequest)(nil), "ibcgo.apps.transfer.v1.QueryDenomTraceRequest") + proto.RegisterType((*QueryDenomTraceResponse)(nil), "ibcgo.apps.transfer.v1.QueryDenomTraceResponse") + proto.RegisterType((*QueryDenomTracesRequest)(nil), "ibcgo.apps.transfer.v1.QueryDenomTracesRequest") + proto.RegisterType((*QueryDenomTracesResponse)(nil), "ibcgo.apps.transfer.v1.QueryDenomTracesResponse") + proto.RegisterType((*QueryParamsRequest)(nil), "ibcgo.apps.transfer.v1.QueryParamsRequest") + proto.RegisterType((*QueryParamsResponse)(nil), "ibcgo.apps.transfer.v1.QueryParamsResponse") +} + +func init() { + proto.RegisterFile("ibcgo/apps/transfer/v1/query.proto", fileDescriptor_956e6703e65895ef) +} + +var fileDescriptor_956e6703e65895ef = []byte{ + // 519 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x54, 0xcd, 0x6e, 0x13, 0x3d, + 0x14, 0x8d, 0xdb, 0xef, 0x8b, 0xc4, 0x0d, 0x62, 0x61, 0xaa, 0x12, 0x8d, 0xaa, 0x69, 0x65, 0x95, + 0xbf, 0xb4, 0xd8, 0x4c, 0x91, 0x78, 0x80, 0x82, 0xca, 0x0a, 0xa9, 0x44, 0xb0, 0x61, 0x01, 0xf2, + 0x4c, 0xcd, 0x64, 0x24, 0x32, 0x9e, 0x8e, 0x9d, 0x48, 0x15, 0x62, 0xc3, 0x86, 0x2d, 0x12, 0x5b, + 0x16, 0xac, 0x79, 0x04, 0x9e, 0xa0, 0xcb, 0x4a, 0xdd, 0xb0, 0x02, 0x94, 0xf0, 0x20, 0x68, 0x6c, + 0x4f, 0x33, 0xd1, 0x24, 0x74, 0x76, 0xd6, 0xf5, 0xb9, 0xe7, 0x9e, 0x73, 0xae, 0x65, 0x20, 0x49, + 0x18, 0xc5, 0x92, 0xf1, 0x2c, 0x53, 0x4c, 0xe7, 0x3c, 0x55, 0x6f, 0x44, 0xce, 0xc6, 0x01, 0x3b, + 0x1e, 0x89, 0xfc, 0x84, 0x66, 0xb9, 0xd4, 0x12, 0xaf, 0x1b, 0x0c, 0x2d, 0x30, 0xb4, 0xc4, 0xd0, + 0x71, 0xe0, 0xad, 0xc5, 0x32, 0x96, 0x06, 0xc2, 0x8a, 0x93, 0x45, 0x7b, 0xbd, 0x48, 0xaa, 0xa1, + 0x54, 0x2c, 0xe4, 0x4a, 0x58, 0x1a, 0x36, 0x0e, 0x42, 0xa1, 0x79, 0xc0, 0x32, 0x1e, 0x27, 0x29, + 0xd7, 0x89, 0x4c, 0x1d, 0xf6, 0xe6, 0x92, 0xe9, 0x17, 0x53, 0x2c, 0x6c, 0x23, 0x96, 0x32, 0x7e, + 0x2b, 0x18, 0xcf, 0x12, 0xc6, 0xd3, 0x54, 0x6a, 0xc3, 0xa1, 0xec, 0x2d, 0xd9, 0x85, 0xf5, 0x67, + 0xc5, 0x98, 0xc7, 0x22, 0x95, 0xc3, 0xe7, 0x39, 0x8f, 0x44, 0x5f, 0x1c, 0x8f, 0x84, 0xd2, 0x18, + 0xc3, 0x7f, 0x03, 0xae, 0x06, 0x5d, 0xb4, 0x85, 0xee, 0x5c, 0xe9, 0x9b, 0x33, 0x79, 0x05, 0x37, + 0x6a, 0x68, 0x95, 0xc9, 0x54, 0x09, 0xfc, 0x08, 0x3a, 
0x47, 0x45, 0xf5, 0xb5, 0x2e, 0xca, 0xa6, + 0xab, 0xb3, 0x47, 0xe8, 0x62, 0xf7, 0xb4, 0x42, 0x00, 0x47, 0x17, 0x67, 0xc2, 0x6b, 0xfc, 0xaa, + 0x94, 0x73, 0x00, 0x30, 0x4b, 0xc0, 0xd1, 0xdf, 0xa2, 0x36, 0x2e, 0x5a, 0xc4, 0x45, 0x6d, 0xea, + 0x2e, 0x2e, 0x7a, 0xc8, 0xe3, 0xd2, 0x4a, 0xbf, 0xd2, 0x49, 0xbe, 0x23, 0xe8, 0xd6, 0x67, 0x38, + 0x13, 0x2f, 0xe0, 0x6a, 0xc5, 0x84, 0xea, 0xa2, 0xad, 0xd5, 0x66, 0x2e, 0xf6, 0xaf, 0x9d, 0xfe, + 0xdc, 0x6c, 0x7d, 0xfb, 0xb5, 0xd9, 0x76, 0x8c, 0x9d, 0x99, 0x2b, 0x85, 0x9f, 0xcc, 0x69, 0x5f, + 0x31, 0xda, 0x6f, 0x5f, 0xaa, 0xdd, 0x6a, 0x9a, 0x13, 0xbf, 0x06, 0xd8, 0x68, 0x3f, 0xe4, 0x39, + 0x1f, 0x96, 0xd1, 0x90, 0xa7, 0x70, 0x7d, 0xae, 0xea, 0xcc, 0x3c, 0x84, 0x76, 0x66, 0x2a, 0x2e, + 0x2d, 0x7f, 0x99, 0x0d, 0xd7, 0xe7, 0xd0, 0x7b, 0xe7, 0xab, 0xf0, 0xbf, 0xe1, 0xc3, 0x5f, 0x11, + 0xc0, 0xcc, 0x23, 0xa6, 0xcb, 0x08, 0x16, 0xbf, 0x20, 0x8f, 0x35, 0xc6, 0x5b, 0xc5, 0x24, 0xf8, + 0x70, 0xfe, 0xe7, 0xf3, 0xca, 0x0e, 0xbe, 0xcb, 0x92, 0x30, 0xaa, 0x3f, 0xec, 0xea, 0x6a, 0xd8, + 0xbb, 0xe2, 0x41, 0xbe, 0xc7, 0x5f, 0x10, 0x74, 0x2a, 0x9b, 0xc4, 0x4d, 0x67, 0x96, 0xe1, 0x79, + 0xf7, 0x9b, 0x37, 0x38, 0x95, 0x3d, 0xa3, 0x72, 0x1b, 0x93, 0xcb, 0x55, 0xe2, 0x8f, 0x08, 0xda, + 0x36, 0x5e, 0xdc, 0xfb, 0xe7, 0xa0, 0xb9, 0x8d, 0x7a, 0x3b, 0x8d, 0xb0, 0x4e, 0xcf, 0xb6, 0xd1, + 0xe3, 0xe3, 0x8d, 0xc5, 0x7a, 0xec, 0x56, 0xf7, 0x0f, 0x4e, 0x27, 0x3e, 0x3a, 0x9b, 0xf8, 0xe8, + 0xf7, 0xc4, 0x47, 0x9f, 0xa6, 0x7e, 0xeb, 0x6c, 0xea, 0xb7, 0x7e, 0x4c, 0xfd, 0xd6, 0xcb, 0xdd, + 0x38, 0xd1, 0x83, 0x51, 0x48, 0x23, 0x39, 0x64, 0xee, 0xfb, 0x49, 0xc2, 0xe8, 0x5e, 0xed, 0x6b, + 0xd1, 0x27, 0x99, 0x50, 0x61, 0xdb, 0xfc, 0x1b, 0x0f, 0xfe, 0x06, 0x00, 0x00, 0xff, 0xff, 0xef, + 0xe0, 0xd8, 0x44, 0xfc, 0x04, 0x00, 0x00, +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConn + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion4 + +// QueryClient is the client API for Query service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. +type QueryClient interface { + // DenomTrace queries a denomination trace information. + DenomTrace(ctx context.Context, in *QueryDenomTraceRequest, opts ...grpc.CallOption) (*QueryDenomTraceResponse, error) + // DenomTraces queries all denomination traces. + DenomTraces(ctx context.Context, in *QueryDenomTracesRequest, opts ...grpc.CallOption) (*QueryDenomTracesResponse, error) + // Params queries all parameters of the ibc-transfer module. + Params(ctx context.Context, in *QueryParamsRequest, opts ...grpc.CallOption) (*QueryParamsResponse, error) +} + +type queryClient struct { + cc grpc1.ClientConn +} + +func NewQueryClient(cc grpc1.ClientConn) QueryClient { + return &queryClient{cc} +} + +func (c *queryClient) DenomTrace(ctx context.Context, in *QueryDenomTraceRequest, opts ...grpc.CallOption) (*QueryDenomTraceResponse, error) { + out := new(QueryDenomTraceResponse) + err := c.cc.Invoke(ctx, "/ibcgo.apps.transfer.v1.Query/DenomTrace", in, out, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +func (c *queryClient) DenomTraces(ctx context.Context, in *QueryDenomTracesRequest, opts ...grpc.CallOption) (*QueryDenomTracesResponse, error) { + out := new(QueryDenomTracesResponse) + err := c.cc.Invoke(ctx, "/ibcgo.apps.transfer.v1.Query/DenomTraces", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *queryClient) Params(ctx context.Context, in *QueryParamsRequest, opts ...grpc.CallOption) (*QueryParamsResponse, error) { + out := new(QueryParamsResponse) + err := c.cc.Invoke(ctx, "/ibcgo.apps.transfer.v1.Query/Params", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// QueryServer is the server API for Query service. +type QueryServer interface { + // DenomTrace queries a denomination trace information. + DenomTrace(context.Context, *QueryDenomTraceRequest) (*QueryDenomTraceResponse, error) + // DenomTraces queries all denomination traces. + DenomTraces(context.Context, *QueryDenomTracesRequest) (*QueryDenomTracesResponse, error) + // Params queries all parameters of the ibc-transfer module. + Params(context.Context, *QueryParamsRequest) (*QueryParamsResponse, error) +} + +// UnimplementedQueryServer can be embedded to have forward compatible implementations. +type UnimplementedQueryServer struct { +} + +func (*UnimplementedQueryServer) DenomTrace(ctx context.Context, req *QueryDenomTraceRequest) (*QueryDenomTraceResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method DenomTrace not implemented") +} +func (*UnimplementedQueryServer) DenomTraces(ctx context.Context, req *QueryDenomTracesRequest) (*QueryDenomTracesResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method DenomTraces not implemented") +} +func (*UnimplementedQueryServer) Params(ctx context.Context, req *QueryParamsRequest) (*QueryParamsResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method Params not implemented") +} + +func RegisterQueryServer(s grpc1.Server, srv QueryServer) { + s.RegisterService(&_Query_serviceDesc, srv) +} + +func _Query_DenomTrace_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(QueryDenomTraceRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(QueryServer).DenomTrace(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/ibcgo.apps.transfer.v1.Query/DenomTrace", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(QueryServer).DenomTrace(ctx, req.(*QueryDenomTraceRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Query_DenomTraces_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(QueryDenomTracesRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(QueryServer).DenomTraces(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/ibcgo.apps.transfer.v1.Query/DenomTraces", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(QueryServer).DenomTraces(ctx, req.(*QueryDenomTracesRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Query_Params_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor 
grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(QueryParamsRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(QueryServer).Params(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/ibcgo.apps.transfer.v1.Query/Params", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(QueryServer).Params(ctx, req.(*QueryParamsRequest)) + } + return interceptor(ctx, in, info, handler) +} + +var _Query_serviceDesc = grpc.ServiceDesc{ + ServiceName: "ibcgo.apps.transfer.v1.Query", + HandlerType: (*QueryServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "DenomTrace", + Handler: _Query_DenomTrace_Handler, + }, + { + MethodName: "DenomTraces", + Handler: _Query_DenomTraces_Handler, + }, + { + MethodName: "Params", + Handler: _Query_Params_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "ibcgo/apps/transfer/v1/query.proto", +} + +func (m *QueryDenomTraceRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *QueryDenomTraceRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *QueryDenomTraceRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Hash) > 0 { + i -= len(m.Hash) + copy(dAtA[i:], m.Hash) + i = encodeVarintQuery(dAtA, i, uint64(len(m.Hash))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *QueryDenomTraceResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *QueryDenomTraceResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *QueryDenomTraceResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.DenomTrace != nil { + { + size, err := m.DenomTrace.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintQuery(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *QueryDenomTracesRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *QueryDenomTracesRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *QueryDenomTracesRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Pagination != nil { + { + size, err := m.Pagination.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintQuery(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *QueryDenomTracesResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *QueryDenomTracesResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) 
+} + +func (m *QueryDenomTracesResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Pagination != nil { + { + size, err := m.Pagination.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintQuery(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if len(m.DenomTraces) > 0 { + for iNdEx := len(m.DenomTraces) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.DenomTraces[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintQuery(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *QueryParamsRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *QueryParamsRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *QueryParamsRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + return len(dAtA) - i, nil +} + +func (m *QueryParamsResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *QueryParamsResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *QueryParamsResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Params != nil { + { + size, err := m.Params.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintQuery(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func encodeVarintQuery(dAtA []byte, offset int, v uint64) int { + offset -= sovQuery(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *QueryDenomTraceRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Hash) + if l > 0 { + n += 1 + l + sovQuery(uint64(l)) + } + return n +} + +func (m *QueryDenomTraceResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.DenomTrace != nil { + l = m.DenomTrace.Size() + n += 1 + l + sovQuery(uint64(l)) + } + return n +} + +func (m *QueryDenomTracesRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Pagination != nil { + l = m.Pagination.Size() + n += 1 + l + sovQuery(uint64(l)) + } + return n +} + +func (m *QueryDenomTracesResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.DenomTraces) > 0 { + for _, e := range m.DenomTraces { + l = e.Size() + n += 1 + l + sovQuery(uint64(l)) + } + } + if m.Pagination != nil { + l = m.Pagination.Size() + n += 1 + l + sovQuery(uint64(l)) + } + return n +} + +func (m *QueryParamsRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + return n +} + +func (m *QueryParamsResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Params != nil { + l = m.Params.Size() + n += 1 + l + sovQuery(uint64(l)) + } + return n +} + +func sovQuery(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozQuery(x uint64) (n int) { + return 
sovQuery(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *QueryDenomTraceRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: QueryDenomTraceRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: QueryDenomTraceRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Hash", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Hash = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipQuery(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthQuery + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *QueryDenomTraceResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: QueryDenomTraceResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: QueryDenomTraceResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DenomTrace", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.DenomTrace == nil { + m.DenomTrace = &DenomTrace{} + } + if err := m.DenomTrace.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipQuery(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthQuery + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} 
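// Editor's sketch (not part of the generated file): the Unmarshal methods in this
// file all decode protobuf base-128 varints by hand — each byte contributes its low
// 7 bits and a set high bit means "more bytes follow". A minimal standalone
// illustration of that wire encoding/decoding, assuming only the standard proto
// varint format (the generated code additionally guards against >64-bit overflow):
package main

import "fmt"

// encodeVarint appends v to buf using protobuf base-128 varint encoding.
func encodeVarint(buf []byte, v uint64) []byte {
	for v >= 0x80 {
		buf = append(buf, byte(v)|0x80) // low 7 bits, continuation bit set
		v >>= 7
	}
	return append(buf, byte(v)) // final byte, continuation bit clear
}

// decodeVarint mirrors the loops in the generated Unmarshal methods above.
func decodeVarint(buf []byte) (v uint64, n int) {
	for shift := uint(0); n < len(buf); shift += 7 {
		b := buf[n]
		n++
		v |= uint64(b&0x7F) << shift
		if b < 0x80 {
			return v, n
		}
	}
	return 0, 0 // truncated input
}

func main() {
	enc := encodeVarint(nil, 300)
	v, n := decodeVarint(enc)
	fmt.Println(enc, v, n) // [172 2] 300 2
}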
+func (m *QueryDenomTracesRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: QueryDenomTracesRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: QueryDenomTracesRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Pagination", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Pagination == nil { + m.Pagination = &query.PageRequest{} + } + if err := m.Pagination.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipQuery(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthQuery + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *QueryDenomTracesResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: QueryDenomTracesResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: QueryDenomTracesResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DenomTraces", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.DenomTraces = append(m.DenomTraces, DenomTrace{}) + if err := m.DenomTraces[len(m.DenomTraces)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Pagination", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + 
msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Pagination == nil { + m.Pagination = &query.PageResponse{} + } + if err := m.Pagination.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipQuery(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthQuery + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *QueryParamsRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: QueryParamsRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: QueryParamsRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipQuery(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthQuery + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *QueryParamsResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: QueryParamsResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: QueryParamsResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Params", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Params == nil { + m.Params = &Params{} + } + if err := m.Params.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipQuery(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthQuery + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} 
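// Editor's sketch (not part of the generated file): the Query service declared
// earlier in this file (QueryClient / NewQueryClient) is ordinarily consumed over a
// gRPC connection. A hypothetical usage example; the import path, endpoint address,
// and hash value are illustrative assumptions, not taken from this patch:
package main

import (
	"context"
	"fmt"
	"log"

	"google.golang.org/grpc"

	// hypothetical import path for the generated "types" package in this repo
	transfertypes "github.com/cosmos/ibc-go/apps/transfer/types"
)

func main() {
	// Dial a node's gRPC endpoint (9090 is a common default, assumed here).
	conn, err := grpc.Dial("localhost:9090", grpc.WithInsecure())
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	client := transfertypes.NewQueryClient(conn)

	// Query a single denomination trace by its hex-encoded hash.
	res, err := client.DenomTrace(context.Background(), &transfertypes.QueryDenomTraceRequest{
		Hash: "ABCDEF0123456789", // illustrative placeholder hash
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(res.DenomTrace)
}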
+func skipQuery(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowQuery + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowQuery + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowQuery + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthQuery + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupQuery + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthQuery + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthQuery = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowQuery = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupQuery = fmt.Errorf("proto: unexpected end of group") +) diff --git a/apps/transfer/types/query.pb.gw.go b/apps/transfer/types/query.pb.gw.go new file mode 100644 index 0000000000..4333649f14 --- /dev/null +++ b/apps/transfer/types/query.pb.gw.go @@ -0,0 +1,326 @@ +// Code generated by protoc-gen-grpc-gateway. DO NOT EDIT. +// source: ibcgo/apps/transfer/v1/query.proto + +/* +Package types is a reverse proxy. + +It translates gRPC into RESTful JSON APIs. 
+*/ +package types + +import ( + "context" + "io" + "net/http" + + "github.com/golang/protobuf/descriptor" + "github.com/golang/protobuf/proto" + "github.com/grpc-ecosystem/grpc-gateway/runtime" + "github.com/grpc-ecosystem/grpc-gateway/utilities" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/status" +) + +// Suppress "imported and not used" errors +var _ codes.Code +var _ io.Reader +var _ status.Status +var _ = runtime.String +var _ = utilities.NewDoubleArray +var _ = descriptor.ForMessage + +func request_Query_DenomTrace_0(ctx context.Context, marshaler runtime.Marshaler, client QueryClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq QueryDenomTraceRequest + var metadata runtime.ServerMetadata + + var ( + val string + ok bool + err error + _ = err + ) + + val, ok = pathParams["hash"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "hash") + } + + protoReq.Hash, err = runtime.String(val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "hash", err) + } + + msg, err := client.DenomTrace(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err + +} + +func local_request_Query_DenomTrace_0(ctx context.Context, marshaler runtime.Marshaler, server QueryServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq QueryDenomTraceRequest + var metadata runtime.ServerMetadata + + var ( + val string + ok bool + err error + _ = err + ) + + val, ok = pathParams["hash"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "hash") + } + + protoReq.Hash, err = runtime.String(val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "hash", err) + } + + msg, err := server.DenomTrace(ctx, &protoReq) + return msg, metadata, err + +} + +var ( + filter_Query_DenomTraces_0 = &utilities.DoubleArray{Encoding: map[string]int{}, Base: []int(nil), Check: []int(nil)} +) + +func request_Query_DenomTraces_0(ctx context.Context, marshaler runtime.Marshaler, client QueryClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq QueryDenomTracesRequest + var metadata runtime.ServerMetadata + + if err := req.ParseForm(); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_Query_DenomTraces_0); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + msg, err := client.DenomTraces(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err + +} + +func local_request_Query_DenomTraces_0(ctx context.Context, marshaler runtime.Marshaler, server QueryServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq QueryDenomTracesRequest + var metadata runtime.ServerMetadata + + if err := req.ParseForm(); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_Query_DenomTraces_0); err != nil { + return nil, metadata, 
status.Errorf(codes.InvalidArgument, "%v", err) + } + + msg, err := server.DenomTraces(ctx, &protoReq) + return msg, metadata, err + +} + +func request_Query_Params_0(ctx context.Context, marshaler runtime.Marshaler, client QueryClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq QueryParamsRequest + var metadata runtime.ServerMetadata + + msg, err := client.Params(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err + +} + +func local_request_Query_Params_0(ctx context.Context, marshaler runtime.Marshaler, server QueryServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq QueryParamsRequest + var metadata runtime.ServerMetadata + + msg, err := server.Params(ctx, &protoReq) + return msg, metadata, err + +} + +// RegisterQueryHandlerServer registers the http handlers for service Query to "mux". +// UnaryRPC :call QueryServer directly. +// StreamingRPC :currently unsupported pending https://github.com/grpc/grpc-go/issues/906. +// Note that using this registration option will cause many gRPC library features (such as grpc.SendHeader, etc) to stop working. Consider using RegisterQueryHandlerFromEndpoint instead. +func RegisterQueryHandlerServer(ctx context.Context, mux *runtime.ServeMux, server QueryServer) error { + + mux.Handle("GET", pattern_Query_DenomTrace_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := local_request_Query_DenomTrace_0(rctx, inboundMarshaler, server, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Query_DenomTrace_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + + mux.Handle("GET", pattern_Query_DenomTraces_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := local_request_Query_DenomTraces_0(rctx, inboundMarshaler, server, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Query_DenomTraces_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
+ + }) + + mux.Handle("GET", pattern_Query_Params_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := local_request_Query_Params_0(rctx, inboundMarshaler, server, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Query_Params_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + + return nil +} + +// RegisterQueryHandlerFromEndpoint is same as RegisterQueryHandler but +// automatically dials to "endpoint" and closes the connection when "ctx" gets done. +func RegisterQueryHandlerFromEndpoint(ctx context.Context, mux *runtime.ServeMux, endpoint string, opts []grpc.DialOption) (err error) { + conn, err := grpc.Dial(endpoint, opts...) + if err != nil { + return err + } + defer func() { + if err != nil { + if cerr := conn.Close(); cerr != nil { + grpclog.Infof("Failed to close conn to %s: %v", endpoint, cerr) + } + return + } + go func() { + <-ctx.Done() + if cerr := conn.Close(); cerr != nil { + grpclog.Infof("Failed to close conn to %s: %v", endpoint, cerr) + } + }() + }() + + return RegisterQueryHandler(ctx, mux, conn) +} + +// RegisterQueryHandler registers the http handlers for service Query to "mux". +// The handlers forward requests to the grpc endpoint over "conn". +func RegisterQueryHandler(ctx context.Context, mux *runtime.ServeMux, conn *grpc.ClientConn) error { + return RegisterQueryHandlerClient(ctx, mux, NewQueryClient(conn)) +} + +// RegisterQueryHandlerClient registers the http handlers for service Query +// to "mux". The handlers forward requests to the grpc endpoint over the given implementation of "QueryClient". +// Note: the gRPC framework executes interceptors within the gRPC handler. If the passed in "QueryClient" +// doesn't go through the normal gRPC flow (creating a gRPC client etc.) then it will be up to the passed in +// "QueryClient" to call the correct interceptors. +func RegisterQueryHandlerClient(ctx context.Context, mux *runtime.ServeMux, client QueryClient) error { + + mux.Handle("GET", pattern_Query_DenomTrace_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_Query_DenomTrace_0(rctx, inboundMarshaler, client, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Query_DenomTrace_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
+ + }) + + mux.Handle("GET", pattern_Query_DenomTraces_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_Query_DenomTraces_0(rctx, inboundMarshaler, client, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Query_DenomTraces_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + + mux.Handle("GET", pattern_Query_Params_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_Query_Params_0(rctx, inboundMarshaler, client, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Query_Params_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + + return nil +} + +var ( + pattern_Query_DenomTrace_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3, 2, 4, 1, 0, 4, 1, 5, 5}, []string{"ibc", "apps", "transfer", "v1", "denom_traces", "hash"}, "", runtime.AssumeColonVerbOpt(true))) + + pattern_Query_DenomTraces_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3, 2, 4}, []string{"ibc", "apps", "transfer", "v1", "denom_traces"}, "", runtime.AssumeColonVerbOpt(true))) + + pattern_Query_Params_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3, 2, 4}, []string{"ibc", "apps", "transfer", "v1", "params"}, "", runtime.AssumeColonVerbOpt(true))) +) + +var ( + forward_Query_DenomTrace_0 = runtime.ForwardResponseMessage + + forward_Query_DenomTraces_0 = runtime.ForwardResponseMessage + + forward_Query_Params_0 = runtime.ForwardResponseMessage +) diff --git a/apps/transfer/types/transfer.pb.go b/apps/transfer/types/transfer.pb.go new file mode 100644 index 0000000000..7b405f308d --- /dev/null +++ b/apps/transfer/types/transfer.pb.go @@ -0,0 +1,908 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: ibcgo/apps/transfer/v1/transfer.proto + +package types + +import ( + fmt "fmt" + _ "github.com/gogo/protobuf/gogoproto" + proto "github.com/gogo/protobuf/proto" + io "io" + math "math" + math_bits "math/bits" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
+const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +// FungibleTokenPacketData defines a struct for the packet payload +// See FungibleTokenPacketData spec: +// https://github.com/cosmos/ics/tree/master/spec/ics-020-fungible-token-transfer#data-structures +type FungibleTokenPacketData struct { + // the token denomination to be transferred + Denom string `protobuf:"bytes,1,opt,name=denom,proto3" json:"denom,omitempty"` + // the token amount to be transferred + Amount uint64 `protobuf:"varint,2,opt,name=amount,proto3" json:"amount,omitempty"` + // the sender address + Sender string `protobuf:"bytes,3,opt,name=sender,proto3" json:"sender,omitempty"` + // the recipient address on the destination chain + Receiver string `protobuf:"bytes,4,opt,name=receiver,proto3" json:"receiver,omitempty"` +} + +func (m *FungibleTokenPacketData) Reset() { *m = FungibleTokenPacketData{} } +func (m *FungibleTokenPacketData) String() string { return proto.CompactTextString(m) } +func (*FungibleTokenPacketData) ProtoMessage() {} +func (*FungibleTokenPacketData) Descriptor() ([]byte, []int) { + return fileDescriptor_0cd9e010e90bbec6, []int{0} +} +func (m *FungibleTokenPacketData) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *FungibleTokenPacketData) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_FungibleTokenPacketData.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *FungibleTokenPacketData) XXX_Merge(src proto.Message) { + xxx_messageInfo_FungibleTokenPacketData.Merge(m, src) +} +func (m *FungibleTokenPacketData) XXX_Size() int { + return m.Size() +} +func (m *FungibleTokenPacketData) XXX_DiscardUnknown() { + xxx_messageInfo_FungibleTokenPacketData.DiscardUnknown(m) +} + +var xxx_messageInfo_FungibleTokenPacketData proto.InternalMessageInfo + +func (m *FungibleTokenPacketData) GetDenom() string { + if m != nil { + return m.Denom + } + return "" +} + +func (m *FungibleTokenPacketData) GetAmount() uint64 { + if m != nil { + return m.Amount + } + return 0 +} + +func (m *FungibleTokenPacketData) GetSender() string { + if m != nil { + return m.Sender + } + return "" +} + +func (m *FungibleTokenPacketData) GetReceiver() string { + if m != nil { + return m.Receiver + } + return "" +} + +// DenomTrace contains the base denomination for ICS20 fungible tokens and the +// source tracing information path. +type DenomTrace struct { + // path defines the chain of port/channel identifiers used for tracing the + // source of the fungible token. + Path string `protobuf:"bytes,1,opt,name=path,proto3" json:"path,omitempty"` + // base denomination of the relayed fungible token. 
+ BaseDenom string `protobuf:"bytes,2,opt,name=base_denom,json=baseDenom,proto3" json:"base_denom,omitempty"` +} + +func (m *DenomTrace) Reset() { *m = DenomTrace{} } +func (m *DenomTrace) String() string { return proto.CompactTextString(m) } +func (*DenomTrace) ProtoMessage() {} +func (*DenomTrace) Descriptor() ([]byte, []int) { + return fileDescriptor_0cd9e010e90bbec6, []int{1} +} +func (m *DenomTrace) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DenomTrace) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_DenomTrace.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *DenomTrace) XXX_Merge(src proto.Message) { + xxx_messageInfo_DenomTrace.Merge(m, src) +} +func (m *DenomTrace) XXX_Size() int { + return m.Size() +} +func (m *DenomTrace) XXX_DiscardUnknown() { + xxx_messageInfo_DenomTrace.DiscardUnknown(m) +} + +var xxx_messageInfo_DenomTrace proto.InternalMessageInfo + +func (m *DenomTrace) GetPath() string { + if m != nil { + return m.Path + } + return "" +} + +func (m *DenomTrace) GetBaseDenom() string { + if m != nil { + return m.BaseDenom + } + return "" +} + +// Params defines the set of IBC transfer parameters. +// NOTE: To prevent a single token from being transferred, set the +// TransfersEnabled parameter to true and then set the bank module's SendEnabled +// parameter for the denomination to false. +type Params struct { + // send_enabled enables or disables all cross-chain token transfers from this + // chain. + SendEnabled bool `protobuf:"varint,1,opt,name=send_enabled,json=sendEnabled,proto3" json:"send_enabled,omitempty" yaml:"send_enabled"` + // receive_enabled enables or disables all cross-chain token transfers to this + // chain. 
+ ReceiveEnabled bool `protobuf:"varint,2,opt,name=receive_enabled,json=receiveEnabled,proto3" json:"receive_enabled,omitempty" yaml:"receive_enabled"` +} + +func (m *Params) Reset() { *m = Params{} } +func (m *Params) String() string { return proto.CompactTextString(m) } +func (*Params) ProtoMessage() {} +func (*Params) Descriptor() ([]byte, []int) { + return fileDescriptor_0cd9e010e90bbec6, []int{2} +} +func (m *Params) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Params) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Params.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Params) XXX_Merge(src proto.Message) { + xxx_messageInfo_Params.Merge(m, src) +} +func (m *Params) XXX_Size() int { + return m.Size() +} +func (m *Params) XXX_DiscardUnknown() { + xxx_messageInfo_Params.DiscardUnknown(m) +} + +var xxx_messageInfo_Params proto.InternalMessageInfo + +func (m *Params) GetSendEnabled() bool { + if m != nil { + return m.SendEnabled + } + return false +} + +func (m *Params) GetReceiveEnabled() bool { + if m != nil { + return m.ReceiveEnabled + } + return false +} + +func init() { + proto.RegisterType((*FungibleTokenPacketData)(nil), "ibcgo.apps.transfer.v1.FungibleTokenPacketData") + proto.RegisterType((*DenomTrace)(nil), "ibcgo.apps.transfer.v1.DenomTrace") + proto.RegisterType((*Params)(nil), "ibcgo.apps.transfer.v1.Params") +} + +func init() { + proto.RegisterFile("ibcgo/apps/transfer/v1/transfer.proto", fileDescriptor_0cd9e010e90bbec6) +} + +var fileDescriptor_0cd9e010e90bbec6 = []byte{ + // 349 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x64, 0x91, 0xc1, 0x6a, 0xfa, 0x40, + 0x10, 0xc6, 0x8d, 0x7f, 0xff, 0xa2, 0xdb, 0xd2, 0xc2, 0x56, 0x34, 0x08, 0x8d, 0x12, 0x28, 0x78, + 0x68, 0x13, 0xa4, 0x37, 0x2f, 0x05, 0x6b, 0x3d, 0x4b, 0xf0, 0xd4, 0x8b, 0x6c, 0xd6, 0x69, 0x0c, + 0x9a, 0xdd, 0xb0, 0xbb, 0x0a, 0xd2, 0x27, 0xe8, 0xad, 0x8f, 0xd5, 0xa3, 0xc7, 0x9e, 0xa4, 0xe8, + 0x1b, 0xf8, 0x04, 0x65, 0x37, 0x21, 0x94, 0xf6, 0x36, 0xdf, 0x7c, 0xbf, 0x6f, 0x66, 0x60, 0xd0, + 0x4d, 0x1c, 0xd2, 0x88, 0xfb, 0x24, 0x4d, 0xa5, 0xaf, 0x04, 0x61, 0xf2, 0x05, 0x84, 0xbf, 0xe9, + 0x17, 0xb5, 0x97, 0x0a, 0xae, 0x38, 0x6e, 0x1a, 0xcc, 0xd3, 0x98, 0x57, 0x58, 0x9b, 0x7e, 0xbb, + 0x11, 0xf1, 0x88, 0x1b, 0xc4, 0xd7, 0x55, 0x46, 0xbb, 0xaf, 0xa8, 0x35, 0x5e, 0xb3, 0x28, 0x0e, + 0x57, 0x30, 0xe5, 0x4b, 0x60, 0x13, 0x42, 0x97, 0xa0, 0x46, 0x44, 0x11, 0xdc, 0x40, 0xff, 0xe7, + 0xc0, 0x78, 0x62, 0x5b, 0x5d, 0xab, 0x57, 0x0f, 0x32, 0x81, 0x9b, 0xa8, 0x4a, 0x12, 0xbe, 0x66, + 0xca, 0x2e, 0x77, 0xad, 0x5e, 0x25, 0xc8, 0x95, 0xee, 0x4b, 0x60, 0x73, 0x10, 0xf6, 0x3f, 0x83, + 0xe7, 0x0a, 0xb7, 0x51, 0x4d, 0x00, 0x85, 0x78, 0x03, 0xc2, 0xae, 0x18, 0xa7, 0xd0, 0xee, 0x03, + 0x42, 0x23, 0x3d, 0x74, 0x2a, 0x08, 0x05, 0x8c, 0x51, 0x25, 0x25, 0x6a, 0x91, 0xaf, 0x33, 0x35, + 0xbe, 0x46, 0x28, 0x24, 0x12, 0x66, 0xd9, 0x21, 0x65, 0xe3, 0xd4, 0x75, 0xc7, 0xe4, 0xdc, 0x37, + 0x0b, 0x55, 0x27, 0x44, 0x90, 0x44, 0xe2, 0x01, 0x3a, 0xd7, 0x1b, 0x67, 0xc0, 0x48, 0xb8, 0x82, + 0xb9, 0x99, 0x52, 0x1b, 0xb6, 0x4e, 0xfb, 0xce, 0xd5, 0x96, 0x24, 0xab, 0x81, 0xfb, 0xd3, 0x75, + 0x83, 0x33, 0x2d, 0x9f, 0x32, 0x85, 0x1f, 0xd1, 0x65, 0x7e, 0x53, 0x11, 0x2f, 0x9b, 0x78, 0xfb, + 0xb4, 0xef, 0x34, 0xb3, 0xf8, 0x2f, 0xc0, 0x0d, 0x2e, 0xf2, 0x4e, 0x3e, 0x64, 0x38, 0xfe, 0x38, + 0x38, 0xd6, 0xee, 
0xe0, 0x58, 0x5f, 0x07, 0xc7, 0x7a, 0x3f, 0x3a, 0xa5, 0xdd, 0xd1, 0x29, 0x7d, + 0x1e, 0x9d, 0xd2, 0xf3, 0x6d, 0x14, 0xab, 0xc5, 0x3a, 0xf4, 0x28, 0x4f, 0x7c, 0xca, 0x65, 0xc2, + 0xa5, 0x1f, 0x87, 0xf4, 0xee, 0xcf, 0x2f, 0xd5, 0x36, 0x05, 0x19, 0x56, 0xcd, 0x63, 0xee, 0xbf, + 0x03, 0x00, 0x00, 0xff, 0xff, 0x41, 0x6a, 0xce, 0x58, 0xef, 0x01, 0x00, 0x00, +} + +func (m *FungibleTokenPacketData) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *FungibleTokenPacketData) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *FungibleTokenPacketData) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Receiver) > 0 { + i -= len(m.Receiver) + copy(dAtA[i:], m.Receiver) + i = encodeVarintTransfer(dAtA, i, uint64(len(m.Receiver))) + i-- + dAtA[i] = 0x22 + } + if len(m.Sender) > 0 { + i -= len(m.Sender) + copy(dAtA[i:], m.Sender) + i = encodeVarintTransfer(dAtA, i, uint64(len(m.Sender))) + i-- + dAtA[i] = 0x1a + } + if m.Amount != 0 { + i = encodeVarintTransfer(dAtA, i, uint64(m.Amount)) + i-- + dAtA[i] = 0x10 + } + if len(m.Denom) > 0 { + i -= len(m.Denom) + copy(dAtA[i:], m.Denom) + i = encodeVarintTransfer(dAtA, i, uint64(len(m.Denom))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *DenomTrace) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DenomTrace) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *DenomTrace) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.BaseDenom) > 0 { + i -= len(m.BaseDenom) + copy(dAtA[i:], m.BaseDenom) + i = encodeVarintTransfer(dAtA, i, uint64(len(m.BaseDenom))) + i-- + dAtA[i] = 0x12 + } + if len(m.Path) > 0 { + i -= len(m.Path) + copy(dAtA[i:], m.Path) + i = encodeVarintTransfer(dAtA, i, uint64(len(m.Path))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *Params) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Params) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Params) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.ReceiveEnabled { + i-- + if m.ReceiveEnabled { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x10 + } + if m.SendEnabled { + i-- + if m.SendEnabled { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func encodeVarintTransfer(dAtA []byte, offset int, v uint64) int { + offset -= sovTransfer(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *FungibleTokenPacketData) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Denom) + if l > 0 { + n += 1 + l + sovTransfer(uint64(l)) + } + if m.Amount != 0 { + n += 1 + sovTransfer(uint64(m.Amount)) + } + l = len(m.Sender) + if l > 0 { + n += 1 + l 
+ sovTransfer(uint64(l)) + } + l = len(m.Receiver) + if l > 0 { + n += 1 + l + sovTransfer(uint64(l)) + } + return n +} + +func (m *DenomTrace) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Path) + if l > 0 { + n += 1 + l + sovTransfer(uint64(l)) + } + l = len(m.BaseDenom) + if l > 0 { + n += 1 + l + sovTransfer(uint64(l)) + } + return n +} + +func (m *Params) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.SendEnabled { + n += 2 + } + if m.ReceiveEnabled { + n += 2 + } + return n +} + +func sovTransfer(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozTransfer(x uint64) (n int) { + return sovTransfer(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *FungibleTokenPacketData) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTransfer + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: FungibleTokenPacketData: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: FungibleTokenPacketData: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Denom", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTransfer + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTransfer + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTransfer + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Denom = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Amount", wireType) + } + m.Amount = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTransfer + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Amount |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Sender", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTransfer + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTransfer + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTransfer + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Sender = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Receiver", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTransfer + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 
0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTransfer + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTransfer + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Receiver = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTransfer(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTransfer + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *DenomTrace) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTransfer + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DenomTrace: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DenomTrace: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Path", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTransfer + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTransfer + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTransfer + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Path = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field BaseDenom", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTransfer + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTransfer + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTransfer + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.BaseDenom = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTransfer(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTransfer + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Params) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTransfer + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: 
Params: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Params: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field SendEnabled", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTransfer + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.SendEnabled = bool(v != 0) + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ReceiveEnabled", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTransfer + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.ReceiveEnabled = bool(v != 0) + default: + iNdEx = preIndex + skippy, err := skipTransfer(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTransfer + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipTransfer(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowTransfer + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowTransfer + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowTransfer + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthTransfer + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupTransfer + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthTransfer + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthTransfer = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowTransfer = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupTransfer = fmt.Errorf("proto: unexpected end of group") +) diff --git a/apps/transfer/types/tx.pb.go b/apps/transfer/types/tx.pb.go new file mode 100644 index 0000000000..0d5b29f4da --- /dev/null +++ b/apps/transfer/types/tx.pb.go @@ -0,0 +1,801 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. 
+// source: ibcgo/apps/transfer/v1/tx.proto + +package types + +import ( + context "context" + fmt "fmt" + types "github.com/cosmos/cosmos-sdk/types" + types1 "github.com/cosmos/ibc-go/core/02-client/types" + _ "github.com/gogo/protobuf/gogoproto" + grpc1 "github.com/gogo/protobuf/grpc" + proto "github.com/gogo/protobuf/proto" + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" + io "io" + math "math" + math_bits "math/bits" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +// MsgTransfer defines a msg to transfer fungible tokens (i.e Coins) between +// ICS20 enabled chains. See ICS Spec here: +// https://github.com/cosmos/ics/tree/master/spec/ics-020-fungible-token-transfer#data-structures +type MsgTransfer struct { + // the port on which the packet will be sent + SourcePort string `protobuf:"bytes,1,opt,name=source_port,json=sourcePort,proto3" json:"source_port,omitempty" yaml:"source_port"` + // the channel by which the packet will be sent + SourceChannel string `protobuf:"bytes,2,opt,name=source_channel,json=sourceChannel,proto3" json:"source_channel,omitempty" yaml:"source_channel"` + // the tokens to be transferred + Token types.Coin `protobuf:"bytes,3,opt,name=token,proto3" json:"token"` + // the sender address + Sender string `protobuf:"bytes,4,opt,name=sender,proto3" json:"sender,omitempty"` + // the recipient address on the destination chain + Receiver string `protobuf:"bytes,5,opt,name=receiver,proto3" json:"receiver,omitempty"` + // Timeout height relative to the current block height. + // The timeout is disabled when set to 0. + TimeoutHeight types1.Height `protobuf:"bytes,6,opt,name=timeout_height,json=timeoutHeight,proto3" json:"timeout_height" yaml:"timeout_height"` + // Timeout timestamp (in nanoseconds) relative to the current block timestamp. + // The timeout is disabled when set to 0. + TimeoutTimestamp uint64 `protobuf:"varint,7,opt,name=timeout_timestamp,json=timeoutTimestamp,proto3" json:"timeout_timestamp,omitempty" yaml:"timeout_timestamp"` +} + +func (m *MsgTransfer) Reset() { *m = MsgTransfer{} } +func (m *MsgTransfer) String() string { return proto.CompactTextString(m) } +func (*MsgTransfer) ProtoMessage() {} +func (*MsgTransfer) Descriptor() ([]byte, []int) { + return fileDescriptor_4ca3945bed527d36, []int{0} +} +func (m *MsgTransfer) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *MsgTransfer) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_MsgTransfer.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *MsgTransfer) XXX_Merge(src proto.Message) { + xxx_messageInfo_MsgTransfer.Merge(m, src) +} +func (m *MsgTransfer) XXX_Size() int { + return m.Size() +} +func (m *MsgTransfer) XXX_DiscardUnknown() { + xxx_messageInfo_MsgTransfer.DiscardUnknown(m) +} + +var xxx_messageInfo_MsgTransfer proto.InternalMessageInfo + +// MsgTransferResponse defines the Msg/Transfer response type. 
+type MsgTransferResponse struct { +} + +func (m *MsgTransferResponse) Reset() { *m = MsgTransferResponse{} } +func (m *MsgTransferResponse) String() string { return proto.CompactTextString(m) } +func (*MsgTransferResponse) ProtoMessage() {} +func (*MsgTransferResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_4ca3945bed527d36, []int{1} +} +func (m *MsgTransferResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *MsgTransferResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_MsgTransferResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *MsgTransferResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_MsgTransferResponse.Merge(m, src) +} +func (m *MsgTransferResponse) XXX_Size() int { + return m.Size() +} +func (m *MsgTransferResponse) XXX_DiscardUnknown() { + xxx_messageInfo_MsgTransferResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_MsgTransferResponse proto.InternalMessageInfo + +func init() { + proto.RegisterType((*MsgTransfer)(nil), "ibcgo.apps.transfer.v1.MsgTransfer") + proto.RegisterType((*MsgTransferResponse)(nil), "ibcgo.apps.transfer.v1.MsgTransferResponse") +} + +func init() { proto.RegisterFile("ibcgo/apps/transfer/v1/tx.proto", fileDescriptor_4ca3945bed527d36) } + +var fileDescriptor_4ca3945bed527d36 = []byte{ + // 478 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x92, 0x41, 0x8b, 0xd3, 0x40, + 0x14, 0xc7, 0x13, 0xdb, 0xad, 0x75, 0xca, 0x2e, 0x3a, 0xba, 0x25, 0x5b, 0xd6, 0xa4, 0xc6, 0x4b, + 0x41, 0x9d, 0x21, 0x2b, 0x22, 0xec, 0x49, 0xb2, 0x20, 0x7a, 0x58, 0x90, 0xb0, 0x27, 0x11, 0x96, + 0x64, 0x7c, 0xa6, 0xc1, 0x26, 0x13, 0x66, 0xa6, 0xc1, 0xfd, 0x06, 0x1e, 0xfd, 0x08, 0xfb, 0x01, + 0xfc, 0x20, 0x7b, 0xec, 0xd1, 0x53, 0x91, 0xf6, 0xe2, 0xb9, 0x9f, 0x40, 0x92, 0x99, 0xd6, 0x16, + 0x3d, 0x78, 0x9a, 0x79, 0xef, 0xff, 0x7b, 0xf3, 0xe7, 0xbd, 0x79, 0xc8, 0xcb, 0x12, 0x96, 0x72, + 0x1a, 0x97, 0xa5, 0xa4, 0x4a, 0xc4, 0x85, 0xfc, 0x04, 0x82, 0x56, 0x01, 0x55, 0x5f, 0x48, 0x29, + 0xb8, 0xe2, 0xb8, 0xdf, 0x00, 0xa4, 0x06, 0xc8, 0x1a, 0x20, 0x55, 0x30, 0x78, 0x90, 0xf2, 0x94, + 0x37, 0x08, 0xad, 0x6f, 0x9a, 0x1e, 0xb8, 0x8c, 0xcb, 0x9c, 0x4b, 0x9a, 0xc4, 0x12, 0x68, 0x15, + 0x24, 0xa0, 0xe2, 0x80, 0x32, 0x9e, 0x15, 0x46, 0x7f, 0xa4, 0xed, 0x18, 0x17, 0x40, 0xd9, 0x24, + 0x83, 0x42, 0xd5, 0x66, 0xfa, 0xa6, 0x11, 0xff, 0x7b, 0x0b, 0xf5, 0xce, 0x65, 0x7a, 0x61, 0xbc, + 0xf0, 0x4b, 0xd4, 0x93, 0x7c, 0x2a, 0x18, 0x5c, 0x96, 0x5c, 0x28, 0xc7, 0x1e, 0xda, 0xa3, 0x3b, + 0x61, 0x7f, 0x35, 0xf7, 0xf0, 0x55, 0x9c, 0x4f, 0x4e, 0xfd, 0x2d, 0xd1, 0x8f, 0x90, 0x8e, 0xde, + 0x71, 0xa1, 0xf0, 0x2b, 0x74, 0x60, 0x34, 0x36, 0x8e, 0x8b, 0x02, 0x26, 0xce, 0xad, 0xa6, 0xf6, + 0x68, 0x35, 0xf7, 0x0e, 0x77, 0x6a, 0x8d, 0xee, 0x47, 0xfb, 0x3a, 0x71, 0xa6, 0x63, 0xfc, 0x02, + 0xed, 0x29, 0xfe, 0x19, 0x0a, 0xa7, 0x35, 0xb4, 0x47, 0xbd, 0x93, 0x23, 0xa2, 0xbb, 0x23, 0x75, + 0x77, 0xc4, 0x74, 0x47, 0xce, 0x78, 0x56, 0x84, 0xed, 0x9b, 0xb9, 0x67, 0x45, 0x9a, 0xc6, 0x7d, + 0xd4, 0x91, 0x50, 0x7c, 0x04, 0xe1, 0xb4, 0x6b, 0xc3, 0xc8, 0x44, 0x78, 0x80, 0xba, 0x02, 0x18, + 0x64, 0x15, 0x08, 0x67, 0xaf, 0x51, 0x36, 0x31, 0x4e, 0xd0, 0x81, 0xca, 0x72, 0xe0, 0x53, 0x75, + 0x39, 0x86, 0x2c, 0x1d, 0x2b, 0xa7, 0xd3, 0x78, 0x1e, 0x13, 0x3d, 0xff, 0x7a, 0x62, 0xc4, 0xcc, + 0xa9, 0x0a, 0xc8, 0x9b, 0x86, 0x09, 0x1f, 0xd6, 0xb6, 0x7f, 
0xda, 0xd9, 0x7d, 0xc1, 0x8f, 0xf6, + 0x4d, 0x42, 0xd3, 0xf8, 0x2d, 0xba, 0xb7, 0x26, 0xea, 0x53, 0xaa, 0x38, 0x2f, 0x9d, 0xdb, 0x43, + 0x7b, 0xd4, 0x0e, 0x8f, 0x57, 0x73, 0xcf, 0xd9, 0x7d, 0x64, 0x83, 0xf8, 0xd1, 0x5d, 0x93, 0xbb, + 0x58, 0xa7, 0x4e, 0xbb, 0x5f, 0xaf, 0x3d, 0xeb, 0xd7, 0xb5, 0x67, 0xf9, 0x87, 0xe8, 0xfe, 0xd6, + 0x6f, 0x45, 0x20, 0x4b, 0x5e, 0x48, 0x38, 0x61, 0xa8, 0x75, 0x2e, 0x53, 0xfc, 0x01, 0x75, 0x37, + 0x1f, 0xf9, 0x98, 0xfc, 0x7b, 0x95, 0xc8, 0x56, 0xfd, 0xe0, 0xc9, 0x7f, 0x40, 0x6b, 0x93, 0xf0, + 0xf5, 0xcd, 0xc2, 0xb5, 0x67, 0x0b, 0xd7, 0xfe, 0xb9, 0x70, 0xed, 0x6f, 0x4b, 0xd7, 0x9a, 0x2d, + 0x5d, 0xeb, 0xc7, 0xd2, 0xb5, 0xde, 0x3f, 0x4d, 0x33, 0x35, 0x9e, 0x26, 0x84, 0xf1, 0x9c, 0x9a, + 0x95, 0xcc, 0x12, 0xf6, 0xec, 0xaf, 0x4d, 0x57, 0x57, 0x25, 0xc8, 0xa4, 0xd3, 0x6c, 0xde, 0xf3, + 0xdf, 0x01, 0x00, 0x00, 0xff, 0xff, 0x9e, 0x48, 0x06, 0x61, 0x0d, 0x03, 0x00, 0x00, +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConn + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion4 + +// MsgClient is the client API for Msg service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. +type MsgClient interface { + // Transfer defines a rpc handler method for MsgTransfer. + Transfer(ctx context.Context, in *MsgTransfer, opts ...grpc.CallOption) (*MsgTransferResponse, error) +} + +type msgClient struct { + cc grpc1.ClientConn +} + +func NewMsgClient(cc grpc1.ClientConn) MsgClient { + return &msgClient{cc} +} + +func (c *msgClient) Transfer(ctx context.Context, in *MsgTransfer, opts ...grpc.CallOption) (*MsgTransferResponse, error) { + out := new(MsgTransferResponse) + err := c.cc.Invoke(ctx, "/ibcgo.apps.transfer.v1.Msg/Transfer", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// MsgServer is the server API for Msg service. +type MsgServer interface { + // Transfer defines a rpc handler method for MsgTransfer. + Transfer(context.Context, *MsgTransfer) (*MsgTransferResponse, error) +} + +// UnimplementedMsgServer can be embedded to have forward compatible implementations. 
+type UnimplementedMsgServer struct { +} + +func (*UnimplementedMsgServer) Transfer(ctx context.Context, req *MsgTransfer) (*MsgTransferResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method Transfer not implemented") +} + +func RegisterMsgServer(s grpc1.Server, srv MsgServer) { + s.RegisterService(&_Msg_serviceDesc, srv) +} + +func _Msg_Transfer_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(MsgTransfer) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(MsgServer).Transfer(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/ibcgo.apps.transfer.v1.Msg/Transfer", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(MsgServer).Transfer(ctx, req.(*MsgTransfer)) + } + return interceptor(ctx, in, info, handler) +} + +var _Msg_serviceDesc = grpc.ServiceDesc{ + ServiceName: "ibcgo.apps.transfer.v1.Msg", + HandlerType: (*MsgServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "Transfer", + Handler: _Msg_Transfer_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "ibcgo/apps/transfer/v1/tx.proto", +} + +func (m *MsgTransfer) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *MsgTransfer) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *MsgTransfer) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.TimeoutTimestamp != 0 { + i = encodeVarintTx(dAtA, i, uint64(m.TimeoutTimestamp)) + i-- + dAtA[i] = 0x38 + } + { + size, err := m.TimeoutHeight.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTx(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x32 + if len(m.Receiver) > 0 { + i -= len(m.Receiver) + copy(dAtA[i:], m.Receiver) + i = encodeVarintTx(dAtA, i, uint64(len(m.Receiver))) + i-- + dAtA[i] = 0x2a + } + if len(m.Sender) > 0 { + i -= len(m.Sender) + copy(dAtA[i:], m.Sender) + i = encodeVarintTx(dAtA, i, uint64(len(m.Sender))) + i-- + dAtA[i] = 0x22 + } + { + size, err := m.Token.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTx(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + if len(m.SourceChannel) > 0 { + i -= len(m.SourceChannel) + copy(dAtA[i:], m.SourceChannel) + i = encodeVarintTx(dAtA, i, uint64(len(m.SourceChannel))) + i-- + dAtA[i] = 0x12 + } + if len(m.SourcePort) > 0 { + i -= len(m.SourcePort) + copy(dAtA[i:], m.SourcePort) + i = encodeVarintTx(dAtA, i, uint64(len(m.SourcePort))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *MsgTransferResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *MsgTransferResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *MsgTransferResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + return len(dAtA) - i, nil +} + +func encodeVarintTx(dAtA []byte, offset int, v uint64) int { + offset -= sovTx(v) + base := offset + for v 
>= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *MsgTransfer) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.SourcePort) + if l > 0 { + n += 1 + l + sovTx(uint64(l)) + } + l = len(m.SourceChannel) + if l > 0 { + n += 1 + l + sovTx(uint64(l)) + } + l = m.Token.Size() + n += 1 + l + sovTx(uint64(l)) + l = len(m.Sender) + if l > 0 { + n += 1 + l + sovTx(uint64(l)) + } + l = len(m.Receiver) + if l > 0 { + n += 1 + l + sovTx(uint64(l)) + } + l = m.TimeoutHeight.Size() + n += 1 + l + sovTx(uint64(l)) + if m.TimeoutTimestamp != 0 { + n += 1 + sovTx(uint64(m.TimeoutTimestamp)) + } + return n +} + +func (m *MsgTransferResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + return n +} + +func sovTx(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozTx(x uint64) (n int) { + return sovTx(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *MsgTransfer) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTx + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MsgTransfer: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MsgTransfer: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SourcePort", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTx + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTx + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTx + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.SourcePort = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SourceChannel", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTx + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTx + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTx + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.SourceChannel = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Token", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTx + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTx + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTx + } + if postIndex > l { + return 
io.ErrUnexpectedEOF + } + if err := m.Token.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Sender", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTx + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTx + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTx + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Sender = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Receiver", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTx + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTx + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTx + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Receiver = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TimeoutHeight", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTx + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTx + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTx + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.TimeoutHeight.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 7: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field TimeoutTimestamp", wireType) + } + m.TimeoutTimestamp = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTx + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.TimeoutTimestamp |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipTx(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTx + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *MsgTransferResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTx + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MsgTransferResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MsgTransferResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch 
fieldNum { + default: + iNdEx = preIndex + skippy, err := skipTx(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTx + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipTx(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowTx + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowTx + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowTx + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthTx + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupTx + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthTx + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthTx = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowTx = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupTx = fmt.Errorf("proto: unexpected end of group") +) diff --git a/buf.yaml b/buf.yaml new file mode 100644 index 0000000000..37f716caba --- /dev/null +++ b/buf.yaml @@ -0,0 +1,34 @@ +version: v1beta1 + +build: + roots: + - proto + - third_party/proto + excludes: + - third_party/proto/google/protobuf +lint: + use: + - DEFAULT + - COMMENTS + - FILE_LOWER_SNAKE_CASE + except: + - UNARY_RPC + - COMMENT_FIELD + - SERVICE_SUFFIX + - PACKAGE_VERSION_SUFFIX + - RPC_REQUEST_STANDARD_NAME + ignore: + - tendermint + - gogoproto + - cosmos_proto + - google + - confio +breaking: + use: + - FILE + ignore: + - tendermint + - gogoproto + - cosmos_proto + - google + - confio diff --git a/core/02-client/abci.go b/core/02-client/abci.go new file mode 100644 index 0000000000..3c56d90ad3 --- /dev/null +++ b/core/02-client/abci.go @@ -0,0 +1,20 @@ +package client + +import ( + sdk "github.com/cosmos/cosmos-sdk/types" + "github.com/cosmos/cosmos-sdk/x/ibc/core/02-client/keeper" + "github.com/cosmos/cosmos-sdk/x/ibc/core/exported" +) + +// BeginBlocker updates an existing localhost client with the latest block height. 
+func BeginBlocker(ctx sdk.Context, k keeper.Keeper) { + _, found := k.GetClientState(ctx, exported.Localhost) + if !found { + return + } + + // update the localhost client with the latest block height + if err := k.UpdateClient(ctx, exported.Localhost, nil); err != nil { + panic(err) + } +} diff --git a/core/02-client/abci_test.go b/core/02-client/abci_test.go new file mode 100644 index 0000000000..3a296618b3 --- /dev/null +++ b/core/02-client/abci_test.go @@ -0,0 +1,60 @@ +package client_test + +import ( + "testing" + + "github.com/stretchr/testify/suite" + + client "github.com/cosmos/cosmos-sdk/x/ibc/core/02-client" + "github.com/cosmos/cosmos-sdk/x/ibc/core/02-client/types" + "github.com/cosmos/cosmos-sdk/x/ibc/core/exported" + localhosttypes "github.com/cosmos/cosmos-sdk/x/ibc/light-clients/09-localhost/types" + ibctesting "github.com/cosmos/cosmos-sdk/x/ibc/testing" +) + +type ClientTestSuite struct { + suite.Suite + + coordinator *ibctesting.Coordinator + + chainA *ibctesting.TestChain + chainB *ibctesting.TestChain +} + +func (suite *ClientTestSuite) SetupTest() { + suite.coordinator = ibctesting.NewCoordinator(suite.T(), 2) + + suite.chainA = suite.coordinator.GetChain(ibctesting.GetChainID(0)) + suite.chainB = suite.coordinator.GetChain(ibctesting.GetChainID(1)) + + // set localhost client + revision := types.ParseChainID(suite.chainA.GetContext().ChainID()) + localHostClient := localhosttypes.NewClientState( + suite.chainA.GetContext().ChainID(), types.NewHeight(revision, uint64(suite.chainA.GetContext().BlockHeight())), + ) + suite.chainA.App.IBCKeeper.ClientKeeper.SetClientState(suite.chainA.GetContext(), exported.Localhost, localHostClient) +} + +func TestClientTestSuite(t *testing.T) { + suite.Run(t, new(ClientTestSuite)) +} + +func (suite *ClientTestSuite) TestBeginBlocker() { + prevHeight := types.GetSelfHeight(suite.chainA.GetContext()) + + localHostClient := suite.chainA.GetClientState(exported.Localhost) + suite.Require().Equal(prevHeight, localHostClient.GetLatestHeight()) + + for i := 0; i < 10; i++ { + // increment height + suite.coordinator.CommitBlock(suite.chainA, suite.chainB) + + suite.Require().NotPanics(func() { + client.BeginBlocker(suite.chainA.GetContext(), suite.chainA.App.IBCKeeper.ClientKeeper) + }, "BeginBlocker shouldn't panic") + + localHostClient = suite.chainA.GetClientState(exported.Localhost) + suite.Require().Equal(prevHeight.Increment(), localHostClient.GetLatestHeight()) + prevHeight = localHostClient.GetLatestHeight().(types.Height) + } +} diff --git a/core/02-client/client/cli/cli.go b/core/02-client/client/cli/cli.go new file mode 100644 index 0000000000..33c9915215 --- /dev/null +++ b/core/02-client/client/cli/cli.go @@ -0,0 +1,51 @@ +package cli + +import ( + "github.com/spf13/cobra" + + "github.com/cosmos/cosmos-sdk/client" + "github.com/cosmos/cosmos-sdk/x/ibc/core/02-client/types" +) + +// GetQueryCmd returns the query commands for IBC clients +func GetQueryCmd() *cobra.Command { + queryCmd := &cobra.Command{ + Use: types.SubModuleName, + Short: "IBC client query subcommands", + DisableFlagParsing: true, + SuggestionsMinimumDistance: 2, + RunE: client.ValidateCmd, + } + + queryCmd.AddCommand( + GetCmdQueryClientStates(), + GetCmdQueryClientState(), + GetCmdQueryConsensusStates(), + GetCmdQueryConsensusState(), + GetCmdQueryHeader(), + GetCmdNodeConsensusState(), + GetCmdParams(), + ) + + return queryCmd +} + +// NewTxCmd returns the command to create and handle IBC clients +func NewTxCmd() *cobra.Command { + txCmd := &cobra.Command{ + 
Use: types.SubModuleName, + Short: "IBC client transaction subcommands", + DisableFlagParsing: true, + SuggestionsMinimumDistance: 2, + RunE: client.ValidateCmd, + } + + txCmd.AddCommand( + NewCreateClientCmd(), + NewUpdateClientCmd(), + NewSubmitMisbehaviourCmd(), + NewUpgradeClientCmd(), + ) + + return txCmd +} diff --git a/core/02-client/client/cli/query.go b/core/02-client/client/cli/query.go new file mode 100644 index 0000000000..c1b5e51a05 --- /dev/null +++ b/core/02-client/client/cli/query.go @@ -0,0 +1,260 @@ +package cli + +import ( + "errors" + "fmt" + + "github.com/spf13/cobra" + + "github.com/cosmos/cosmos-sdk/client" + "github.com/cosmos/cosmos-sdk/client/flags" + "github.com/cosmos/cosmos-sdk/version" + "github.com/cosmos/cosmos-sdk/x/ibc/core/02-client/client/utils" + "github.com/cosmos/cosmos-sdk/x/ibc/core/02-client/types" + host "github.com/cosmos/cosmos-sdk/x/ibc/core/24-host" +) + +const ( + flagLatestHeight = "latest-height" +) + +// GetCmdQueryClientStates defines the command to query all the light clients +// that this chain maintains. +func GetCmdQueryClientStates() *cobra.Command { + cmd := &cobra.Command{ + Use: "states", + Short: "Query all available light clients", + Long: "Query all available light clients", + Example: fmt.Sprintf("%s query %s %s states", version.AppName, host.ModuleName, types.SubModuleName), + Args: cobra.NoArgs, + RunE: func(cmd *cobra.Command, _ []string) error { + clientCtx, err := client.GetClientQueryContext(cmd) + if err != nil { + return err + } + queryClient := types.NewQueryClient(clientCtx) + + pageReq, err := client.ReadPageRequest(cmd.Flags()) + if err != nil { + return err + } + + req := &types.QueryClientStatesRequest{ + Pagination: pageReq, + } + + res, err := queryClient.ClientStates(cmd.Context(), req) + if err != nil { + return err + } + + return clientCtx.PrintProto(res) + }, + } + flags.AddQueryFlagsToCmd(cmd) + flags.AddPaginationFlagsToCmd(cmd, "client states") + + return cmd +} + +// GetCmdQueryClientState defines the command to query the state of a client with +// a given id as defined in https://github.com/cosmos/ics/tree/master/spec/ics-002-client-semantics#query +func GetCmdQueryClientState() *cobra.Command { + cmd := &cobra.Command{ + Use: "state [client-id]", + Short: "Query a client state", + Long: "Query stored client state", + Example: fmt.Sprintf("%s query %s %s state [client-id]", version.AppName, host.ModuleName, types.SubModuleName), + Args: cobra.ExactArgs(1), + RunE: func(cmd *cobra.Command, args []string) error { + clientCtx, err := client.GetClientQueryContext(cmd) + if err != nil { + return err + } + clientID := args[0] + prove, _ := cmd.Flags().GetBool(flags.FlagProve) + + clientStateRes, err := utils.QueryClientState(clientCtx, clientID, prove) + if err != nil { + return err + } + + return clientCtx.PrintProto(clientStateRes) + }, + } + + cmd.Flags().Bool(flags.FlagProve, true, "show proofs for the query results") + flags.AddQueryFlagsToCmd(cmd) + + return cmd +} + +// GetCmdQueryConsensusStates defines the command to query all the consensus states from a given +// client state.
+func GetCmdQueryConsensusStates() *cobra.Command { + cmd := &cobra.Command{ + Use: "consensus-states [client-id]", + Short: "Query all the consensus states of a client.", + Long: "Query all the consensus states from a given client state.", + Example: fmt.Sprintf("%s query %s %s consensus-states [client-id]", version.AppName, host.ModuleName, types.SubModuleName), + Args: cobra.ExactArgs(1), + RunE: func(cmd *cobra.Command, args []string) error { + clientCtx, err := client.GetClientQueryContext(cmd) + if err != nil { + return err + } + clientID := args[0] + + queryClient := types.NewQueryClient(clientCtx) + + pageReq, err := client.ReadPageRequest(cmd.Flags()) + if err != nil { + return err + } + + req := &types.QueryConsensusStatesRequest{ + ClientId: clientID, + Pagination: pageReq, + } + + res, err := queryClient.ConsensusStates(cmd.Context(), req) + if err != nil { + return err + } + + return clientCtx.PrintProto(res) + }, + } + flags.AddQueryFlagsToCmd(cmd) + flags.AddPaginationFlagsToCmd(cmd, "consensus states") + + return cmd +} + +// GetCmdQueryConsensusState defines the command to query the consensus state of +// the chain as defined in https://github.com/cosmos/ics/tree/master/spec/ics-002-client-semantics#query +func GetCmdQueryConsensusState() *cobra.Command { + cmd := &cobra.Command{ + Use: "consensus-state [client-id] [height]", + Short: "Query the consensus state of a client at a given height", + Long: `Query the consensus state for a particular light client at a given height. +If the '--latest-height' flag is included, the query returns the latest consensus state, overriding the height argument.`, + Example: fmt.Sprintf("%s query %s %s consensus-state [client-id] [height]", version.AppName, host.ModuleName, types.SubModuleName), + Args: cobra.RangeArgs(1, 2), + RunE: func(cmd *cobra.Command, args []string) error { + clientCtx, err := client.GetClientQueryContext(cmd) + if err != nil { + return err + } + clientID := args[0] + queryLatestHeight, _ := cmd.Flags().GetBool(flagLatestHeight) + var height types.Height + + if !queryLatestHeight { + if len(args) != 2 { + return errors.New("must include a second 'height' argument when '--latest-height' flag is not provided") + } + + height, err = types.ParseHeight(args[1]) + if err != nil { + return err + } + } + + prove, _ := cmd.Flags().GetBool(flags.FlagProve) + + csRes, err := utils.QueryConsensusState(clientCtx, clientID, height, prove, queryLatestHeight) + if err != nil { + return err + } + + return clientCtx.PrintProto(csRes) + }, + } + + cmd.Flags().Bool(flags.FlagProve, true, "show proofs for the query results") + cmd.Flags().Bool(flagLatestHeight, false, "return latest stored consensus state") + flags.AddQueryFlagsToCmd(cmd) + + return cmd +} + +// GetCmdQueryHeader defines the command to query the latest header on the chain +func GetCmdQueryHeader() *cobra.Command { + cmd := &cobra.Command{ + Use: "header", + Short: "Query the latest header of the running chain", + Long: "Query the latest Tendermint header of the running chain", + Example: fmt.Sprintf("%s query %s %s header", version.AppName, host.ModuleName, types.SubModuleName), + Args: cobra.NoArgs, + RunE: func(cmd *cobra.Command, _ []string) error { + clientCtx, err := client.GetClientQueryContext(cmd) + if err != nil { + return err + } + header, _, err := utils.QueryTendermintHeader(clientCtx) + if err != nil { + return err + } + + return clientCtx.PrintProto(&header) + }, + } + + flags.AddQueryFlagsToCmd(cmd) + + return cmd +} + +// GetCmdNodeConsensusState defines the
command to query the latest consensus state of a node +// The result is fed to client creation +func GetCmdNodeConsensusState() *cobra.Command { + cmd := &cobra.Command{ + Use: "node-state", + Short: "Query a node consensus state", + Long: "Query a node consensus state. This result is fed to the client creation transaction.", + Example: fmt.Sprintf("%s query %s %s node-state", version.AppName, host.ModuleName, types.SubModuleName), + Args: cobra.NoArgs, + RunE: func(cmd *cobra.Command, _ []string) error { + clientCtx, err := client.GetClientQueryContext(cmd) + if err != nil { + return err + } + state, _, err := utils.QueryNodeConsensusState(clientCtx) + if err != nil { + return err + } + + return clientCtx.PrintProto(state) + }, + } + + flags.AddQueryFlagsToCmd(cmd) + + return cmd +} + +// GetCmdParams returns the command handler for ibc client parameter querying. +func GetCmdParams() *cobra.Command { + cmd := &cobra.Command{ + Use: "params", + Short: "Query the current ibc client parameters", + Long: "Query the current ibc client parameters", + Args: cobra.NoArgs, + Example: fmt.Sprintf("%s query %s %s params", version.AppName, host.ModuleName, types.SubModuleName), + RunE: func(cmd *cobra.Command, _ []string) error { + clientCtx, err := client.GetClientQueryContext(cmd) + if err != nil { + return err + } + queryClient := types.NewQueryClient(clientCtx) + + res, _ := queryClient.ClientParams(cmd.Context(), &types.QueryClientParamsRequest{}) + return clientCtx.PrintProto(res.Params) + }, + } + + flags.AddQueryFlagsToCmd(cmd) + + return cmd +} diff --git a/core/02-client/client/cli/tx.go b/core/02-client/client/cli/tx.go new file mode 100644 index 0000000000..bdaa53a8ae --- /dev/null +++ b/core/02-client/client/cli/tx.go @@ -0,0 +1,328 @@ +package cli + +import ( + "fmt" + "io/ioutil" + + "github.com/pkg/errors" + "github.com/spf13/cobra" + + "github.com/cosmos/cosmos-sdk/client" + "github.com/cosmos/cosmos-sdk/client/flags" + "github.com/cosmos/cosmos-sdk/client/tx" + "github.com/cosmos/cosmos-sdk/codec" + sdk "github.com/cosmos/cosmos-sdk/types" + "github.com/cosmos/cosmos-sdk/types/msgservice" + "github.com/cosmos/cosmos-sdk/version" + govcli "github.com/cosmos/cosmos-sdk/x/gov/client/cli" + govtypes "github.com/cosmos/cosmos-sdk/x/gov/types" + "github.com/cosmos/cosmos-sdk/x/ibc/core/02-client/types" + "github.com/cosmos/cosmos-sdk/x/ibc/core/exported" +) + +// NewCreateClientCmd defines the command to create a new IBC light client.
+func NewCreateClientCmd() *cobra.Command { + cmd := &cobra.Command{ + Use: "create [path/to/client_state.json] [path/to/consensus_state.json]", + Short: "create new IBC client", + Long: `create a new IBC client with the specified client state and consensus state + - ClientState JSON example: {"@type":"/ibc.lightclients.solomachine.v1.ClientState","sequence":"1","frozen_sequence":"0","consensus_state":{"public_key":{"@type":"/cosmos.crypto.secp256k1.PubKey","key":"AtK50+5pJOoaa04qqAqrnyAqsYrwrR/INnA6UPIaYZlp"},"diversifier":"testing","timestamp":"10"},"allow_update_after_proposal":false} + - ConsensusState JSON example: {"@type":"/ibc.lightclients.solomachine.v1.ConsensusState","public_key":{"@type":"/cosmos.crypto.secp256k1.PubKey","key":"AtK50+5pJOoaa04qqAqrnyAqsYrwrR/INnA6UPIaYZlp"},"diversifier":"testing","timestamp":"10"}`, + Example: fmt.Sprintf("%s tx ibc %s create [path/to/client_state.json] [path/to/consensus_state.json] --from node0 --home ../node0/cli --chain-id $CID", version.AppName, types.SubModuleName), + Args: cobra.ExactArgs(2), + RunE: func(cmd *cobra.Command, args []string) error { + clientCtx, err := client.GetClientTxContext(cmd) + if err != nil { + return err + } + cdc := codec.NewProtoCodec(clientCtx.InterfaceRegistry) + + // attempt to unmarshal client state argument + var clientState exported.ClientState + clientContentOrFileName := args[0] + if err := cdc.UnmarshalInterfaceJSON([]byte(clientContentOrFileName), &clientState); err != nil { + + // check for file path if JSON input is not provided + contents, err := ioutil.ReadFile(clientContentOrFileName) + if err != nil { + return errors.Wrap(err, "neither JSON input nor path to .json file for client state were provided") + } + + if err := cdc.UnmarshalInterfaceJSON(contents, &clientState); err != nil { + return errors.Wrap(err, "error unmarshalling client state file") + } + } + + // attempt to unmarshal consensus state argument + var consensusState exported.ConsensusState + consensusContentOrFileName := args[1] + if err := cdc.UnmarshalInterfaceJSON([]byte(consensusContentOrFileName), &consensusState); err != nil { + + // check for file path if JSON input is not provided + contents, err := ioutil.ReadFile(consensusContentOrFileName) + if err != nil { + return errors.Wrap(err, "neither JSON input nor path to .json file for consensus state were provided") + } + + if err := cdc.UnmarshalInterfaceJSON(contents, &consensusState); err != nil { + return errors.Wrap(err, "error unmarshalling consensus state file") + } + } + + msg, err := types.NewMsgCreateClient(clientState, consensusState, clientCtx.GetFromAddress()) + if err != nil { + return err + } + + svcMsgClientConn := &msgservice.ServiceMsgClientConn{} + msgClient := types.NewMsgClient(svcMsgClientConn) + _, err = msgClient.CreateClient(cmd.Context(), msg) + if err != nil { + return err + } + + return tx.GenerateOrBroadcastTxCLI(clientCtx, cmd.Flags(), svcMsgClientConn.GetMsgs()...) + }, + } + + flags.AddTxFlagsToCmd(cmd) + + return cmd +} + +// NewUpdateClientCmd defines the command to update an IBC client. 
+func NewUpdateClientCmd() *cobra.Command { + return &cobra.Command{ + Use: "update [client-id] [path/to/header.json]", + Short: "update existing client with a header", + Long: "update existing client with a header", + Example: fmt.Sprintf("%s tx ibc %s update [client-id] [path/to/header.json] --from node0 --home ../node0/cli --chain-id $CID", version.AppName, types.SubModuleName), + Args: cobra.ExactArgs(2), + RunE: func(cmd *cobra.Command, args []string) error { + clientCtx, err := client.GetClientTxContext(cmd) + if err != nil { + return err + } + clientID := args[0] + + cdc := codec.NewProtoCodec(clientCtx.InterfaceRegistry) + + var header exported.Header + headerContentOrFileName := args[1] + if err := cdc.UnmarshalInterfaceJSON([]byte(headerContentOrFileName), &header); err != nil { + + // check for file path if JSON input is not provided + contents, err := ioutil.ReadFile(headerContentOrFileName) + if err != nil { + return errors.Wrap(err, "neither JSON input nor path to .json file for header were provided") + } + + if err := cdc.UnmarshalInterfaceJSON(contents, &header); err != nil { + return errors.Wrap(err, "error unmarshalling header file") + } + } + + msg, err := types.NewMsgUpdateClient(clientID, header, clientCtx.GetFromAddress()) + if err != nil { + return err + } + + svcMsgClientConn := &msgservice.ServiceMsgClientConn{} + msgClient := types.NewMsgClient(svcMsgClientConn) + _, err = msgClient.UpdateClient(cmd.Context(), msg) + if err != nil { + return err + } + + return tx.GenerateOrBroadcastTxCLI(clientCtx, cmd.Flags(), svcMsgClientConn.GetMsgs()...) + }, + } +} + +// NewSubmitMisbehaviourCmd defines the command to submit a misbehaviour to prevent +// future updates. +func NewSubmitMisbehaviourCmd() *cobra.Command { + return &cobra.Command{ + Use: "misbehaviour [path/to/misbehaviour.json]", + Short: "submit a client misbehaviour", + Long: "submit a client misbehaviour to prevent future updates", + Example: fmt.Sprintf("%s tx ibc %s misbehaviour [path/to/misbehaviour.json] --from node0 --home ../node0/cli --chain-id $CID", version.AppName, types.SubModuleName), + Args: cobra.ExactArgs(1), + RunE: func(cmd *cobra.Command, args []string) error { + clientCtx, err := client.GetClientTxContext(cmd) + if err != nil { + return err + } + cdc := codec.NewProtoCodec(clientCtx.InterfaceRegistry) + + var misbehaviour exported.Misbehaviour + misbehaviourContentOrFileName := args[0] + if err := cdc.UnmarshalInterfaceJSON([]byte(misbehaviourContentOrFileName), &misbehaviour); err != nil { + + // check for file path if JSON input is not provided + contents, err := ioutil.ReadFile(misbehaviourContentOrFileName) + if err != nil { + return errors.Wrap(err, "neither JSON input nor path to .json file for misbehaviour were provided") + } + + if err := cdc.UnmarshalInterfaceJSON(contents, misbehaviour); err != nil { + return errors.Wrap(err, "error unmarshalling misbehaviour file") + } + } + + msg, err := types.NewMsgSubmitMisbehaviour(misbehaviour.GetClientID(), misbehaviour, clientCtx.GetFromAddress()) + if err != nil { + return err + } + + svcMsgClientConn := &msgservice.ServiceMsgClientConn{} + msgClient := types.NewMsgClient(svcMsgClientConn) + _, err = msgClient.SubmitMisbehaviour(cmd.Context(), msg) + if err != nil { + return err + } + + return tx.GenerateOrBroadcastTxCLI(clientCtx, cmd.Flags(), svcMsgClientConn.GetMsgs()...) + }, + } +} + +// NewUpgradeClientCmd defines the command to upgrade an IBC light client. 
+func NewUpgradeClientCmd() *cobra.Command { + cmd := &cobra.Command{ + Use: "upgrade [client-identifier] [path/to/client_state.json] [path/to/consensus_state.json] [upgrade-client-proof] [upgrade-consensus-state-proof]", + Short: "upgrade an IBC client", + Long: `upgrade the IBC client associated with the provided client identifier while providing proof committed by the counterparty chain to the new client and consensus states + - ClientState JSON example: {"@type":"/ibc.lightclients.solomachine.v1.ClientState","sequence":"1","frozen_sequence":"0","consensus_state":{"public_key":{"@type":"/cosmos.crypto.secp256k1.PubKey","key":"AtK50+5pJOoaa04qqAqrnyAqsYrwrR/INnA6UPIaYZlp"},"diversifier":"testing","timestamp":"10"},"allow_update_after_proposal":false} + - ConsensusState JSON example: {"@type":"/ibc.lightclients.solomachine.v1.ConsensusState","public_key":{"@type":"/cosmos.crypto.secp256k1.PubKey","key":"AtK50+5pJOoaa04qqAqrnyAqsYrwrR/INnA6UPIaYZlp"},"diversifier":"testing","timestamp":"10"}`, + Example: fmt.Sprintf("%s tx ibc %s upgrade [client-identifier] [path/to/client_state.json] [path/to/consensus_state.json] [client-state-proof] [consensus-state-proof] --from node0 --home ../node0/cli --chain-id $CID", version.AppName, types.SubModuleName), + Args: cobra.ExactArgs(5), + RunE: func(cmd *cobra.Command, args []string) error { + clientCtx, err := client.GetClientTxContext(cmd) + if err != nil { + return err + } + cdc := codec.NewProtoCodec(clientCtx.InterfaceRegistry) + clientID := args[0] + + // attempt to unmarshal client state argument + var clientState exported.ClientState + clientContentOrFileName := args[1] + if err := cdc.UnmarshalInterfaceJSON([]byte(clientContentOrFileName), &clientState); err != nil { + + // check for file path if JSON input is not provided + contents, err := ioutil.ReadFile(clientContentOrFileName) + if err != nil { + return errors.Wrap(err, "neither JSON input nor path to .json file for client state were provided") + } + + if err := cdc.UnmarshalInterfaceJSON(contents, &clientState); err != nil { + return errors.Wrap(err, "error unmarshalling client state file") + } + } + + // attempt to unmarshal consensus state argument + var consensusState exported.ConsensusState + consensusContentOrFileName := args[2] + if err := cdc.UnmarshalInterfaceJSON([]byte(consensusContentOrFileName), &consensusState); err != nil { + + // check for file path if JSON input is not provided + contents, err := ioutil.ReadFile(consensusContentOrFileName) + if err != nil { + return errors.Wrap(err, "neither JSON input nor path to .json file for consensus state were provided") + } + + if err := cdc.UnmarshalInterfaceJSON(contents, &consensusState); err != nil { + return errors.Wrap(err, "error unmarshalling consensus state file") + } + } + + proofUpgradeClient := []byte(args[3]) + proofUpgradeConsensus := []byte(args[4]) + + msg, err := types.NewMsgUpgradeClient(clientID, clientState, consensusState, proofUpgradeClient, proofUpgradeConsensus, clientCtx.GetFromAddress()) + if err != nil { + return err + } + + svcMsgClientConn := &msgservice.ServiceMsgClientConn{} + msgClient := types.NewMsgClient(svcMsgClientConn) + _, err = msgClient.UpgradeClient(cmd.Context(), msg) + if err != nil { + return err + } + + return tx.GenerateOrBroadcastTxCLI(clientCtx, cmd.Flags(), svcMsgClientConn.GetMsgs()...) + }, + } + + flags.AddTxFlagsToCmd(cmd) + + return cmd +} + +// NewCmdSubmitUpdateClientProposal implements a command handler for submitting an update IBC client proposal transaction. 
+func NewCmdSubmitUpdateClientProposal() *cobra.Command { + cmd := &cobra.Command{ + Use: "update-client [subject-client-id] [substitute-client-id] [initial-height] [flags]", + Args: cobra.ExactArgs(3), + Short: "Submit an update IBC client proposal", + Long: "Submit an update IBC client proposal along with an initial deposit.\n" + + "Please specify a subject client identifier you want to update..\n" + + "Please specify the substitute client the subject client will use and the initial height to reference the substitute client's state.", + RunE: func(cmd *cobra.Command, args []string) error { + clientCtx, err := client.GetClientTxContext(cmd) + if err != nil { + return err + } + + title, err := cmd.Flags().GetString(govcli.FlagTitle) + if err != nil { + return err + } + + description, err := cmd.Flags().GetString(govcli.FlagDescription) + if err != nil { + return err + } + + subjectClientID := args[0] + substituteClientID := args[1] + + initialHeight, err := types.ParseHeight(args[2]) + if err != nil { + return err + } + + content := types.NewClientUpdateProposal(title, description, subjectClientID, substituteClientID, initialHeight) + + from := clientCtx.GetFromAddress() + + depositStr, err := cmd.Flags().GetString(govcli.FlagDeposit) + if err != nil { + return err + } + deposit, err := sdk.ParseCoinsNormalized(depositStr) + if err != nil { + return err + } + + msg, err := govtypes.NewMsgSubmitProposal(content, deposit, from) + if err != nil { + return err + } + + if err = msg.ValidateBasic(); err != nil { + return err + } + + return tx.GenerateOrBroadcastTxCLI(clientCtx, cmd.Flags(), msg) + }, + } + + cmd.Flags().String(govcli.FlagTitle, "", "title of proposal") + cmd.Flags().String(govcli.FlagDescription, "", "description of proposal") + cmd.Flags().String(govcli.FlagDeposit, "", "deposit of proposal") + + return cmd +} diff --git a/core/02-client/client/proposal_handler.go b/core/02-client/client/proposal_handler.go new file mode 100644 index 0000000000..63585cbe50 --- /dev/null +++ b/core/02-client/client/proposal_handler.go @@ -0,0 +1,8 @@ +package client + +import ( + govclient "github.com/cosmos/cosmos-sdk/x/gov/client" + "github.com/cosmos/cosmos-sdk/x/ibc/core/02-client/client/cli" +) + +var ProposalHandler = govclient.NewProposalHandler(cli.NewCmdSubmitUpdateClientProposal, nil) diff --git a/core/02-client/client/utils/utils.go b/core/02-client/client/utils/utils.go new file mode 100644 index 0000000000..1a7bc003bc --- /dev/null +++ b/core/02-client/client/utils/utils.go @@ -0,0 +1,199 @@ +package utils + +import ( + "context" + + tmtypes "github.com/tendermint/tendermint/types" + + "github.com/cosmos/cosmos-sdk/client" + + "github.com/cosmos/cosmos-sdk/codec" + sdkerrors "github.com/cosmos/cosmos-sdk/types/errors" + "github.com/cosmos/cosmos-sdk/x/ibc/core/02-client/types" + commitmenttypes "github.com/cosmos/cosmos-sdk/x/ibc/core/23-commitment/types" + host "github.com/cosmos/cosmos-sdk/x/ibc/core/24-host" + ibcclient "github.com/cosmos/cosmos-sdk/x/ibc/core/client" + "github.com/cosmos/cosmos-sdk/x/ibc/core/exported" + ibctmtypes "github.com/cosmos/cosmos-sdk/x/ibc/light-clients/07-tendermint/types" +) + +// QueryClientState returns a client state. If prove is true, it performs an ABCI store query +// in order to retrieve the merkle proof. Otherwise, it uses the gRPC query client. 
+func QueryClientState( + clientCtx client.Context, clientID string, prove bool, +) (*types.QueryClientStateResponse, error) { + if prove { + return QueryClientStateABCI(clientCtx, clientID) + } + + queryClient := types.NewQueryClient(clientCtx) + req := &types.QueryClientStateRequest{ + ClientId: clientID, + } + + return queryClient.ClientState(context.Background(), req) +} + +// QueryClientStateABCI queries the store to get the light client state and a merkle proof. +func QueryClientStateABCI( + clientCtx client.Context, clientID string, +) (*types.QueryClientStateResponse, error) { + key := host.FullClientStateKey(clientID) + + value, proofBz, proofHeight, err := ibcclient.QueryTendermintProof(clientCtx, key) + if err != nil { + return nil, err + } + + // check if client exists + if len(value) == 0 { + return nil, sdkerrors.Wrap(types.ErrClientNotFound, clientID) + } + + cdc := codec.NewProtoCodec(clientCtx.InterfaceRegistry) + + clientState, err := types.UnmarshalClientState(cdc, value) + if err != nil { + return nil, err + } + + anyClientState, err := types.PackClientState(clientState) + if err != nil { + return nil, err + } + + clientStateRes := types.NewQueryClientStateResponse(anyClientState, proofBz, proofHeight) + return clientStateRes, nil +} + +// QueryConsensusState returns a consensus state. If prove is true, it performs an ABCI store +// query in order to retrieve the merkle proof. Otherwise, it uses the gRPC query client. +func QueryConsensusState( + clientCtx client.Context, clientID string, height exported.Height, prove, latestHeight bool, +) (*types.QueryConsensusStateResponse, error) { + if prove { + return QueryConsensusStateABCI(clientCtx, clientID, height) + } + + queryClient := types.NewQueryClient(clientCtx) + req := &types.QueryConsensusStateRequest{ + ClientId: clientID, + RevisionNumber: height.GetRevisionNumber(), + RevisionHeight: height.GetRevisionHeight(), + LatestHeight: latestHeight, + } + + return queryClient.ConsensusState(context.Background(), req) +} + +// QueryConsensusStateABCI queries the store to get the consensus state of a light client and a +// merkle proof of its existence or non-existence. 
+func QueryConsensusStateABCI( + clientCtx client.Context, clientID string, height exported.Height, +) (*types.QueryConsensusStateResponse, error) { + key := host.FullConsensusStateKey(clientID, height) + + value, proofBz, proofHeight, err := ibcclient.QueryTendermintProof(clientCtx, key) + if err != nil { + return nil, err + } + + // check if consensus state exists + if len(value) == 0 { + return nil, sdkerrors.Wrap(types.ErrConsensusStateNotFound, clientID) + } + + cdc := codec.NewProtoCodec(clientCtx.InterfaceRegistry) + + cs, err := types.UnmarshalConsensusState(cdc, value) + if err != nil { + return nil, err + } + + anyConsensusState, err := types.PackConsensusState(cs) + if err != nil { + return nil, err + } + + return types.NewQueryConsensusStateResponse(anyConsensusState, proofBz, proofHeight), nil +} + +// QueryTendermintHeader takes a client context and returns the appropriate +// tendermint header +func QueryTendermintHeader(clientCtx client.Context) (ibctmtypes.Header, int64, error) { + node, err := clientCtx.GetNode() + if err != nil { + return ibctmtypes.Header{}, 0, err + } + + info, err := node.ABCIInfo(context.Background()) + if err != nil { + return ibctmtypes.Header{}, 0, err + } + + height := info.Response.LastBlockHeight + + commit, err := node.Commit(context.Background(), &height) + if err != nil { + return ibctmtypes.Header{}, 0, err + } + + page := 0 + count := 10_000 + + validators, err := node.Validators(context.Background(), &height, &page, &count) + if err != nil { + return ibctmtypes.Header{}, 0, err + } + + protoCommit := commit.SignedHeader.ToProto() + protoValset, err := tmtypes.NewValidatorSet(validators.Validators).ToProto() + if err != nil { + return ibctmtypes.Header{}, 0, err + } + + header := ibctmtypes.Header{ + SignedHeader: protoCommit, + ValidatorSet: protoValset, + } + + return header, height, nil +} + +// QueryNodeConsensusState takes a client context and returns the appropriate +// tendermint consensus state +func QueryNodeConsensusState(clientCtx client.Context) (*ibctmtypes.ConsensusState, int64, error) { + node, err := clientCtx.GetNode() + if err != nil { + return &ibctmtypes.ConsensusState{}, 0, err + } + + info, err := node.ABCIInfo(context.Background()) + if err != nil { + return &ibctmtypes.ConsensusState{}, 0, err + } + + height := info.Response.LastBlockHeight + + commit, err := node.Commit(context.Background(), &height) + if err != nil { + return &ibctmtypes.ConsensusState{}, 0, err + } + + page := 1 + count := 10_000 + + nextHeight := height + 1 + nextVals, err := node.Validators(context.Background(), &nextHeight, &page, &count) + if err != nil { + return &ibctmtypes.ConsensusState{}, 0, err + } + + state := &ibctmtypes.ConsensusState{ + Timestamp: commit.Time, + Root: commitmenttypes.NewMerkleRoot(commit.AppHash), + NextValidatorsHash: tmtypes.NewValidatorSet(nextVals.Validators).Hash(), + } + + return state, height, nil +} diff --git a/core/02-client/doc.go b/core/02-client/doc.go new file mode 100644 index 0000000000..cfe3c76c6a --- /dev/null +++ b/core/02-client/doc.go @@ -0,0 +1,10 @@ +/* +Package client implements the ICS 02 - Client Semantics specification +https://github.com/cosmos/ics/tree/master/spec/ics-002-client-semantics. This +concrete implementation defines the types and methods used to store and update light +clients, which track another chain's state. + +The main type is `Client`, which provides `commitment.Root` to verify state proofs and `ConsensusState` to
+*/ +package client diff --git a/core/02-client/genesis.go b/core/02-client/genesis.go new file mode 100644 index 0000000000..26635f0784 --- /dev/null +++ b/core/02-client/genesis.go @@ -0,0 +1,69 @@ +package client + +import ( + "fmt" + + sdk "github.com/cosmos/cosmos-sdk/types" + "github.com/cosmos/cosmos-sdk/x/ibc/core/02-client/keeper" + "github.com/cosmos/cosmos-sdk/x/ibc/core/02-client/types" + "github.com/cosmos/cosmos-sdk/x/ibc/core/exported" +) + +// InitGenesis initializes the ibc client submodule's state from a provided genesis +// state. +func InitGenesis(ctx sdk.Context, k keeper.Keeper, gs types.GenesisState) { + k.SetParams(ctx, gs.Params) + + // Set all client metadata first. This will allow client keeper to overwrite client and consensus state keys + // if clients accidentally write to ClientKeeper reserved keys. + if len(gs.ClientsMetadata) != 0 { + k.SetAllClientMetadata(ctx, gs.ClientsMetadata) + } + + for _, client := range gs.Clients { + cs, ok := client.ClientState.GetCachedValue().(exported.ClientState) + if !ok { + panic("invalid client state") + } + + if !gs.Params.IsAllowedClient(cs.ClientType()) { + panic(fmt.Sprintf("client state type %s is not registered on the allowlist", cs.ClientType())) + } + + k.SetClientState(ctx, client.ClientId, cs) + } + + for _, cs := range gs.ClientsConsensus { + for _, consState := range cs.ConsensusStates { + consensusState, ok := consState.ConsensusState.GetCachedValue().(exported.ConsensusState) + if !ok { + panic(fmt.Sprintf("invalid consensus state with client ID %s at height %s", cs.ClientId, consState.Height)) + } + + k.SetClientConsensusState(ctx, cs.ClientId, consState.Height, consensusState) + } + } + + k.SetNextClientSequence(ctx, gs.NextClientSequence) + + // NOTE: localhost creation is specifically disallowed for the time being. + // Issue: https://github.com/cosmos/cosmos-sdk/issues/7871 +} + +// ExportGenesis returns the ibc client submodule's exported genesis. +// NOTE: CreateLocalhost should always be false on export since a +// created localhost will be included in the exported clients. 
+func ExportGenesis(ctx sdk.Context, k keeper.Keeper) types.GenesisState { + genClients := k.GetAllGenesisClients(ctx) + clientsMetadata, err := k.GetAllClientMetadata(ctx, genClients) + if err != nil { + panic(err) + } + return types.GenesisState{ + Clients: genClients, + ClientsMetadata: clientsMetadata, + ClientsConsensus: k.GetAllConsensusStates(ctx), + Params: k.GetParams(ctx), + CreateLocalhost: false, + } +} diff --git a/core/02-client/keeper/client.go b/core/02-client/keeper/client.go new file mode 100644 index 0000000000..672dcf5d74 --- /dev/null +++ b/core/02-client/keeper/client.go @@ -0,0 +1,192 @@ +package keeper + +import ( + "github.com/armon/go-metrics" + + "github.com/cosmos/cosmos-sdk/telemetry" + sdk "github.com/cosmos/cosmos-sdk/types" + sdkerrors "github.com/cosmos/cosmos-sdk/types/errors" + "github.com/cosmos/cosmos-sdk/x/ibc/core/02-client/types" + "github.com/cosmos/cosmos-sdk/x/ibc/core/exported" +) + +// CreateClient creates a new client state and populates it with a given consensus +// state as defined in https://github.com/cosmos/ics/tree/master/spec/ics-002-client-semantics#create +func (k Keeper) CreateClient( + ctx sdk.Context, clientState exported.ClientState, consensusState exported.ConsensusState, +) (string, error) { + params := k.GetParams(ctx) + if !params.IsAllowedClient(clientState.ClientType()) { + return "", sdkerrors.Wrapf( + types.ErrInvalidClientType, + "client state type %s is not registered in the allowlist", clientState.ClientType(), + ) + } + + clientID := k.GenerateClientIdentifier(ctx, clientState.ClientType()) + + k.SetClientState(ctx, clientID, clientState) + k.Logger(ctx).Info("client created at height", "client-id", clientID, "height", clientState.GetLatestHeight().String()) + + // verifies initial consensus state against client state and initializes client store with any client-specific metadata + // e.g. set ProcessedTime in Tendermint clients + if err := clientState.Initialize(ctx, k.cdc, k.ClientStore(ctx, clientID), consensusState); err != nil { + return "", err + } + + // check if consensus state is nil in case the created client is Localhost + if consensusState != nil { + k.SetClientConsensusState(ctx, clientID, clientState.GetLatestHeight(), consensusState) + } + + k.Logger(ctx).Info("client created at height", "client-id", clientID, "height", clientState.GetLatestHeight().String()) + + defer func() { + telemetry.IncrCounterWithLabels( + []string{"ibc", "client", "create"}, + 1, + []metrics.Label{telemetry.NewLabel("client-type", clientState.ClientType())}, + ) + }() + + return clientID, nil +} + +// UpdateClient updates the consensus state and the state root from a provided header. 
+func (k Keeper) UpdateClient(ctx sdk.Context, clientID string, header exported.Header) error { + clientState, found := k.GetClientState(ctx, clientID) + if !found { + return sdkerrors.Wrapf(types.ErrClientNotFound, "cannot update client with ID %s", clientID) + } + + // prevent update if the client is frozen before or at header height + if clientState.IsFrozen() && clientState.GetFrozenHeight().LTE(header.GetHeight()) { + return sdkerrors.Wrapf(types.ErrClientFrozen, "cannot update client with ID %s", clientID) + } + + clientState, consensusState, err := clientState.CheckHeaderAndUpdateState(ctx, k.cdc, k.ClientStore(ctx, clientID), header) + if err != nil { + return sdkerrors.Wrapf(err, "cannot update client with ID %s", clientID) + } + + k.SetClientState(ctx, clientID, clientState) + + var consensusHeight exported.Height + + // we don't set consensus state for localhost client + if header != nil && clientID != exported.Localhost { + k.SetClientConsensusState(ctx, clientID, header.GetHeight(), consensusState) + consensusHeight = header.GetHeight() + } else { + consensusHeight = types.GetSelfHeight(ctx) + } + + k.Logger(ctx).Info("client state updated", "client-id", clientID, "height", consensusHeight.String()) + + defer func() { + telemetry.IncrCounterWithLabels( + []string{"ibc", "client", "update"}, + 1, + []metrics.Label{ + telemetry.NewLabel("client-type", clientState.ClientType()), + telemetry.NewLabel("client-id", clientID), + telemetry.NewLabel("update-type", "msg"), + }, + ) + }() + + // emitting events in the keeper emits for both begin block and handler client updates + ctx.EventManager().EmitEvent( + sdk.NewEvent( + types.EventTypeUpdateClient, + sdk.NewAttribute(types.AttributeKeyClientID, clientID), + sdk.NewAttribute(types.AttributeKeyClientType, clientState.ClientType()), + sdk.NewAttribute(types.AttributeKeyConsensusHeight, consensusHeight.String()), + ), + ) + + return nil +} + +// UpgradeClient upgrades the client to a new client state if this new client was committed to +// by the old client at the specified upgrade height +func (k Keeper) UpgradeClient(ctx sdk.Context, clientID string, upgradedClient exported.ClientState, upgradedConsState exported.ConsensusState, + proofUpgradeClient, proofUpgradeConsState []byte) error { + clientState, found := k.GetClientState(ctx, clientID) + if !found { + return sdkerrors.Wrapf(types.ErrClientNotFound, "cannot update client with ID %s", clientID) + } + + // prevent upgrade if current client is frozen + if clientState.IsFrozen() { + return sdkerrors.Wrapf(types.ErrClientFrozen, "cannot update client with ID %s", clientID) + } + + updatedClientState, updatedConsState, err := clientState.VerifyUpgradeAndUpdateState(ctx, k.cdc, k.ClientStore(ctx, clientID), + upgradedClient, upgradedConsState, proofUpgradeClient, proofUpgradeConsState) + if err != nil { + return sdkerrors.Wrapf(err, "cannot upgrade client with ID %s", clientID) + } + + k.SetClientState(ctx, clientID, updatedClientState) + k.SetClientConsensusState(ctx, clientID, updatedClientState.GetLatestHeight(), updatedConsState) + + k.Logger(ctx).Info("client state upgraded", "client-id", clientID, "height", updatedClientState.GetLatestHeight().String()) + + defer func() { + telemetry.IncrCounterWithLabels( + []string{"ibc", "client", "upgrade"}, + 1, + []metrics.Label{ + telemetry.NewLabel("client-type", updatedClientState.ClientType()), + telemetry.NewLabel("client-id", clientID), + }, + ) + }() + + // emitting events in the keeper emits for client upgrades + 
ctx.EventManager().EmitEvent( + sdk.NewEvent( + types.EventTypeUpgradeClient, + sdk.NewAttribute(types.AttributeKeyClientID, clientID), + sdk.NewAttribute(types.AttributeKeyClientType, updatedClientState.ClientType()), + sdk.NewAttribute(types.AttributeKeyConsensusHeight, updatedClientState.GetLatestHeight().String()), + ), + ) + + return nil +} + +// CheckMisbehaviourAndUpdateState checks for client misbehaviour and freezes the +// client if so. +func (k Keeper) CheckMisbehaviourAndUpdateState(ctx sdk.Context, misbehaviour exported.Misbehaviour) error { + clientState, found := k.GetClientState(ctx, misbehaviour.GetClientID()) + if !found { + return sdkerrors.Wrapf(types.ErrClientNotFound, "cannot check misbehaviour for client with ID %s", misbehaviour.GetClientID()) + } + + if clientState.IsFrozen() && clientState.GetFrozenHeight().LTE(misbehaviour.GetHeight()) { + return sdkerrors.Wrapf(types.ErrInvalidMisbehaviour, "client is already frozen at height ≤ misbehaviour height (%s ≤ %s)", clientState.GetFrozenHeight(), misbehaviour.GetHeight()) + } + + clientState, err := clientState.CheckMisbehaviourAndUpdateState(ctx, k.cdc, k.ClientStore(ctx, misbehaviour.GetClientID()), misbehaviour) + if err != nil { + return err + } + + k.SetClientState(ctx, misbehaviour.GetClientID(), clientState) + k.Logger(ctx).Info("client frozen due to misbehaviour", "client-id", misbehaviour.GetClientID(), "height", misbehaviour.GetHeight().String()) + + defer func() { + telemetry.IncrCounterWithLabels( + []string{"ibc", "client", "misbehaviour"}, + 1, + []metrics.Label{ + telemetry.NewLabel("client-type", misbehaviour.ClientType()), + telemetry.NewLabel("client-id", misbehaviour.GetClientID()), + }, + ) + }() + + return nil +} diff --git a/core/02-client/keeper/client_test.go b/core/02-client/keeper/client_test.go new file mode 100644 index 0000000000..0cf5c1fe1d --- /dev/null +++ b/core/02-client/keeper/client_test.go @@ -0,0 +1,603 @@ +package keeper_test + +import ( + "fmt" + "time" + + tmtypes "github.com/tendermint/tendermint/types" + + "github.com/cosmos/cosmos-sdk/x/ibc/core/02-client/types" + clienttypes "github.com/cosmos/cosmos-sdk/x/ibc/core/02-client/types" + commitmenttypes "github.com/cosmos/cosmos-sdk/x/ibc/core/23-commitment/types" + "github.com/cosmos/cosmos-sdk/x/ibc/core/exported" + ibctmtypes "github.com/cosmos/cosmos-sdk/x/ibc/light-clients/07-tendermint/types" + localhosttypes "github.com/cosmos/cosmos-sdk/x/ibc/light-clients/09-localhost/types" + ibctesting "github.com/cosmos/cosmos-sdk/x/ibc/testing" + ibctestingmock "github.com/cosmos/cosmos-sdk/x/ibc/testing/mock" + upgradetypes "github.com/cosmos/cosmos-sdk/x/upgrade/types" +) + +func (suite *KeeperTestSuite) TestCreateClient() { + cases := []struct { + msg string + clientState exported.ClientState + expPass bool + }{ + {"success", ibctmtypes.NewClientState(testChainID, ibctmtypes.DefaultTrustLevel, trustingPeriod, ubdPeriod, maxClockDrift, testClientHeight, commitmenttypes.GetSDKSpecs(), ibctesting.UpgradePath, false, false), true}, + {"client type not supported", localhosttypes.NewClientState(testChainID, clienttypes.NewHeight(0, 1)), false}, + } + + for i, tc := range cases { + + clientID, err := suite.keeper.CreateClient(suite.ctx, tc.clientState, suite.consensusState) + if tc.expPass { + suite.Require().NoError(err, "valid test case %d failed: %s", i, tc.msg) + suite.Require().NotNil(clientID, "valid test case %d failed: %s", i, tc.msg) + } else { + suite.Require().Error(err, "invalid test case %d passed: %s", i, tc.msg) + 
suite.Require().Equal("", clientID, "invalid test case %d passed: %s", i, tc.msg) + } + } +} + +func (suite *KeeperTestSuite) TestUpdateClientTendermint() { + // Must create header creation functions since suite.header gets recreated on each test case + createFutureUpdateFn := func(s *KeeperTestSuite) *ibctmtypes.Header { + heightPlus3 := clienttypes.NewHeight(suite.header.GetHeight().GetRevisionNumber(), suite.header.GetHeight().GetRevisionHeight()+3) + height := suite.header.GetHeight().(clienttypes.Height) + + return suite.chainA.CreateTMClientHeader(testChainID, int64(heightPlus3.RevisionHeight), height, suite.header.Header.Time.Add(time.Hour), + suite.valSet, suite.valSet, []tmtypes.PrivValidator{suite.privVal}) + } + createPastUpdateFn := func(s *KeeperTestSuite) *ibctmtypes.Header { + heightMinus2 := clienttypes.NewHeight(suite.header.GetHeight().GetRevisionNumber(), suite.header.GetHeight().GetRevisionHeight()-2) + heightMinus4 := clienttypes.NewHeight(suite.header.GetHeight().GetRevisionNumber(), suite.header.GetHeight().GetRevisionHeight()-4) + + return suite.chainA.CreateTMClientHeader(testChainID, int64(heightMinus2.RevisionHeight), heightMinus4, suite.header.Header.Time, + suite.valSet, suite.valSet, []tmtypes.PrivValidator{suite.privVal}) + } + var ( + updateHeader *ibctmtypes.Header + clientState *ibctmtypes.ClientState + clientID string + err error + ) + + cases := []struct { + name string + malleate func() error + expPass bool + }{ + {"valid update", func() error { + clientState = ibctmtypes.NewClientState(testChainID, ibctmtypes.DefaultTrustLevel, trustingPeriod, ubdPeriod, maxClockDrift, testClientHeight, commitmenttypes.GetSDKSpecs(), ibctesting.UpgradePath, false, false) + clientID, err = suite.keeper.CreateClient(suite.ctx, clientState, suite.consensusState) + + // store intermediate consensus state to check that trustedHeight does not need to be highest consensus state before header height + incrementedClientHeight := testClientHeight.Increment().(types.Height) + intermediateConsState := &ibctmtypes.ConsensusState{ + Timestamp: suite.now.Add(time.Minute), + NextValidatorsHash: suite.valSetHash, + } + suite.keeper.SetClientConsensusState(suite.ctx, clientID, incrementedClientHeight, intermediateConsState) + + clientState.LatestHeight = incrementedClientHeight + suite.keeper.SetClientState(suite.ctx, clientID, clientState) + + updateHeader = createFutureUpdateFn(suite) + return err + }, true}, + {"valid past update", func() error { + clientState = ibctmtypes.NewClientState(testChainID, ibctmtypes.DefaultTrustLevel, trustingPeriod, ubdPeriod, maxClockDrift, testClientHeight, commitmenttypes.GetSDKSpecs(), ibctesting.UpgradePath, false, false) + clientID, err = suite.keeper.CreateClient(suite.ctx, clientState, suite.consensusState) + suite.Require().NoError(err) + + height1 := types.NewHeight(0, 1) + + // store previous consensus state + prevConsState := &ibctmtypes.ConsensusState{ + Timestamp: suite.past, + NextValidatorsHash: suite.valSetHash, + } + suite.keeper.SetClientConsensusState(suite.ctx, clientID, height1, prevConsState) + + height2 := types.NewHeight(0, 2) + + // store intermediate consensus state to check that trustedHeight does not need to be hightest consensus state before header height + intermediateConsState := &ibctmtypes.ConsensusState{ + Timestamp: suite.past.Add(time.Minute), + NextValidatorsHash: suite.valSetHash, + } + suite.keeper.SetClientConsensusState(suite.ctx, clientID, height2, intermediateConsState) + + // updateHeader will fill in 
consensus state between prevConsState and suite.consState + // clientState should not be updated + updateHeader = createPastUpdateFn(suite) + return nil + }, true}, + {"client state not found", func() error { + updateHeader = createFutureUpdateFn(suite) + + return nil + }, false}, + {"consensus state not found", func() error { + clientState = ibctmtypes.NewClientState(testChainID, ibctmtypes.DefaultTrustLevel, trustingPeriod, ubdPeriod, maxClockDrift, testClientHeight, commitmenttypes.GetSDKSpecs(), ibctesting.UpgradePath, false, false) + suite.keeper.SetClientState(suite.ctx, testClientID, clientState) + updateHeader = createFutureUpdateFn(suite) + + return nil + }, false}, + {"frozen client before update", func() error { + clientState = &ibctmtypes.ClientState{FrozenHeight: types.NewHeight(0, 1), LatestHeight: testClientHeight} + suite.keeper.SetClientState(suite.ctx, testClientID, clientState) + updateHeader = createFutureUpdateFn(suite) + + return nil + }, false}, + {"valid past update before client was frozen", func() error { + clientState = ibctmtypes.NewClientState(testChainID, ibctmtypes.DefaultTrustLevel, trustingPeriod, ubdPeriod, maxClockDrift, testClientHeight, commitmenttypes.GetSDKSpecs(), ibctesting.UpgradePath, false, false) + clientState.FrozenHeight = types.NewHeight(0, testClientHeight.RevisionHeight-1) + clientID, err = suite.keeper.CreateClient(suite.ctx, clientState, suite.consensusState) + suite.Require().NoError(err) + + height1 := types.NewHeight(0, 1) + + // store previous consensus state + prevConsState := &ibctmtypes.ConsensusState{ + Timestamp: suite.past, + NextValidatorsHash: suite.valSetHash, + } + suite.keeper.SetClientConsensusState(suite.ctx, clientID, height1, prevConsState) + + // updateHeader will fill in consensus state between prevConsState and suite.consState + // clientState should not be updated + updateHeader = createPastUpdateFn(suite) + return nil + }, true}, + {"invalid header", func() error { + clientState = ibctmtypes.NewClientState(testChainID, ibctmtypes.DefaultTrustLevel, trustingPeriod, ubdPeriod, maxClockDrift, testClientHeight, commitmenttypes.GetSDKSpecs(), ibctesting.UpgradePath, false, false) + _, err := suite.keeper.CreateClient(suite.ctx, clientState, suite.consensusState) + suite.Require().NoError(err) + updateHeader = createPastUpdateFn(suite) + + return nil + }, false}, + } + + for i, tc := range cases { + tc := tc + i := i + suite.Run(fmt.Sprintf("Case %s", tc.name), func() { + suite.SetupTest() + clientID = testClientID // must be explicitly changed + + err := tc.malleate() + suite.Require().NoError(err) + + suite.ctx = suite.ctx.WithBlockTime(updateHeader.Header.Time.Add(time.Minute)) + + err = suite.keeper.UpdateClient(suite.ctx, clientID, updateHeader) + + if tc.expPass { + suite.Require().NoError(err, err) + + expConsensusState := &ibctmtypes.ConsensusState{ + Timestamp: updateHeader.GetTime(), + Root: commitmenttypes.NewMerkleRoot(updateHeader.Header.GetAppHash()), + NextValidatorsHash: updateHeader.Header.NextValidatorsHash, + } + + newClientState, found := suite.keeper.GetClientState(suite.ctx, clientID) + suite.Require().True(found, "valid test case %d failed: %s", i, tc.name) + + consensusState, found := suite.keeper.GetClientConsensusState(suite.ctx, clientID, updateHeader.GetHeight()) + suite.Require().True(found, "valid test case %d failed: %s", i, tc.name) + + // Determine if clientState should be updated or not + if updateHeader.GetHeight().GT(clientState.GetLatestHeight()) { + // Header Height is greater than 
clientState latest Height, clientState should be updated with header.GetHeight() + suite.Require().Equal(updateHeader.GetHeight(), newClientState.GetLatestHeight(), "clientstate height did not update") + } else { + // Update will add past consensus state, clientState should not be updated at all + suite.Require().Equal(clientState.GetLatestHeight(), newClientState.GetLatestHeight(), "client state height updated for past header") + } + + suite.Require().NoError(err, "valid test case %d failed: %s", i, tc.name) + suite.Require().Equal(expConsensusState, consensusState, "consensus state should have been updated on case %s", tc.name) + } else { + suite.Require().Error(err, "invalid test case %d passed: %s", i, tc.name) + } + }) + } +} + +func (suite *KeeperTestSuite) TestUpdateClientLocalhost() { + revision := types.ParseChainID(suite.chainA.ChainID) + var localhostClient exported.ClientState = localhosttypes.NewClientState(suite.chainA.ChainID, types.NewHeight(revision, uint64(suite.chainA.GetContext().BlockHeight()))) + + ctx := suite.chainA.GetContext().WithBlockHeight(suite.chainA.GetContext().BlockHeight() + 1) + err := suite.chainA.App.IBCKeeper.ClientKeeper.UpdateClient(ctx, exported.Localhost, nil) + suite.Require().NoError(err) + + clientState, found := suite.chainA.App.IBCKeeper.ClientKeeper.GetClientState(ctx, exported.Localhost) + suite.Require().True(found) + suite.Require().Equal(localhostClient.GetLatestHeight().(types.Height).Increment(), clientState.GetLatestHeight()) +} + +func (suite *KeeperTestSuite) TestUpgradeClient() { + var ( + upgradedClient exported.ClientState + upgradedConsState exported.ConsensusState + lastHeight exported.Height + clientA string + proofUpgradedClient, proofUpgradedConsState []byte + ) + + testCases := []struct { + name string + setup func() + expPass bool + }{ + { + name: "successful upgrade", + setup: func() { + + upgradedClient = ibctmtypes.NewClientState("newChainId", ibctmtypes.DefaultTrustLevel, trustingPeriod, ubdPeriod+trustingPeriod, maxClockDrift, newClientHeight, commitmenttypes.GetSDKSpecs(), ibctesting.UpgradePath, false, false) + upgradedConsState = &ibctmtypes.ConsensusState{ + NextValidatorsHash: []byte("nextValsHash"), + } + + // last Height is at next block + lastHeight = clienttypes.NewHeight(0, uint64(suite.chainB.GetContext().BlockHeight()+1)) + + // zero custom fields and store in upgrade store + suite.chainB.App.UpgradeKeeper.SetUpgradedClient(suite.chainB.GetContext(), int64(lastHeight.GetRevisionHeight()), upgradedClient) + suite.chainB.App.UpgradeKeeper.SetUpgradedConsensusState(suite.chainB.GetContext(), int64(lastHeight.GetRevisionHeight()), upgradedConsState) + + // commit upgrade store changes and update clients + + suite.coordinator.CommitBlock(suite.chainB) + err := suite.coordinator.UpdateClient(suite.chainA, suite.chainB, clientA, exported.Tendermint) + suite.Require().NoError(err) + + cs, found := suite.chainA.App.IBCKeeper.ClientKeeper.GetClientState(suite.chainA.GetContext(), clientA) + suite.Require().True(found) + + proofUpgradedClient, _ = suite.chainB.QueryUpgradeProof(upgradetypes.UpgradedClientKey(int64(lastHeight.GetRevisionHeight())), cs.GetLatestHeight().GetRevisionHeight()) + proofUpgradedConsState, _ = suite.chainB.QueryUpgradeProof(upgradetypes.UpgradedConsStateKey(int64(lastHeight.GetRevisionHeight())), cs.GetLatestHeight().GetRevisionHeight()) + }, + expPass: true, + }, + { + name: "client state not found", + setup: func() { + + upgradedClient = ibctmtypes.NewClientState("newChainId", 
ibctmtypes.DefaultTrustLevel, trustingPeriod, ubdPeriod+trustingPeriod, maxClockDrift, newClientHeight, commitmenttypes.GetSDKSpecs(), ibctesting.UpgradePath, false, false) + upgradedConsState = &ibctmtypes.ConsensusState{ + NextValidatorsHash: []byte("nextValsHash"), + } + + // last Height is at next block + lastHeight = clienttypes.NewHeight(0, uint64(suite.chainB.GetContext().BlockHeight()+1)) + + // zero custom fields and store in upgrade store + suite.chainB.App.UpgradeKeeper.SetUpgradedClient(suite.chainB.GetContext(), int64(lastHeight.GetRevisionHeight()), upgradedClient) + suite.chainB.App.UpgradeKeeper.SetUpgradedConsensusState(suite.chainB.GetContext(), int64(lastHeight.GetRevisionHeight()), upgradedConsState) + + // commit upgrade store changes and update clients + + suite.coordinator.CommitBlock(suite.chainB) + err := suite.coordinator.UpdateClient(suite.chainA, suite.chainB, clientA, exported.Tendermint) + suite.Require().NoError(err) + + cs, found := suite.chainA.App.IBCKeeper.ClientKeeper.GetClientState(suite.chainA.GetContext(), clientA) + suite.Require().True(found) + + proofUpgradedClient, _ = suite.chainB.QueryUpgradeProof(upgradetypes.UpgradedClientKey(int64(lastHeight.GetRevisionHeight())), cs.GetLatestHeight().GetRevisionHeight()) + proofUpgradedConsState, _ = suite.chainB.QueryUpgradeProof(upgradetypes.UpgradedConsStateKey(int64(lastHeight.GetRevisionHeight())), cs.GetLatestHeight().GetRevisionHeight()) + + clientA = "wrongclientid" + }, + expPass: false, + }, + { + name: "client state frozen", + setup: func() { + + upgradedClient = ibctmtypes.NewClientState("newChainId", ibctmtypes.DefaultTrustLevel, trustingPeriod, ubdPeriod+trustingPeriod, maxClockDrift, newClientHeight, commitmenttypes.GetSDKSpecs(), ibctesting.UpgradePath, false, false) + upgradedConsState = &ibctmtypes.ConsensusState{ + NextValidatorsHash: []byte("nextValsHash"), + } + + // last Height is at next block + lastHeight = clienttypes.NewHeight(0, uint64(suite.chainB.GetContext().BlockHeight()+1)) + + // zero custom fields and store in upgrade store + suite.chainB.App.UpgradeKeeper.SetUpgradedClient(suite.chainB.GetContext(), int64(lastHeight.GetRevisionHeight()), upgradedClient) + suite.chainB.App.UpgradeKeeper.SetUpgradedConsensusState(suite.chainB.GetContext(), int64(lastHeight.GetRevisionHeight()), upgradedConsState) + + // commit upgrade store changes and update clients + + suite.coordinator.CommitBlock(suite.chainB) + err := suite.coordinator.UpdateClient(suite.chainA, suite.chainB, clientA, exported.Tendermint) + suite.Require().NoError(err) + + cs, found := suite.chainA.App.IBCKeeper.ClientKeeper.GetClientState(suite.chainA.GetContext(), clientA) + suite.Require().True(found) + + proofUpgradedClient, _ = suite.chainB.QueryUpgradeProof(upgradetypes.UpgradedClientKey(int64(lastHeight.GetRevisionHeight())), cs.GetLatestHeight().GetRevisionHeight()) + proofUpgradedConsState, _ = suite.chainB.QueryUpgradeProof(upgradetypes.UpgradedConsStateKey(int64(lastHeight.GetRevisionHeight())), cs.GetLatestHeight().GetRevisionHeight()) + + // set frozen client in store + tmClient, ok := cs.(*ibctmtypes.ClientState) + suite.Require().True(ok) + tmClient.FrozenHeight = types.NewHeight(0, 1) + suite.chainA.App.IBCKeeper.ClientKeeper.SetClientState(suite.chainA.GetContext(), clientA, tmClient) + }, + expPass: false, + }, + { + name: "tendermint client VerifyUpgrade fails", + setup: func() { + + upgradedClient = ibctmtypes.NewClientState("newChainId", ibctmtypes.DefaultTrustLevel, trustingPeriod, 
ubdPeriod+trustingPeriod, maxClockDrift, newClientHeight, commitmenttypes.GetSDKSpecs(), ibctesting.UpgradePath, false, false) + upgradedConsState = &ibctmtypes.ConsensusState{ + NextValidatorsHash: []byte("nextValsHash"), + } + + // last Height is at next block + lastHeight = clienttypes.NewHeight(0, uint64(suite.chainB.GetContext().BlockHeight()+1)) + + // zero custom fields and store in upgrade store + suite.chainB.App.UpgradeKeeper.SetUpgradedClient(suite.chainB.GetContext(), int64(lastHeight.GetRevisionHeight()), upgradedClient) + suite.chainB.App.UpgradeKeeper.SetUpgradedConsensusState(suite.chainB.GetContext(), int64(lastHeight.GetRevisionHeight()), upgradedConsState) + + // change upgradedClient client-specified parameters + upgradedClient = ibctmtypes.NewClientState("wrongchainID", ibctmtypes.DefaultTrustLevel, trustingPeriod, ubdPeriod, maxClockDrift, newClientHeight, commitmenttypes.GetSDKSpecs(), ibctesting.UpgradePath, true, true) + + suite.coordinator.CommitBlock(suite.chainB) + err := suite.coordinator.UpdateClient(suite.chainA, suite.chainB, clientA, exported.Tendermint) + suite.Require().NoError(err) + + cs, found := suite.chainA.App.IBCKeeper.ClientKeeper.GetClientState(suite.chainA.GetContext(), clientA) + suite.Require().True(found) + + proofUpgradedClient, _ = suite.chainB.QueryUpgradeProof(upgradetypes.UpgradedClientKey(int64(lastHeight.GetRevisionHeight())), cs.GetLatestHeight().GetRevisionHeight()) + proofUpgradedConsState, _ = suite.chainB.QueryUpgradeProof(upgradetypes.UpgradedConsStateKey(int64(lastHeight.GetRevisionHeight())), cs.GetLatestHeight().GetRevisionHeight()) + }, + expPass: false, + }, + } + + for _, tc := range testCases { + tc := tc + clientA, _ = suite.coordinator.SetupClients(suite.chainA, suite.chainB, exported.Tendermint) + + tc.setup() + + // Call ZeroCustomFields on upgraded clients to clear any client-chosen parameters in test-case upgradedClient + upgradedClient = upgradedClient.ZeroCustomFields() + + err := suite.chainA.App.IBCKeeper.ClientKeeper.UpgradeClient(suite.chainA.GetContext(), clientA, upgradedClient, upgradedConsState, proofUpgradedClient, proofUpgradedConsState) + + if tc.expPass { + suite.Require().NoError(err, "verify upgrade failed on valid case: %s", tc.name) + } else { + suite.Require().Error(err, "verify upgrade passed on invalid case: %s", tc.name) + } + } + +} + +func (suite *KeeperTestSuite) TestCheckMisbehaviourAndUpdateState() { + var ( + clientID string + err error + ) + + altPrivVal := ibctestingmock.NewPV() + altPubKey, err := altPrivVal.GetPubKey() + suite.Require().NoError(err) + altVal := tmtypes.NewValidator(altPubKey, 4) + + // Set valSet here with suite.valSet so it doesn't get reset on each testcase + valSet := suite.valSet + valsHash := valSet.Hash() + + // Create bothValSet with both suite validator and altVal + bothValSet := tmtypes.NewValidatorSet(append(suite.valSet.Validators, altVal)) + bothValsHash := bothValSet.Hash() + // Create alternative validator set with only altVal + altValSet := tmtypes.NewValidatorSet([]*tmtypes.Validator{altVal}) + + // Create signer array and ensure it is in same order as bothValSet + _, suiteVal := suite.valSet.GetByIndex(0) + bothSigners := ibctesting.CreateSortedSignerArray(altPrivVal, suite.privVal, altVal, suiteVal) + + altSigners := []tmtypes.PrivValidator{altPrivVal} + + // Create valid Misbehaviour by making a duplicate header that signs over different block time + altTime := suite.ctx.BlockTime().Add(time.Minute) + + heightPlus3 := types.NewHeight(0, height+3) + 
heightPlus5 := types.NewHeight(0, height+5) + + testCases := []struct { + name string + misbehaviour *ibctmtypes.Misbehaviour + malleate func() error + expPass bool + }{ + { + "trusting period misbehavior should pass", + &ibctmtypes.Misbehaviour{ + Header1: suite.chainA.CreateTMClientHeader(testChainID, int64(testClientHeight.RevisionHeight), testClientHeight, altTime, bothValSet, bothValSet, bothSigners), + Header2: suite.chainA.CreateTMClientHeader(testChainID, int64(testClientHeight.RevisionHeight), testClientHeight, suite.ctx.BlockTime(), bothValSet, bothValSet, bothSigners), + ClientId: clientID, + }, + func() error { + suite.consensusState.NextValidatorsHash = bothValsHash + clientState := ibctmtypes.NewClientState(testChainID, ibctmtypes.DefaultTrustLevel, trustingPeriod, ubdPeriod, maxClockDrift, testClientHeight, commitmenttypes.GetSDKSpecs(), ibctesting.UpgradePath, false, false) + clientID, err = suite.keeper.CreateClient(suite.ctx, clientState, suite.consensusState) + + return err + }, + true, + }, + { + "misbehavior at later height should pass", + &ibctmtypes.Misbehaviour{ + Header1: suite.chainA.CreateTMClientHeader(testChainID, int64(heightPlus5.RevisionHeight), testClientHeight, altTime, bothValSet, valSet, bothSigners), + Header2: suite.chainA.CreateTMClientHeader(testChainID, int64(heightPlus5.RevisionHeight), testClientHeight, suite.ctx.BlockTime(), bothValSet, valSet, bothSigners), + ClientId: clientID, + }, + func() error { + suite.consensusState.NextValidatorsHash = valsHash + clientState := ibctmtypes.NewClientState(testChainID, ibctmtypes.DefaultTrustLevel, trustingPeriod, ubdPeriod, maxClockDrift, testClientHeight, commitmenttypes.GetSDKSpecs(), ibctesting.UpgradePath, false, false) + clientID, err = suite.keeper.CreateClient(suite.ctx, clientState, suite.consensusState) + + // store intermediate consensus state to check that trustedHeight does not need to be highest consensus state before header height + intermediateConsState := &ibctmtypes.ConsensusState{ + Timestamp: suite.now.Add(time.Minute), + NextValidatorsHash: suite.valSetHash, + } + suite.keeper.SetClientConsensusState(suite.ctx, clientID, heightPlus3, intermediateConsState) + + clientState.LatestHeight = heightPlus3 + suite.keeper.SetClientState(suite.ctx, clientID, clientState) + + return err + }, + true, + }, + { + "misbehavior at later height with different trusted heights should pass", + &ibctmtypes.Misbehaviour{ + Header1: suite.chainA.CreateTMClientHeader(testChainID, int64(heightPlus5.RevisionHeight), testClientHeight, altTime, bothValSet, valSet, bothSigners), + Header2: suite.chainA.CreateTMClientHeader(testChainID, int64(heightPlus5.RevisionHeight), heightPlus3, suite.ctx.BlockTime(), bothValSet, bothValSet, bothSigners), + ClientId: clientID, + }, + func() error { + suite.consensusState.NextValidatorsHash = valsHash + clientState := ibctmtypes.NewClientState(testChainID, ibctmtypes.DefaultTrustLevel, trustingPeriod, ubdPeriod, maxClockDrift, testClientHeight, commitmenttypes.GetSDKSpecs(), ibctesting.UpgradePath, false, false) + clientID, err = suite.keeper.CreateClient(suite.ctx, clientState, suite.consensusState) + + // store trusted consensus state for Header2 + intermediateConsState := &ibctmtypes.ConsensusState{ + Timestamp: suite.now.Add(time.Minute), + NextValidatorsHash: bothValsHash, + } + suite.keeper.SetClientConsensusState(suite.ctx, clientID, heightPlus3, intermediateConsState) + + clientState.LatestHeight = heightPlus3 + suite.keeper.SetClientState(suite.ctx, clientID, 
clientState) + + return err + }, + true, + }, + { + "trusted ConsensusState1 not found", + &ibctmtypes.Misbehaviour{ + Header1: suite.chainA.CreateTMClientHeader(testChainID, int64(heightPlus5.RevisionHeight), heightPlus3, altTime, bothValSet, bothValSet, bothSigners), + Header2: suite.chainA.CreateTMClientHeader(testChainID, int64(heightPlus5.RevisionHeight), testClientHeight, suite.ctx.BlockTime(), bothValSet, valSet, bothSigners), + ClientId: clientID, + }, + func() error { + suite.consensusState.NextValidatorsHash = valsHash + clientState := ibctmtypes.NewClientState(testChainID, ibctmtypes.DefaultTrustLevel, trustingPeriod, ubdPeriod, maxClockDrift, testClientHeight, commitmenttypes.GetSDKSpecs(), ibctesting.UpgradePath, false, false) + clientID, err = suite.keeper.CreateClient(suite.ctx, clientState, suite.consensusState) + // intermediate consensus state at height + 3 is not created + return err + }, + false, + }, + { + "trusted ConsensusState2 not found", + &ibctmtypes.Misbehaviour{ + Header1: suite.chainA.CreateTMClientHeader(testChainID, int64(heightPlus5.RevisionHeight), testClientHeight, altTime, bothValSet, valSet, bothSigners), + Header2: suite.chainA.CreateTMClientHeader(testChainID, int64(heightPlus5.RevisionHeight), heightPlus3, suite.ctx.BlockTime(), bothValSet, bothValSet, bothSigners), + ClientId: clientID, + }, + func() error { + suite.consensusState.NextValidatorsHash = valsHash + clientState := ibctmtypes.NewClientState(testChainID, ibctmtypes.DefaultTrustLevel, trustingPeriod, ubdPeriod, maxClockDrift, testClientHeight, commitmenttypes.GetSDKSpecs(), ibctesting.UpgradePath, false, false) + clientID, err = suite.keeper.CreateClient(suite.ctx, clientState, suite.consensusState) + // intermediate consensus state at height + 3 is not created + return err + }, + false, + }, + { + "client state not found", + &ibctmtypes.Misbehaviour{}, + func() error { return nil }, + false, + }, + { + "client already frozen at earlier height", + &ibctmtypes.Misbehaviour{ + Header1: suite.chainA.CreateTMClientHeader(testChainID, int64(testClientHeight.RevisionHeight), testClientHeight, altTime, bothValSet, bothValSet, bothSigners), + Header2: suite.chainA.CreateTMClientHeader(testChainID, int64(testClientHeight.RevisionHeight), testClientHeight, suite.ctx.BlockTime(), bothValSet, bothValSet, bothSigners), + ClientId: clientID, + }, + func() error { + suite.consensusState.NextValidatorsHash = bothValsHash + clientState := ibctmtypes.NewClientState(testChainID, ibctmtypes.DefaultTrustLevel, trustingPeriod, ubdPeriod, maxClockDrift, testClientHeight, commitmenttypes.GetSDKSpecs(), ibctesting.UpgradePath, false, false) + clientID, err = suite.keeper.CreateClient(suite.ctx, clientState, suite.consensusState) + + clientState.FrozenHeight = types.NewHeight(0, 1) + suite.keeper.SetClientState(suite.ctx, clientID, clientState) + + return err + }, + false, + }, + { + "misbehaviour check failed", + &ibctmtypes.Misbehaviour{ + Header1: suite.chainA.CreateTMClientHeader(testChainID, int64(testClientHeight.RevisionHeight), testClientHeight, altTime, bothValSet, bothValSet, bothSigners), + Header2: suite.chainA.CreateTMClientHeader(testChainID, int64(testClientHeight.RevisionHeight), testClientHeight, suite.ctx.BlockTime(), altValSet, bothValSet, altSigners), + ClientId: clientID, + }, + func() error { + clientState := ibctmtypes.NewClientState(testChainID, ibctmtypes.DefaultTrustLevel, trustingPeriod, ubdPeriod, maxClockDrift, testClientHeight, commitmenttypes.GetSDKSpecs(), ibctesting.UpgradePath, 
false, false)
+            if err != nil {
+                return err
+            }
+            clientID, err = suite.keeper.CreateClient(suite.ctx, clientState, suite.consensusState)
+
+            return err
+        },
+        false,
+        },
+    }
+
+    for i, tc := range testCases {
+        tc := tc
+        i := i
+
+        suite.Run(tc.name, func() {
+            suite.SetupTest()       // reset
+            clientID = testClientID // must be explicitly changed
+
+            err := tc.malleate()
+            suite.Require().NoError(err)
+
+            tc.misbehaviour.ClientId = clientID
+
+            err = suite.keeper.CheckMisbehaviourAndUpdateState(suite.ctx, tc.misbehaviour)
+
+            if tc.expPass {
+                suite.Require().NoError(err, "valid test case %d failed: %s", i, tc.name)
+
+                clientState, found := suite.keeper.GetClientState(suite.ctx, clientID)
+                suite.Require().True(found, "valid test case %d failed: %s", i, tc.name)
+                suite.Require().True(clientState.IsFrozen(), "valid test case %d failed: %s", i, tc.name)
+                suite.Require().Equal(tc.misbehaviour.GetHeight(), clientState.GetFrozenHeight(),
+                    "valid test case %d failed: %s. Expected FrozenHeight %s got %s", i, tc.name, tc.misbehaviour.GetHeight(), clientState.GetFrozenHeight())
+            } else {
+                suite.Require().Error(err, "invalid test case %d passed: %s", i, tc.name)
+            }
+        })
+    }
+}
diff --git a/core/02-client/keeper/encoding.go b/core/02-client/keeper/encoding.go
new file mode 100644
index 0000000000..f2a07b864d
--- /dev/null
+++ b/core/02-client/keeper/encoding.go
@@ -0,0 +1,42 @@
+package keeper
+
+import (
+    "github.com/cosmos/cosmos-sdk/x/ibc/core/02-client/types"
+    "github.com/cosmos/cosmos-sdk/x/ibc/core/exported"
+)
+
+// UnmarshalClientState attempts to decode and return a ClientState object from
+// raw encoded bytes.
+func (k Keeper) UnmarshalClientState(bz []byte) (exported.ClientState, error) {
+    return types.UnmarshalClientState(k.cdc, bz)
+}
+
+// MustUnmarshalClientState attempts to decode and return a ClientState object from
+// raw encoded bytes. It panics on error.
+func (k Keeper) MustUnmarshalClientState(bz []byte) exported.ClientState {
+    return types.MustUnmarshalClientState(k.cdc, bz)
+}
+
+// UnmarshalConsensusState attempts to decode and return a ConsensusState object from
+// raw encoded bytes.
+func (k Keeper) UnmarshalConsensusState(bz []byte) (exported.ConsensusState, error) {
+    return types.UnmarshalConsensusState(k.cdc, bz)
+}
+
+// MustUnmarshalConsensusState attempts to decode and return a ConsensusState object from
+// raw encoded bytes. It panics on error.
+func (k Keeper) MustUnmarshalConsensusState(bz []byte) exported.ConsensusState {
+    return types.MustUnmarshalConsensusState(k.cdc, bz)
+}
+
+// MustMarshalClientState attempts to encode a ClientState object and returns the
+// raw encoded bytes. It panics on error.
+func (k Keeper) MustMarshalClientState(clientState exported.ClientState) []byte {
+    return types.MustMarshalClientState(k.cdc, clientState)
+}
+
+// MustMarshalConsensusState attempts to encode a ConsensusState object and returns the
+// raw encoded bytes. It panics on error.
+func (k Keeper) MustMarshalConsensusState(consensusState exported.ConsensusState) []byte { + return types.MustMarshalConsensusState(k.cdc, consensusState) +} diff --git a/core/02-client/keeper/grpc_query.go b/core/02-client/keeper/grpc_query.go new file mode 100644 index 0000000000..2134427729 --- /dev/null +++ b/core/02-client/keeper/grpc_query.go @@ -0,0 +1,199 @@ +package keeper + +import ( + "bytes" + "context" + "fmt" + "sort" + "strings" + + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" + + "github.com/cosmos/cosmos-sdk/store/prefix" + sdk "github.com/cosmos/cosmos-sdk/types" + sdkerrors "github.com/cosmos/cosmos-sdk/types/errors" + "github.com/cosmos/cosmos-sdk/types/query" + "github.com/cosmos/cosmos-sdk/x/ibc/core/02-client/types" + host "github.com/cosmos/cosmos-sdk/x/ibc/core/24-host" + "github.com/cosmos/cosmos-sdk/x/ibc/core/exported" +) + +var _ types.QueryServer = Keeper{} + +// ClientState implements the Query/ClientState gRPC method +func (q Keeper) ClientState(c context.Context, req *types.QueryClientStateRequest) (*types.QueryClientStateResponse, error) { + if req == nil { + return nil, status.Error(codes.InvalidArgument, "empty request") + } + + if err := host.ClientIdentifierValidator(req.ClientId); err != nil { + return nil, status.Error(codes.InvalidArgument, err.Error()) + } + + ctx := sdk.UnwrapSDKContext(c) + clientState, found := q.GetClientState(ctx, req.ClientId) + if !found { + return nil, status.Error( + codes.NotFound, + sdkerrors.Wrap(types.ErrClientNotFound, req.ClientId).Error(), + ) + } + + any, err := types.PackClientState(clientState) + if err != nil { + return nil, status.Error(codes.Internal, err.Error()) + } + + proofHeight := types.GetSelfHeight(ctx) + return &types.QueryClientStateResponse{ + ClientState: any, + ProofHeight: proofHeight, + }, nil +} + +// ClientStates implements the Query/ClientStates gRPC method +func (q Keeper) ClientStates(c context.Context, req *types.QueryClientStatesRequest) (*types.QueryClientStatesResponse, error) { + if req == nil { + return nil, status.Error(codes.InvalidArgument, "empty request") + } + + ctx := sdk.UnwrapSDKContext(c) + + clientStates := types.IdentifiedClientStates{} + store := prefix.NewStore(ctx.KVStore(q.storeKey), host.KeyClientStorePrefix) + + pageRes, err := query.Paginate(store, req.Pagination, func(key, value []byte) error { + keySplit := strings.Split(string(key), "/") + if keySplit[len(keySplit)-1] != "clientState" { + return nil + } + + clientState, err := q.UnmarshalClientState(value) + if err != nil { + return err + } + + clientID := keySplit[1] + if err := host.ClientIdentifierValidator(clientID); err != nil { + return err + } + + identifiedClient := types.NewIdentifiedClientState(clientID, clientState) + clientStates = append(clientStates, identifiedClient) + return nil + }) + + if err != nil { + return nil, err + } + + sort.Sort(clientStates) + + return &types.QueryClientStatesResponse{ + ClientStates: clientStates, + Pagination: pageRes, + }, nil +} + +// ConsensusState implements the Query/ConsensusState gRPC method +func (q Keeper) ConsensusState(c context.Context, req *types.QueryConsensusStateRequest) (*types.QueryConsensusStateResponse, error) { + if req == nil { + return nil, status.Error(codes.InvalidArgument, "empty request") + } + + if err := host.ClientIdentifierValidator(req.ClientId); err != nil { + return nil, status.Error(codes.InvalidArgument, err.Error()) + } + + ctx := sdk.UnwrapSDKContext(c) + + var ( + consensusState exported.ConsensusState + 
found bool + ) + + height := types.NewHeight(req.RevisionNumber, req.RevisionHeight) + if req.LatestHeight { + consensusState, found = q.GetLatestClientConsensusState(ctx, req.ClientId) + } else { + if req.RevisionHeight == 0 { + return nil, status.Error(codes.InvalidArgument, "consensus state height cannot be 0") + } + + consensusState, found = q.GetClientConsensusState(ctx, req.ClientId, height) + } + + if !found { + return nil, status.Error( + codes.NotFound, + sdkerrors.Wrapf(types.ErrConsensusStateNotFound, "client-id: %s, height: %s", req.ClientId, height).Error(), + ) + } + + any, err := types.PackConsensusState(consensusState) + if err != nil { + return nil, status.Error(codes.Internal, err.Error()) + } + + proofHeight := types.GetSelfHeight(ctx) + return &types.QueryConsensusStateResponse{ + ConsensusState: any, + ProofHeight: proofHeight, + }, nil +} + +// ConsensusStates implements the Query/ConsensusStates gRPC method +func (q Keeper) ConsensusStates(c context.Context, req *types.QueryConsensusStatesRequest) (*types.QueryConsensusStatesResponse, error) { + if req == nil { + return nil, status.Error(codes.InvalidArgument, "empty request") + } + + if err := host.ClientIdentifierValidator(req.ClientId); err != nil { + return nil, status.Error(codes.InvalidArgument, err.Error()) + } + + ctx := sdk.UnwrapSDKContext(c) + + consensusStates := []types.ConsensusStateWithHeight{} + store := prefix.NewStore(ctx.KVStore(q.storeKey), host.FullClientKey(req.ClientId, []byte(fmt.Sprintf("%s/", host.KeyConsensusStatePrefix)))) + + pageRes, err := query.FilteredPaginate(store, req.Pagination, func(key, value []byte, accumulate bool) (bool, error) { + // filter any metadata stored under consensus state key + if bytes.Contains(key, []byte("/")) { + return false, nil + } + + height, err := types.ParseHeight(string(key)) + if err != nil { + return false, err + } + + consensusState, err := q.UnmarshalConsensusState(value) + if err != nil { + return false, err + } + + consensusStates = append(consensusStates, types.NewConsensusStateWithHeight(height, consensusState)) + return true, nil + }) + + if err != nil { + return nil, err + } + + return &types.QueryConsensusStatesResponse{ + ConsensusStates: consensusStates, + Pagination: pageRes, + }, nil +} + +// ClientParams implements the Query/ClientParams gRPC method +func (q Keeper) ClientParams(c context.Context, _ *types.QueryClientParamsRequest) (*types.QueryClientParamsResponse, error) { + ctx := sdk.UnwrapSDKContext(c) + params := q.GetParams(ctx) + + return &types.QueryClientParamsResponse{ + Params: ¶ms, + }, nil +} diff --git a/core/02-client/keeper/grpc_query_test.go b/core/02-client/keeper/grpc_query_test.go new file mode 100644 index 0000000000..5e361a76f0 --- /dev/null +++ b/core/02-client/keeper/grpc_query_test.go @@ -0,0 +1,381 @@ +package keeper_test + +import ( + "fmt" + "time" + + codectypes "github.com/cosmos/cosmos-sdk/codec/types" + sdk "github.com/cosmos/cosmos-sdk/types" + + "github.com/cosmos/cosmos-sdk/types/query" + "github.com/cosmos/cosmos-sdk/x/ibc/core/02-client/types" + commitmenttypes "github.com/cosmos/cosmos-sdk/x/ibc/core/23-commitment/types" + "github.com/cosmos/cosmos-sdk/x/ibc/core/exported" + ibctmtypes "github.com/cosmos/cosmos-sdk/x/ibc/light-clients/07-tendermint/types" + ibctesting "github.com/cosmos/cosmos-sdk/x/ibc/testing" +) + +func (suite *KeeperTestSuite) TestQueryClientState() { + var ( + req *types.QueryClientStateRequest + expClientState *codectypes.Any + ) + + testCases := []struct { + msg string + 
malleate func() + expPass bool + }{ + {"invalid clientID", + func() { + req = &types.QueryClientStateRequest{} + }, + false, + }, + {"client not found", + func() { + req = &types.QueryClientStateRequest{ + ClientId: testClientID, + } + }, + false, + }, + { + "success", + func() { + clientState := ibctmtypes.NewClientState(testChainID, ibctmtypes.DefaultTrustLevel, trustingPeriod, ubdPeriod, maxClockDrift, types.ZeroHeight(), commitmenttypes.GetSDKSpecs(), ibctesting.UpgradePath, false, false) + suite.keeper.SetClientState(suite.ctx, testClientID, clientState) + + var err error + expClientState, err = types.PackClientState(clientState) + suite.Require().NoError(err) + + req = &types.QueryClientStateRequest{ + ClientId: testClientID, + } + }, + true, + }, + } + + for _, tc := range testCases { + suite.Run(fmt.Sprintf("Case %s", tc.msg), func() { + suite.SetupTest() // reset + + tc.malleate() + ctx := sdk.WrapSDKContext(suite.ctx) + res, err := suite.queryClient.ClientState(ctx, req) + + if tc.expPass { + suite.Require().NoError(err) + suite.Require().NotNil(res) + suite.Require().Equal(expClientState, res.ClientState) + + // ensure UnpackInterfaces is defined + cachedValue := res.ClientState.GetCachedValue() + suite.Require().NotNil(cachedValue) + } else { + suite.Require().Error(err) + } + }) + } +} + +func (suite *KeeperTestSuite) TestQueryClientStates() { + var ( + req *types.QueryClientStatesRequest + expClientStates = types.IdentifiedClientStates{} + ) + + testCases := []struct { + msg string + malleate func() + expPass bool + }{ + { + "empty pagination", + func() { + req = &types.QueryClientStatesRequest{} + }, + true, + }, + { + "success, no results", + func() { + req = &types.QueryClientStatesRequest{ + Pagination: &query.PageRequest{ + Limit: 3, + CountTotal: true, + }, + } + }, + true, + }, + { + "success", + func() { + clientA1, _ := suite.coordinator.SetupClients(suite.chainA, suite.chainB, exported.Tendermint) + clientA2, _ := suite.coordinator.CreateClient(suite.chainA, suite.chainB, exported.Tendermint) + + clientStateA1 := suite.chainA.GetClientState(clientA1) + clientStateA2 := suite.chainA.GetClientState(clientA2) + + idcs := types.NewIdentifiedClientState(clientA1, clientStateA1) + idcs2 := types.NewIdentifiedClientState(clientA2, clientStateA2) + + // order is sorted by client id, localhost is last + expClientStates = types.IdentifiedClientStates{idcs, idcs2}.Sort() + req = &types.QueryClientStatesRequest{ + Pagination: &query.PageRequest{ + Limit: 7, + CountTotal: true, + }, + } + }, + true, + }, + } + + for _, tc := range testCases { + suite.Run(fmt.Sprintf("Case %s", tc.msg), func() { + suite.SetupTest() // reset + expClientStates = nil + + tc.malleate() + + // always add localhost which is created by default in init genesis + localhostClientState := suite.chainA.GetClientState(exported.Localhost) + identifiedLocalhost := types.NewIdentifiedClientState(exported.Localhost, localhostClientState) + expClientStates = append(expClientStates, identifiedLocalhost) + + ctx := sdk.WrapSDKContext(suite.chainA.GetContext()) + + res, err := suite.chainA.QueryServer.ClientStates(ctx, req) + + if tc.expPass { + suite.Require().NoError(err) + suite.Require().NotNil(res) + suite.Require().Equal(expClientStates.Sort(), res.ClientStates) + } else { + suite.Require().Error(err) + } + }) + } +} + +func (suite *KeeperTestSuite) TestQueryConsensusState() { + var ( + req *types.QueryConsensusStateRequest + expConsensusState *codectypes.Any + ) + + testCases := []struct { + msg string + 
malleate func() + expPass bool + }{ + { + "invalid clientID", + func() { + req = &types.QueryConsensusStateRequest{} + }, + false, + }, + { + "invalid height", + func() { + req = &types.QueryConsensusStateRequest{ + ClientId: testClientID, + RevisionNumber: 0, + RevisionHeight: 0, + LatestHeight: false, + } + }, + false, + }, + { + "consensus state not found", + func() { + req = &types.QueryConsensusStateRequest{ + ClientId: testClientID, + LatestHeight: true, + } + }, + false, + }, + { + "success latest height", + func() { + clientState := ibctmtypes.NewClientState(testChainID, ibctmtypes.DefaultTrustLevel, trustingPeriod, ubdPeriod, maxClockDrift, testClientHeight, commitmenttypes.GetSDKSpecs(), ibctesting.UpgradePath, false, false) + cs := ibctmtypes.NewConsensusState( + suite.consensusState.Timestamp, commitmenttypes.NewMerkleRoot([]byte("hash1")), nil, + ) + suite.keeper.SetClientState(suite.ctx, testClientID, clientState) + suite.keeper.SetClientConsensusState(suite.ctx, testClientID, testClientHeight, cs) + + var err error + expConsensusState, err = types.PackConsensusState(cs) + suite.Require().NoError(err) + + req = &types.QueryConsensusStateRequest{ + ClientId: testClientID, + LatestHeight: true, + } + }, + true, + }, + { + "success with height", + func() { + cs := ibctmtypes.NewConsensusState( + suite.consensusState.Timestamp, commitmenttypes.NewMerkleRoot([]byte("hash1")), nil, + ) + suite.keeper.SetClientConsensusState(suite.ctx, testClientID, testClientHeight, cs) + + var err error + expConsensusState, err = types.PackConsensusState(cs) + suite.Require().NoError(err) + + req = &types.QueryConsensusStateRequest{ + ClientId: testClientID, + RevisionNumber: 0, + RevisionHeight: height, + } + }, + true, + }, + } + + for _, tc := range testCases { + suite.Run(fmt.Sprintf("Case %s", tc.msg), func() { + suite.SetupTest() // reset + + tc.malleate() + ctx := sdk.WrapSDKContext(suite.ctx) + res, err := suite.queryClient.ConsensusState(ctx, req) + + if tc.expPass { + suite.Require().NoError(err) + suite.Require().NotNil(res) + suite.Require().Equal(expConsensusState, res.ConsensusState) + + // ensure UnpackInterfaces is defined + cachedValue := res.ConsensusState.GetCachedValue() + suite.Require().NotNil(cachedValue) + } else { + suite.Require().Error(err) + } + }) + } +} + +func (suite *KeeperTestSuite) TestQueryConsensusStates() { + var ( + req *types.QueryConsensusStatesRequest + expConsensusStates = []types.ConsensusStateWithHeight{} + ) + + testCases := []struct { + msg string + malleate func() + expPass bool + }{ + { + "invalid client identifier", + func() { + req = &types.QueryConsensusStatesRequest{} + }, + false, + }, + { + "empty pagination", + func() { + req = &types.QueryConsensusStatesRequest{ + ClientId: testClientID, + } + }, + true, + }, + { + "success, no results", + func() { + req = &types.QueryConsensusStatesRequest{ + ClientId: testClientID, + Pagination: &query.PageRequest{ + Limit: 3, + CountTotal: true, + }, + } + }, + true, + }, + { + "success", + func() { + cs := ibctmtypes.NewConsensusState( + suite.consensusState.Timestamp, commitmenttypes.NewMerkleRoot([]byte("hash1")), nil, + ) + cs2 := ibctmtypes.NewConsensusState( + suite.consensusState.Timestamp.Add(time.Second), commitmenttypes.NewMerkleRoot([]byte("hash2")), nil, + ) + + clientState := ibctmtypes.NewClientState( + testChainID, ibctmtypes.DefaultTrustLevel, trustingPeriod, ubdPeriod, maxClockDrift, testClientHeight, commitmenttypes.GetSDKSpecs(), ibctesting.UpgradePath, false, false, + ) + + // Use 
CreateClient to ensure that processedTime metadata gets stored. + clientId, err := suite.keeper.CreateClient(suite.ctx, clientState, cs) + suite.Require().NoError(err) + suite.keeper.SetClientConsensusState(suite.ctx, clientId, testClientHeight.Increment(), cs2) + + // order is swapped because the res is sorted by client id + expConsensusStates = []types.ConsensusStateWithHeight{ + types.NewConsensusStateWithHeight(testClientHeight, cs), + types.NewConsensusStateWithHeight(testClientHeight.Increment().(types.Height), cs2), + } + req = &types.QueryConsensusStatesRequest{ + ClientId: clientId, + Pagination: &query.PageRequest{ + Limit: 3, + CountTotal: true, + }, + } + }, + true, + }, + } + + for _, tc := range testCases { + suite.Run(fmt.Sprintf("Case %s", tc.msg), func() { + suite.SetupTest() // reset + + tc.malleate() + ctx := sdk.WrapSDKContext(suite.ctx) + + res, err := suite.queryClient.ConsensusStates(ctx, req) + + if tc.expPass { + suite.Require().NoError(err) + suite.Require().NotNil(res) + suite.Require().Equal(len(expConsensusStates), len(res.ConsensusStates)) + for i := range expConsensusStates { + suite.Require().NotNil(res.ConsensusStates[i]) + suite.Require().Equal(expConsensusStates[i], res.ConsensusStates[i]) + + // ensure UnpackInterfaces is defined + cachedValue := res.ConsensusStates[i].ConsensusState.GetCachedValue() + suite.Require().NotNil(cachedValue) + } + } else { + suite.Require().Error(err) + } + }) + } +} + +func (suite *KeeperTestSuite) TestQueryParams() { + ctx := sdk.WrapSDKContext(suite.chainA.GetContext()) + expParams := types.DefaultParams() + res, _ := suite.queryClient.ClientParams(ctx, &types.QueryClientParamsRequest{}) + suite.Require().Equal(&expParams, res.Params) +} diff --git a/core/02-client/keeper/keeper.go b/core/02-client/keeper/keeper.go new file mode 100644 index 0000000000..67c5c0658d --- /dev/null +++ b/core/02-client/keeper/keeper.go @@ -0,0 +1,367 @@ +package keeper + +import ( + "fmt" + "reflect" + "strings" + + "github.com/tendermint/tendermint/libs/log" + "github.com/tendermint/tendermint/light" + + "github.com/cosmos/cosmos-sdk/codec" + "github.com/cosmos/cosmos-sdk/store/prefix" + sdk "github.com/cosmos/cosmos-sdk/types" + sdkerrors "github.com/cosmos/cosmos-sdk/types/errors" + "github.com/cosmos/cosmos-sdk/x/ibc/core/02-client/types" + commitmenttypes "github.com/cosmos/cosmos-sdk/x/ibc/core/23-commitment/types" + host "github.com/cosmos/cosmos-sdk/x/ibc/core/24-host" + "github.com/cosmos/cosmos-sdk/x/ibc/core/exported" + ibctmtypes "github.com/cosmos/cosmos-sdk/x/ibc/light-clients/07-tendermint/types" + paramtypes "github.com/cosmos/cosmos-sdk/x/params/types" + upgradetypes "github.com/cosmos/cosmos-sdk/x/upgrade/types" +) + +// Keeper represents a type that grants read and write permissions to any client +// state information +type Keeper struct { + storeKey sdk.StoreKey + cdc codec.BinaryMarshaler + paramSpace paramtypes.Subspace + stakingKeeper types.StakingKeeper +} + +// NewKeeper creates a new NewKeeper instance +func NewKeeper(cdc codec.BinaryMarshaler, key sdk.StoreKey, paramSpace paramtypes.Subspace, sk types.StakingKeeper) Keeper { + // set KeyTable if it has not already been set + if !paramSpace.HasKeyTable() { + paramSpace = paramSpace.WithKeyTable(types.ParamKeyTable()) + } + + return Keeper{ + storeKey: key, + cdc: cdc, + paramSpace: paramSpace, + stakingKeeper: sk, + } +} + +// Logger returns a module-specific logger. 
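// Illustrative sketch, separate from the patched files: the keeper derives client
// identifiers from a monotonically increasing sequence using the
// "{client-type}-{sequence}" convention of types.FormatClientIdentifier (see
// GenerateClientIdentifier below). The helper is a standalone approximation of
// that scheme, and "07-tendermint" is assumed to be the Tendermint client type string.
package main

import "fmt"

// formatClientIdentifier mirrors the "{client-type}-{N}" naming convention,
// where N is the keeper's next client sequence.
func formatClientIdentifier(clientType string, sequence uint64) string {
    return fmt.Sprintf("%s-%d", clientType, sequence)
}

func main() {
    // The first two Tendermint clients created on a chain would be named:
    fmt.Println(formatClientIdentifier("07-tendermint", 0)) // 07-tendermint-0
    fmt.Println(formatClientIdentifier("07-tendermint", 1)) // 07-tendermint-1
}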
+func (k Keeper) Logger(ctx sdk.Context) log.Logger { + return ctx.Logger().With("module", "x/"+host.ModuleName+"/"+types.SubModuleName) +} + +// GenerateClientIdentifier returns the next client identifier. +func (k Keeper) GenerateClientIdentifier(ctx sdk.Context, clientType string) string { + nextClientSeq := k.GetNextClientSequence(ctx) + clientID := types.FormatClientIdentifier(clientType, nextClientSeq) + + nextClientSeq++ + k.SetNextClientSequence(ctx, nextClientSeq) + return clientID +} + +// GetClientState gets a particular client from the store +func (k Keeper) GetClientState(ctx sdk.Context, clientID string) (exported.ClientState, bool) { + store := k.ClientStore(ctx, clientID) + bz := store.Get(host.ClientStateKey()) + if bz == nil { + return nil, false + } + + clientState := k.MustUnmarshalClientState(bz) + return clientState, true +} + +// SetClientState sets a particular Client to the store +func (k Keeper) SetClientState(ctx sdk.Context, clientID string, clientState exported.ClientState) { + store := k.ClientStore(ctx, clientID) + store.Set(host.ClientStateKey(), k.MustMarshalClientState(clientState)) +} + +// GetClientConsensusState gets the stored consensus state from a client at a given height. +func (k Keeper) GetClientConsensusState(ctx sdk.Context, clientID string, height exported.Height) (exported.ConsensusState, bool) { + store := k.ClientStore(ctx, clientID) + bz := store.Get(host.ConsensusStateKey(height)) + if bz == nil { + return nil, false + } + + consensusState := k.MustUnmarshalConsensusState(bz) + return consensusState, true +} + +// SetClientConsensusState sets a ConsensusState to a particular client at the given +// height +func (k Keeper) SetClientConsensusState(ctx sdk.Context, clientID string, height exported.Height, consensusState exported.ConsensusState) { + store := k.ClientStore(ctx, clientID) + store.Set(host.ConsensusStateKey(height), k.MustMarshalConsensusState(consensusState)) +} + +// GetNextClientSequence gets the next client sequence from the store. +func (k Keeper) GetNextClientSequence(ctx sdk.Context) uint64 { + store := ctx.KVStore(k.storeKey) + bz := store.Get([]byte(types.KeyNextClientSequence)) + if bz == nil { + panic("next client sequence is nil") + } + + return sdk.BigEndianToUint64(bz) +} + +// SetNextClientSequence sets the next client sequence to the store. +func (k Keeper) SetNextClientSequence(ctx sdk.Context, sequence uint64) { + store := ctx.KVStore(k.storeKey) + bz := sdk.Uint64ToBigEndian(sequence) + store.Set([]byte(types.KeyNextClientSequence), bz) +} + +// IterateConsensusStates provides an iterator over all stored consensus states. +// objects. For each State object, cb will be called. If the cb returns true, +// the iterator will close and stop. 
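// Illustrative sketch, separate from the patched files: IterateConsensusStates
// (below) walks every key under the "clients/" store prefix and keeps only keys of
// the form "clients/<clientID>/consensusStates/<height>". The sample keys are
// hypothetical, and host.KeyConsensusStatePrefix is assumed to be "consensusStates".
package main

import (
    "fmt"
    "strings"
)

// isConsensusStateKey keeps exactly four path segments with "consensusStates" in
// the third position, which skips client states and any per-height metadata keys.
func isConsensusStateKey(key string) bool {
    keySplit := strings.Split(key, "/")
    return len(keySplit) == 4 && keySplit[2] == "consensusStates"
}

func main() {
    for _, key := range []string{
        "clients/07-tendermint-0/clientState",                       // false
        "clients/07-tendermint-0/consensusStates/0-5",               // true
        "clients/07-tendermint-0/consensusStates/0-5/processedTime", // false (metadata)
    } {
        fmt.Printf("%-60s %v\n", key, isConsensusStateKey(key))
    }
}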
+func (k Keeper) IterateConsensusStates(ctx sdk.Context, cb func(clientID string, cs types.ConsensusStateWithHeight) bool) {
+    store := ctx.KVStore(k.storeKey)
+    iterator := sdk.KVStorePrefixIterator(store, host.KeyClientStorePrefix)
+
+    defer iterator.Close()
+    for ; iterator.Valid(); iterator.Next() {
+        keySplit := strings.Split(string(iterator.Key()), "/")
+        // consensus key is in the format "clients/<clientID>/consensusStates/<height>"
+        if len(keySplit) != 4 || keySplit[2] != string(host.KeyConsensusStatePrefix) {
+            continue
+        }
+        clientID := keySplit[1]
+        height := types.MustParseHeight(keySplit[3])
+        consensusState := k.MustUnmarshalConsensusState(iterator.Value())
+
+        consensusStateWithHeight := types.NewConsensusStateWithHeight(height, consensusState)
+
+        if cb(clientID, consensusStateWithHeight) {
+            break
+        }
+    }
+}
+
+// GetAllGenesisClients returns all the clients in state with their client ids returned as IdentifiedClientState
+func (k Keeper) GetAllGenesisClients(ctx sdk.Context) types.IdentifiedClientStates {
+    var genClients types.IdentifiedClientStates
+    k.IterateClients(ctx, func(clientID string, cs exported.ClientState) bool {
+        genClients = append(genClients, types.NewIdentifiedClientState(clientID, cs))
+        return false
+    })
+
+    return genClients.Sort()
+}
+
+// GetAllClientMetadata will take a list of IdentifiedClientState and return a list
+// of IdentifiedGenesisMetadata necessary for exporting and importing client metadata
+// into the client store.
+func (k Keeper) GetAllClientMetadata(ctx sdk.Context, genClients []types.IdentifiedClientState) ([]types.IdentifiedGenesisMetadata, error) {
+    genMetadata := make([]types.IdentifiedGenesisMetadata, 0)
+    for _, ic := range genClients {
+        cs, err := types.UnpackClientState(ic.ClientState)
+        if err != nil {
+            return nil, err
+        }
+        gms := cs.ExportMetadata(k.ClientStore(ctx, ic.ClientId))
+        if len(gms) == 0 {
+            continue
+        }
+        clientMetadata := make([]types.GenesisMetadata, len(gms))
+        for i, metadata := range gms {
+            cmd, ok := metadata.(types.GenesisMetadata)
+            if !ok {
+                return nil, sdkerrors.Wrapf(types.ErrInvalidClientMetadata, "expected metadata type: %T, got: %T",
+                    types.GenesisMetadata{}, cmd)
+            }
+            clientMetadata[i] = cmd
+        }
+        genMetadata = append(genMetadata, types.NewIdentifiedGenesisMetadata(
+            ic.ClientId,
+            clientMetadata,
+        ))
+    }
+    return genMetadata, nil
+}
+
+// SetAllClientMetadata takes a list of IdentifiedGenesisMetadata and stores all of the metadata in the client store at the appropriate paths.
+func (k Keeper) SetAllClientMetadata(ctx sdk.Context, genMetadata []types.IdentifiedGenesisMetadata) {
+    for _, igm := range genMetadata {
+        // create client store
+        store := k.ClientStore(ctx, igm.ClientId)
+        // set all metadata kv pairs in client store
+        for _, md := range igm.ClientMetadata {
+            store.Set(md.GetKey(), md.GetValue())
+        }
+    }
+}
+
+// GetAllConsensusStates returns all stored client consensus states.
+func (k Keeper) GetAllConsensusStates(ctx sdk.Context) types.ClientsConsensusStates { + clientConsStates := make(types.ClientsConsensusStates, 0) + mapClientIDToConsStateIdx := make(map[string]int) + + k.IterateConsensusStates(ctx, func(clientID string, cs types.ConsensusStateWithHeight) bool { + idx, ok := mapClientIDToConsStateIdx[clientID] + if ok { + clientConsStates[idx].ConsensusStates = append(clientConsStates[idx].ConsensusStates, cs) + return false + } + + clientConsState := types.ClientConsensusStates{ + ClientId: clientID, + ConsensusStates: []types.ConsensusStateWithHeight{cs}, + } + + clientConsStates = append(clientConsStates, clientConsState) + mapClientIDToConsStateIdx[clientID] = len(clientConsStates) - 1 + return false + }) + + return clientConsStates.Sort() +} + +// HasClientConsensusState returns if keeper has a ConsensusState for a particular +// client at the given height +func (k Keeper) HasClientConsensusState(ctx sdk.Context, clientID string, height exported.Height) bool { + store := k.ClientStore(ctx, clientID) + return store.Has(host.ConsensusStateKey(height)) +} + +// GetLatestClientConsensusState gets the latest ConsensusState stored for a given client +func (k Keeper) GetLatestClientConsensusState(ctx sdk.Context, clientID string) (exported.ConsensusState, bool) { + clientState, ok := k.GetClientState(ctx, clientID) + if !ok { + return nil, false + } + return k.GetClientConsensusState(ctx, clientID, clientState.GetLatestHeight()) +} + +// GetSelfConsensusState introspects the (self) past historical info at a given height +// and returns the expected consensus state at that height. +// For now, can only retrieve self consensus states for the current revision +func (k Keeper) GetSelfConsensusState(ctx sdk.Context, height exported.Height) (exported.ConsensusState, bool) { + selfHeight, ok := height.(types.Height) + if !ok { + return nil, false + } + // check that height revision matches chainID revision + revision := types.ParseChainID(ctx.ChainID()) + if revision != height.GetRevisionNumber() { + return nil, false + } + histInfo, found := k.stakingKeeper.GetHistoricalInfo(ctx, int64(selfHeight.RevisionHeight)) + if !found { + return nil, false + } + + consensusState := &ibctmtypes.ConsensusState{ + Timestamp: histInfo.Header.Time, + Root: commitmenttypes.NewMerkleRoot(histInfo.Header.GetAppHash()), + NextValidatorsHash: histInfo.Header.NextValidatorsHash, + } + return consensusState, true +} + +// ValidateSelfClient validates the client parameters for a client of the running chain +// This function is only used to validate the client state the counterparty stores for this chain +// Client must be in same revision as the executing chain +func (k Keeper) ValidateSelfClient(ctx sdk.Context, clientState exported.ClientState) error { + tmClient, ok := clientState.(*ibctmtypes.ClientState) + if !ok { + return sdkerrors.Wrapf(types.ErrInvalidClient, "client must be a Tendermint client, expected: %T, got: %T", + &ibctmtypes.ClientState{}, tmClient) + } + + if clientState.IsFrozen() { + return types.ErrClientFrozen + } + + if ctx.ChainID() != tmClient.ChainId { + return sdkerrors.Wrapf(types.ErrInvalidClient, "invalid chain-id. expected: %s, got: %s", + ctx.ChainID(), tmClient.ChainId) + } + + revision := types.ParseChainID(ctx.ChainID()) + + // client must be in the same revision as executing chain + if tmClient.LatestHeight.RevisionNumber != revision { + return sdkerrors.Wrapf(types.ErrInvalidClient, "client is not in the same revision as the chain. 
expected revision: %d, got: %d", + tmClient.LatestHeight.RevisionNumber, revision) + } + + selfHeight := types.NewHeight(revision, uint64(ctx.BlockHeight())) + if tmClient.LatestHeight.GTE(selfHeight) { + return sdkerrors.Wrapf(types.ErrInvalidClient, "client has LatestHeight %d greater than or equal to chain height %d", + tmClient.LatestHeight, selfHeight) + } + + expectedProofSpecs := commitmenttypes.GetSDKSpecs() + if !reflect.DeepEqual(expectedProofSpecs, tmClient.ProofSpecs) { + return sdkerrors.Wrapf(types.ErrInvalidClient, "client has invalid proof specs. expected: %v got: %v", + expectedProofSpecs, tmClient.ProofSpecs) + } + + if err := light.ValidateTrustLevel(tmClient.TrustLevel.ToTendermint()); err != nil { + return sdkerrors.Wrapf(types.ErrInvalidClient, "trust-level invalid: %v", err) + } + + expectedUbdPeriod := k.stakingKeeper.UnbondingTime(ctx) + if expectedUbdPeriod != tmClient.UnbondingPeriod { + return sdkerrors.Wrapf(types.ErrInvalidClient, "invalid unbonding period. expected: %s, got: %s", + expectedUbdPeriod, tmClient.UnbondingPeriod) + } + + if tmClient.UnbondingPeriod < tmClient.TrustingPeriod { + return sdkerrors.Wrapf(types.ErrInvalidClient, "unbonding period must be greater than trusting period. unbonding period (%d) < trusting period (%d)", + tmClient.UnbondingPeriod, tmClient.TrustingPeriod) + } + + if len(tmClient.UpgradePath) != 0 { + // For now, SDK IBC implementation assumes that upgrade path (if defined) is defined by SDK upgrade module + expectedUpgradePath := []string{upgradetypes.StoreKey, upgradetypes.KeyUpgradedIBCState} + if !reflect.DeepEqual(expectedUpgradePath, tmClient.UpgradePath) { + return sdkerrors.Wrapf(types.ErrInvalidClient, "upgrade path must be the upgrade path defined by upgrade module. expected %v, got %v", + expectedUpgradePath, tmClient.UpgradePath) + } + } + return nil +} + +// IterateClients provides an iterator over all stored light client State +// objects. For each State object, cb will be called. If the cb returns true, +// the iterator will close and stop. +func (k Keeper) IterateClients(ctx sdk.Context, cb func(clientID string, cs exported.ClientState) bool) { + store := ctx.KVStore(k.storeKey) + iterator := sdk.KVStorePrefixIterator(store, host.KeyClientStorePrefix) + + defer iterator.Close() + for ; iterator.Valid(); iterator.Next() { + keySplit := strings.Split(string(iterator.Key()), "/") + if keySplit[len(keySplit)-1] != host.KeyClientState { + continue + } + clientState := k.MustUnmarshalClientState(iterator.Value()) + + // key is ibc/{clientid}/clientState + // Thus, keySplit[1] is clientID + if cb(keySplit[1], clientState) { + break + } + } +} + +// GetAllClients returns all stored light client State objects. 
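// Illustrative sketch, separate from the patched files: ValidateSelfClient and
// GetSelfConsensusState above both compare the client's revision number against the
// revision parsed from the chain ID (e.g. "gaiahub-1" -> revision 1). The helper
// below is a simplified, standalone approximation of types.ParseChainID, which is
// stricter about the accepted chain-ID format than this sketch.
package main

import (
    "fmt"
    "strconv"
    "strings"
)

// parseRevision extracts a trailing "-<number>" revision from a chain ID,
// falling back to revision 0 when no such suffix is present.
func parseRevision(chainID string) uint64 {
    idx := strings.LastIndex(chainID, "-")
    if idx < 0 {
        return 0
    }
    rev, err := strconv.ParseUint(chainID[idx+1:], 10, 64)
    if err != nil {
        return 0
    }
    return rev
}

func main() {
    fmt.Println(parseRevision("gaiahub-0")) // 0
    fmt.Println(parseRevision("gaiahub-1")) // 1
    fmt.Println(parseRevision("testchain")) // 0 (no revision suffix)
}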
+func (k Keeper) GetAllClients(ctx sdk.Context) (states []exported.ClientState) { + k.IterateClients(ctx, func(_ string, state exported.ClientState) bool { + states = append(states, state) + return false + }) + return states +} + +// ClientStore returns isolated prefix store for each client so they can read/write in separate +// namespace without being able to read/write other client's data +func (k Keeper) ClientStore(ctx sdk.Context, clientID string) sdk.KVStore { + clientPrefix := []byte(fmt.Sprintf("%s/%s/", host.KeyClientStorePrefix, clientID)) + return prefix.NewStore(ctx.KVStore(k.storeKey), clientPrefix) +} diff --git a/core/02-client/keeper/keeper_test.go b/core/02-client/keeper/keeper_test.go new file mode 100644 index 0000000000..c22e80cc9e --- /dev/null +++ b/core/02-client/keeper/keeper_test.go @@ -0,0 +1,389 @@ +package keeper_test + +import ( + "math/rand" + "testing" + "time" + + "github.com/stretchr/testify/suite" + tmbytes "github.com/tendermint/tendermint/libs/bytes" + tmproto "github.com/tendermint/tendermint/proto/tendermint/types" + tmtypes "github.com/tendermint/tendermint/types" + + "github.com/cosmos/cosmos-sdk/baseapp" + "github.com/cosmos/cosmos-sdk/codec" + cryptocodec "github.com/cosmos/cosmos-sdk/crypto/codec" + "github.com/cosmos/cosmos-sdk/simapp" + sdk "github.com/cosmos/cosmos-sdk/types" + "github.com/cosmos/cosmos-sdk/x/ibc/core/02-client/keeper" + "github.com/cosmos/cosmos-sdk/x/ibc/core/02-client/types" + commitmenttypes "github.com/cosmos/cosmos-sdk/x/ibc/core/23-commitment/types" + "github.com/cosmos/cosmos-sdk/x/ibc/core/exported" + ibctmtypes "github.com/cosmos/cosmos-sdk/x/ibc/light-clients/07-tendermint/types" + localhosttypes "github.com/cosmos/cosmos-sdk/x/ibc/light-clients/09-localhost/types" + ibctesting "github.com/cosmos/cosmos-sdk/x/ibc/testing" + ibctestingmock "github.com/cosmos/cosmos-sdk/x/ibc/testing/mock" + stakingtypes "github.com/cosmos/cosmos-sdk/x/staking/types" +) + +const ( + testChainID = "gaiahub-0" + testChainIDRevision1 = "gaiahub-1" + + testClientID = "tendermint-0" + testClientID2 = "tendermint-1" + testClientID3 = "tendermint-2" + + height = 5 + + trustingPeriod time.Duration = time.Hour * 24 * 7 * 2 + ubdPeriod time.Duration = time.Hour * 24 * 7 * 3 + maxClockDrift time.Duration = time.Second * 10 +) + +var ( + testClientHeight = types.NewHeight(0, 5) + testClientHeightRevision1 = types.NewHeight(1, 5) + newClientHeight = types.NewHeight(1, 1) +) + +type KeeperTestSuite struct { + suite.Suite + + coordinator *ibctesting.Coordinator + + chainA *ibctesting.TestChain + chainB *ibctesting.TestChain + + cdc codec.Marshaler + ctx sdk.Context + keeper *keeper.Keeper + consensusState *ibctmtypes.ConsensusState + header *ibctmtypes.Header + valSet *tmtypes.ValidatorSet + valSetHash tmbytes.HexBytes + privVal tmtypes.PrivValidator + now time.Time + past time.Time + + queryClient types.QueryClient +} + +func (suite *KeeperTestSuite) SetupTest() { + suite.coordinator = ibctesting.NewCoordinator(suite.T(), 2) + + suite.chainA = suite.coordinator.GetChain(ibctesting.GetChainID(0)) + suite.chainB = suite.coordinator.GetChain(ibctesting.GetChainID(1)) + + isCheckTx := false + suite.now = time.Date(2020, 1, 2, 0, 0, 0, 0, time.UTC) + suite.past = time.Date(2020, 1, 1, 0, 0, 0, 0, time.UTC) + now2 := suite.now.Add(time.Hour) + app := simapp.Setup(isCheckTx) + + suite.cdc = app.AppCodec() + suite.ctx = app.BaseApp.NewContext(isCheckTx, tmproto.Header{Height: height, ChainID: testClientID, Time: now2}) + suite.keeper = 
&app.IBCKeeper.ClientKeeper + suite.privVal = ibctestingmock.NewPV() + + pubKey, err := suite.privVal.GetPubKey() + suite.Require().NoError(err) + + testClientHeightMinus1 := types.NewHeight(0, height-1) + + validator := tmtypes.NewValidator(pubKey, 1) + suite.valSet = tmtypes.NewValidatorSet([]*tmtypes.Validator{validator}) + suite.valSetHash = suite.valSet.Hash() + suite.header = suite.chainA.CreateTMClientHeader(testChainID, int64(testClientHeight.RevisionHeight), testClientHeightMinus1, now2, suite.valSet, suite.valSet, []tmtypes.PrivValidator{suite.privVal}) + suite.consensusState = ibctmtypes.NewConsensusState(suite.now, commitmenttypes.NewMerkleRoot([]byte("hash")), suite.valSetHash) + + var validators stakingtypes.Validators + for i := 1; i < 11; i++ { + privVal := ibctestingmock.NewPV() + tmPk, err := privVal.GetPubKey() + suite.Require().NoError(err) + pk, err := cryptocodec.FromTmPubKeyInterface(tmPk) + suite.Require().NoError(err) + val, err := stakingtypes.NewValidator(sdk.ValAddress(pk.Address()), pk, stakingtypes.Description{}) + suite.Require().NoError(err) + + val.Status = stakingtypes.Bonded + val.Tokens = sdk.NewInt(rand.Int63()) + validators = append(validators, val) + + hi := stakingtypes.NewHistoricalInfo(suite.ctx.BlockHeader(), validators) + app.StakingKeeper.SetHistoricalInfo(suite.ctx, int64(i), &hi) + } + + // add localhost client + revision := types.ParseChainID(suite.chainA.ChainID) + localHostClient := localhosttypes.NewClientState( + suite.chainA.ChainID, types.NewHeight(revision, uint64(suite.chainA.GetContext().BlockHeight())), + ) + suite.chainA.App.IBCKeeper.ClientKeeper.SetClientState(suite.chainA.GetContext(), exported.Localhost, localHostClient) + + queryHelper := baseapp.NewQueryServerTestHelper(suite.ctx, app.InterfaceRegistry()) + types.RegisterQueryServer(queryHelper, app.IBCKeeper.ClientKeeper) + suite.queryClient = types.NewQueryClient(queryHelper) +} + +func TestKeeperTestSuite(t *testing.T) { + suite.Run(t, new(KeeperTestSuite)) +} + +func (suite *KeeperTestSuite) TestSetClientState() { + clientState := ibctmtypes.NewClientState(testChainID, ibctmtypes.DefaultTrustLevel, trustingPeriod, ubdPeriod, maxClockDrift, types.ZeroHeight(), commitmenttypes.GetSDKSpecs(), ibctesting.UpgradePath, false, false) + suite.keeper.SetClientState(suite.ctx, testClientID, clientState) + + retrievedState, found := suite.keeper.GetClientState(suite.ctx, testClientID) + suite.Require().True(found, "GetClientState failed") + suite.Require().Equal(clientState, retrievedState, "Client states are not equal") +} + +func (suite *KeeperTestSuite) TestSetClientConsensusState() { + suite.keeper.SetClientConsensusState(suite.ctx, testClientID, testClientHeight, suite.consensusState) + + retrievedConsState, found := suite.keeper.GetClientConsensusState(suite.ctx, testClientID, testClientHeight) + suite.Require().True(found, "GetConsensusState failed") + + tmConsState, ok := retrievedConsState.(*ibctmtypes.ConsensusState) + suite.Require().True(ok) + suite.Require().Equal(suite.consensusState, tmConsState, "ConsensusState not stored correctly") +} + +func (suite *KeeperTestSuite) TestValidateSelfClient() { + testClientHeight := types.NewHeight(0, uint64(suite.chainA.GetContext().BlockHeight()-1)) + + testCases := []struct { + name string + clientState exported.ClientState + expPass bool + }{ + { + "success", + ibctmtypes.NewClientState(suite.chainA.ChainID, ibctmtypes.DefaultTrustLevel, trustingPeriod, ubdPeriod, maxClockDrift, testClientHeight, commitmenttypes.GetSDKSpecs(), 
ibctesting.UpgradePath, false, false), + true, + }, + { + "success with nil UpgradePath", + ibctmtypes.NewClientState(suite.chainA.ChainID, ibctmtypes.DefaultTrustLevel, trustingPeriod, ubdPeriod, maxClockDrift, testClientHeight, commitmenttypes.GetSDKSpecs(), nil, false, false), + true, + }, + { + "invalid client type", + localhosttypes.NewClientState(suite.chainA.ChainID, testClientHeight), + false, + }, + { + "frozen client", + &ibctmtypes.ClientState{suite.chainA.ChainID, ibctmtypes.DefaultTrustLevel, trustingPeriod, ubdPeriod, maxClockDrift, testClientHeight, testClientHeight, commitmenttypes.GetSDKSpecs(), ibctesting.UpgradePath, false, false}, + false, + }, + { + "incorrect chainID", + ibctmtypes.NewClientState("gaiatestnet", ibctmtypes.DefaultTrustLevel, trustingPeriod, ubdPeriod, maxClockDrift, testClientHeight, commitmenttypes.GetSDKSpecs(), ibctesting.UpgradePath, false, false), + false, + }, + { + "invalid client height", + ibctmtypes.NewClientState(suite.chainA.ChainID, ibctmtypes.DefaultTrustLevel, trustingPeriod, ubdPeriod, maxClockDrift, types.NewHeight(0, uint64(suite.chainA.GetContext().BlockHeight())), commitmenttypes.GetSDKSpecs(), ibctesting.UpgradePath, false, false), + false, + }, + { + "invalid client revision", + ibctmtypes.NewClientState(suite.chainA.ChainID, ibctmtypes.DefaultTrustLevel, trustingPeriod, ubdPeriod, maxClockDrift, testClientHeightRevision1, commitmenttypes.GetSDKSpecs(), ibctesting.UpgradePath, false, false), + false, + }, + { + "invalid proof specs", + ibctmtypes.NewClientState(suite.chainA.ChainID, ibctmtypes.DefaultTrustLevel, trustingPeriod, ubdPeriod, maxClockDrift, testClientHeight, nil, ibctesting.UpgradePath, false, false), + false, + }, + { + "invalid trust level", + ibctmtypes.NewClientState(suite.chainA.ChainID, ibctmtypes.Fraction{0, 1}, trustingPeriod, ubdPeriod, maxClockDrift, testClientHeight, commitmenttypes.GetSDKSpecs(), ibctesting.UpgradePath, false, false), + false, + }, + { + "invalid unbonding period", + ibctmtypes.NewClientState(suite.chainA.ChainID, ibctmtypes.DefaultTrustLevel, trustingPeriod, ubdPeriod+10, maxClockDrift, testClientHeight, commitmenttypes.GetSDKSpecs(), ibctesting.UpgradePath, false, false), + false, + }, + { + "invalid trusting period", + ibctmtypes.NewClientState(suite.chainA.ChainID, ibctmtypes.DefaultTrustLevel, ubdPeriod+10, ubdPeriod, maxClockDrift, testClientHeight, commitmenttypes.GetSDKSpecs(), ibctesting.UpgradePath, false, false), + false, + }, + { + "invalid upgrade path", + ibctmtypes.NewClientState(suite.chainA.ChainID, ibctmtypes.DefaultTrustLevel, trustingPeriod, ubdPeriod, maxClockDrift, testClientHeight, commitmenttypes.GetSDKSpecs(), []string{"bad", "upgrade", "path"}, false, false), + false, + }, + } + + for _, tc := range testCases { + err := suite.chainA.App.IBCKeeper.ClientKeeper.ValidateSelfClient(suite.chainA.GetContext(), tc.clientState) + if tc.expPass { + suite.Require().NoError(err, "expected valid client for case: %s", tc.name) + } else { + suite.Require().Error(err, "expected invalid client for case: %s", tc.name) + } + } +} + +func (suite KeeperTestSuite) TestGetAllGenesisClients() { + clientIDs := []string{ + testClientID2, testClientID3, testClientID, + } + expClients := []exported.ClientState{ + ibctmtypes.NewClientState(testChainID, ibctmtypes.DefaultTrustLevel, trustingPeriod, ubdPeriod, maxClockDrift, types.ZeroHeight(), commitmenttypes.GetSDKSpecs(), ibctesting.UpgradePath, false, false), + ibctmtypes.NewClientState(testChainID, ibctmtypes.DefaultTrustLevel, 
trustingPeriod, ubdPeriod, maxClockDrift, types.ZeroHeight(), commitmenttypes.GetSDKSpecs(), ibctesting.UpgradePath, false, false), + ibctmtypes.NewClientState(testChainID, ibctmtypes.DefaultTrustLevel, trustingPeriod, ubdPeriod, maxClockDrift, types.ZeroHeight(), commitmenttypes.GetSDKSpecs(), ibctesting.UpgradePath, false, false), + } + + expGenClients := make(types.IdentifiedClientStates, len(expClients)) + + for i := range expClients { + suite.chainA.App.IBCKeeper.ClientKeeper.SetClientState(suite.chainA.GetContext(), clientIDs[i], expClients[i]) + expGenClients[i] = types.NewIdentifiedClientState(clientIDs[i], expClients[i]) + } + + // add localhost client + localHostClient, found := suite.chainA.App.IBCKeeper.ClientKeeper.GetClientState(suite.chainA.GetContext(), exported.Localhost) + suite.Require().True(found) + expGenClients = append(expGenClients, types.NewIdentifiedClientState(exported.Localhost, localHostClient)) + + genClients := suite.chainA.App.IBCKeeper.ClientKeeper.GetAllGenesisClients(suite.chainA.GetContext()) + + suite.Require().Equal(expGenClients.Sort(), genClients) +} + +func (suite KeeperTestSuite) TestGetAllGenesisMetadata() { + expectedGenMetadata := []types.IdentifiedGenesisMetadata{ + types.NewIdentifiedGenesisMetadata( + "clientA", + []types.GenesisMetadata{ + types.NewGenesisMetadata(ibctmtypes.ProcessedTimeKey(types.NewHeight(0, 1)), []byte("foo")), + types.NewGenesisMetadata(ibctmtypes.ProcessedTimeKey(types.NewHeight(0, 2)), []byte("bar")), + types.NewGenesisMetadata(ibctmtypes.ProcessedTimeKey(types.NewHeight(0, 3)), []byte("baz")), + }, + ), + types.NewIdentifiedGenesisMetadata( + "clientB", + []types.GenesisMetadata{ + types.NewGenesisMetadata(ibctmtypes.ProcessedTimeKey(types.NewHeight(1, 100)), []byte("val1")), + types.NewGenesisMetadata(ibctmtypes.ProcessedTimeKey(types.NewHeight(2, 300)), []byte("val2")), + }, + ), + } + + genClients := []types.IdentifiedClientState{ + types.NewIdentifiedClientState("clientA", &ibctmtypes.ClientState{}), types.NewIdentifiedClientState("clientB", &ibctmtypes.ClientState{}), + types.NewIdentifiedClientState("clientC", &ibctmtypes.ClientState{}), types.NewIdentifiedClientState("clientD", &localhosttypes.ClientState{}), + } + + suite.chainA.App.IBCKeeper.ClientKeeper.SetAllClientMetadata(suite.chainA.GetContext(), expectedGenMetadata) + + actualGenMetadata, err := suite.chainA.App.IBCKeeper.ClientKeeper.GetAllClientMetadata(suite.chainA.GetContext(), genClients) + suite.Require().NoError(err, "get client metadata returned error unexpectedly") + suite.Require().Equal(expectedGenMetadata, actualGenMetadata, "retrieved metadata is unexpected") +} + +func (suite KeeperTestSuite) TestGetConsensusState() { + suite.ctx = suite.ctx.WithBlockHeight(10) + cases := []struct { + name string + height types.Height + expPass bool + }{ + {"zero height", types.ZeroHeight(), false}, + {"height > latest height", types.NewHeight(0, uint64(suite.ctx.BlockHeight())+1), false}, + {"latest height - 1", types.NewHeight(0, uint64(suite.ctx.BlockHeight())-1), true}, + {"latest height", types.GetSelfHeight(suite.ctx), true}, + } + + for i, tc := range cases { + tc := tc + cs, found := suite.keeper.GetSelfConsensusState(suite.ctx, tc.height) + if tc.expPass { + suite.Require().True(found, "Case %d should have passed: %s", i, tc.name) + suite.Require().NotNil(cs, "Case %d should have passed: %s", i, tc.name) + } else { + suite.Require().False(found, "Case %d should have failed: %s", i, tc.name) + suite.Require().Nil(cs, "Case %d should have failed: 
%s", i, tc.name) + } + } +} + +func (suite KeeperTestSuite) TestConsensusStateHelpers() { + // initial setup + clientState := ibctmtypes.NewClientState(testChainID, ibctmtypes.DefaultTrustLevel, trustingPeriod, ubdPeriod, maxClockDrift, testClientHeight, commitmenttypes.GetSDKSpecs(), ibctesting.UpgradePath, false, false) + + suite.keeper.SetClientState(suite.ctx, testClientID, clientState) + suite.keeper.SetClientConsensusState(suite.ctx, testClientID, testClientHeight, suite.consensusState) + + nextState := ibctmtypes.NewConsensusState(suite.now, commitmenttypes.NewMerkleRoot([]byte("next")), suite.valSetHash) + + testClientHeightPlus5 := types.NewHeight(0, height+5) + + header := suite.chainA.CreateTMClientHeader(testClientID, int64(testClientHeightPlus5.RevisionHeight), testClientHeight, suite.header.Header.Time.Add(time.Minute), + suite.valSet, suite.valSet, []tmtypes.PrivValidator{suite.privVal}) + + // mock update functionality + clientState.LatestHeight = header.GetHeight().(types.Height) + suite.keeper.SetClientConsensusState(suite.ctx, testClientID, header.GetHeight(), nextState) + suite.keeper.SetClientState(suite.ctx, testClientID, clientState) + + latest, ok := suite.keeper.GetLatestClientConsensusState(suite.ctx, testClientID) + suite.Require().True(ok) + suite.Require().Equal(nextState, latest, "Latest client not returned correctly") +} + +// 2 clients in total are created on chainA. The first client is updated so it contains an initial consensus state +// and a consensus state at the update height. +func (suite KeeperTestSuite) TestGetAllConsensusStates() { + clientA, _ := suite.coordinator.SetupClients(suite.chainA, suite.chainB, exported.Tendermint) + + clientState := suite.chainA.GetClientState(clientA) + expConsensusHeight0 := clientState.GetLatestHeight() + consensusState0, ok := suite.chainA.GetConsensusState(clientA, expConsensusHeight0) + suite.Require().True(ok) + + // update client to create a second consensus state + err := suite.coordinator.UpdateClient(suite.chainA, suite.chainB, clientA, exported.Tendermint) + suite.Require().NoError(err) + + clientState = suite.chainA.GetClientState(clientA) + expConsensusHeight1 := clientState.GetLatestHeight() + suite.Require().True(expConsensusHeight1.GT(expConsensusHeight0)) + consensusState1, ok := suite.chainA.GetConsensusState(clientA, expConsensusHeight1) + suite.Require().True(ok) + + expConsensus := []exported.ConsensusState{ + consensusState0, + consensusState1, + } + + // create second client on chainA + clientA2, _ := suite.coordinator.SetupClients(suite.chainA, suite.chainB, exported.Tendermint) + clientState = suite.chainA.GetClientState(clientA2) + + expConsensusHeight2 := clientState.GetLatestHeight() + consensusState2, ok := suite.chainA.GetConsensusState(clientA2, expConsensusHeight2) + suite.Require().True(ok) + + expConsensus2 := []exported.ConsensusState{consensusState2} + + expConsensusStates := types.ClientsConsensusStates{ + types.NewClientConsensusStates(clientA, []types.ConsensusStateWithHeight{ + types.NewConsensusStateWithHeight(expConsensusHeight0.(types.Height), expConsensus[0]), + types.NewConsensusStateWithHeight(expConsensusHeight1.(types.Height), expConsensus[1]), + }), + types.NewClientConsensusStates(clientA2, []types.ConsensusStateWithHeight{ + types.NewConsensusStateWithHeight(expConsensusHeight2.(types.Height), expConsensus2[0]), + }), + }.Sort() + + consStates := suite.chainA.App.IBCKeeper.ClientKeeper.GetAllConsensusStates(suite.chainA.GetContext()) + 
suite.Require().Equal(expConsensusStates, consStates, "%s \n\n%s", expConsensusStates, consStates) +} diff --git a/core/02-client/keeper/params.go b/core/02-client/keeper/params.go new file mode 100644 index 0000000000..04f4a25637 --- /dev/null +++ b/core/02-client/keeper/params.go @@ -0,0 +1,23 @@ +package keeper + +import ( + sdk "github.com/cosmos/cosmos-sdk/types" + "github.com/cosmos/cosmos-sdk/x/ibc/core/02-client/types" +) + +// GetAllowedClients retrieves the allowed clients list from the paramstore +func (k Keeper) GetAllowedClients(ctx sdk.Context) []string { + var res []string + k.paramSpace.Get(ctx, types.KeyAllowedClients, &res) + return res +} + +// GetParams returns the total set of ibc client parameters. +func (k Keeper) GetParams(ctx sdk.Context) types.Params { + return types.NewParams(k.GetAllowedClients(ctx)...) +} + +// SetParams sets the total set of ibc client parameters. +func (k Keeper) SetParams(ctx sdk.Context, params types.Params) { + k.paramSpace.SetParamSet(ctx, &params) +} diff --git a/core/02-client/keeper/params_test.go b/core/02-client/keeper/params_test.go new file mode 100644 index 0000000000..9df0859710 --- /dev/null +++ b/core/02-client/keeper/params_test.go @@ -0,0 +1,17 @@ +package keeper_test + +import ( + "github.com/cosmos/cosmos-sdk/x/ibc/core/02-client/types" +) + +func (suite *KeeperTestSuite) TestParams() { + expParams := types.DefaultParams() + + params := suite.chainA.App.IBCKeeper.ClientKeeper.GetParams(suite.chainA.GetContext()) + suite.Require().Equal(expParams, params) + + expParams.AllowedClients = []string{} + suite.chainA.App.IBCKeeper.ClientKeeper.SetParams(suite.chainA.GetContext(), expParams) + params = suite.chainA.App.IBCKeeper.ClientKeeper.GetParams(suite.chainA.GetContext()) + suite.Require().Empty(params.AllowedClients) +} diff --git a/core/02-client/keeper/proposal.go b/core/02-client/keeper/proposal.go new file mode 100644 index 0000000000..6d4ff350df --- /dev/null +++ b/core/02-client/keeper/proposal.go @@ -0,0 +1,72 @@ +package keeper + +import ( + "github.com/armon/go-metrics" + + "github.com/cosmos/cosmos-sdk/telemetry" + sdk "github.com/cosmos/cosmos-sdk/types" + sdkerrors "github.com/cosmos/cosmos-sdk/types/errors" + "github.com/cosmos/cosmos-sdk/x/ibc/core/02-client/types" + "github.com/cosmos/cosmos-sdk/x/ibc/core/exported" +) + +// ClientUpdateProposal will retrieve the subject and substitute client. +// The initial height must be greater than the latest height of the subject +// client. A callback will occur to the subject client state with the client +// prefixed store being provided for both the subject and the substitute client. +// The localhost client is not allowed to be modified with a proposal. The IBC +// client implementations are responsible for validating the parameters of the +// substitute (ensuring they match the subject's parameters) as well as copying +// the necessary consensus states from the substitute to the subject client +// store.
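+//
+// A minimal usage sketch of how a passed proposal reaches this method (the
+// client identifiers and height below are hypothetical examples, not part of
+// this change):
+//
+//   p := types.NewClientUpdateProposal(
+//       "update expired client", "copy substitute consensus states to the subject",
+//       "07-tendermint-0",       // subject: the frozen or expired client
+//       "07-tendermint-1",       // substitute: an active client with matching parameters
+//       types.NewHeight(0, 150), // initial height, above the subject's latest height
+//   )
+//   if err := k.ClientUpdateProposal(ctx, p); err != nil {
+//       // the proposal fails, e.g. unknown client or invalid initial height
+//   }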
+func (k Keeper) ClientUpdateProposal(ctx sdk.Context, p *types.ClientUpdateProposal) error { + if p.SubjectClientId == exported.Localhost || p.SubstituteClientId == exported.Localhost { + return sdkerrors.Wrap(types.ErrInvalidUpdateClientProposal, "cannot update localhost client with proposal") + } + + subjectClientState, found := k.GetClientState(ctx, p.SubjectClientId) + if !found { + return sdkerrors.Wrapf(types.ErrClientNotFound, "subject client with ID %s", p.SubjectClientId) + } + + if subjectClientState.GetLatestHeight().GTE(p.InitialHeight) { + return sdkerrors.Wrapf(types.ErrInvalidHeight, "subject client state latest height is greater or equal to initial height (%s >= %s)", subjectClientState.GetLatestHeight(), p.InitialHeight) + } + + substituteClientState, found := k.GetClientState(ctx, p.SubstituteClientId) + if !found { + return sdkerrors.Wrapf(types.ErrClientNotFound, "substitute client with ID %s", p.SubstituteClientId) + } + + clientState, err := subjectClientState.CheckSubstituteAndUpdateState(ctx, k.cdc, k.ClientStore(ctx, p.SubjectClientId), k.ClientStore(ctx, p.SubstituteClientId), substituteClientState, p.InitialHeight) + if err != nil { + return err + } + k.SetClientState(ctx, p.SubjectClientId, clientState) + + k.Logger(ctx).Info("client updated after governance proposal passed", "client-id", p.SubjectClientId, "height", clientState.GetLatestHeight().String()) + + defer func() { + telemetry.IncrCounterWithLabels( + []string{"ibc", "client", "update"}, + 1, + []metrics.Label{ + telemetry.NewLabel("client-type", clientState.ClientType()), + telemetry.NewLabel("client-id", p.SubjectClientId), + telemetry.NewLabel("update-type", "proposal"), + }, + ) + }() + + // emitting events in the keeper for proposal updates to clients + ctx.EventManager().EmitEvent( + sdk.NewEvent( + types.EventTypeUpdateClientProposal, + sdk.NewAttribute(types.AttributeKeySubjectClientID, p.SubjectClientId), + sdk.NewAttribute(types.AttributeKeyClientType, clientState.ClientType()), + sdk.NewAttribute(types.AttributeKeyConsensusHeight, clientState.GetLatestHeight().String()), + ), + ) + + return nil +} diff --git a/core/02-client/keeper/proposal_test.go b/core/02-client/keeper/proposal_test.go new file mode 100644 index 0000000000..8dbe43f7d7 --- /dev/null +++ b/core/02-client/keeper/proposal_test.go @@ -0,0 +1,130 @@ +package keeper_test + +import ( + "github.com/cosmos/cosmos-sdk/x/ibc/core/02-client/types" + clienttypes "github.com/cosmos/cosmos-sdk/x/ibc/core/02-client/types" + "github.com/cosmos/cosmos-sdk/x/ibc/core/exported" + ibctmtypes "github.com/cosmos/cosmos-sdk/x/ibc/light-clients/07-tendermint/types" + ibctesting "github.com/cosmos/cosmos-sdk/x/ibc/testing" +) + +func (suite *KeeperTestSuite) TestClientUpdateProposal() { + var ( + subject, substitute string + subjectClientState, substituteClientState exported.ClientState + initialHeight clienttypes.Height + content *types.ClientUpdateProposal + err error + ) + + testCases := []struct { + name string + malleate func() + expPass bool + }{ + { + "valid update client proposal", func() { + content = clienttypes.NewClientUpdateProposal(ibctesting.Title, ibctesting.Description, subject, substitute, initialHeight) + }, true, + }, + { + "subject and substitute use different revision numbers", func() { + tmClientState, ok := substituteClientState.(*ibctmtypes.ClientState) + suite.Require().True(ok) + consState, found := suite.chainA.App.IBCKeeper.ClientKeeper.GetClientConsensusState(suite.chainA.GetContext(), substitute, 
tmClientState.LatestHeight) + suite.Require().True(found) + newRevisionNumber := tmClientState.GetLatestHeight().GetRevisionNumber() + 1 + + tmClientState.LatestHeight = clienttypes.NewHeight(newRevisionNumber, tmClientState.GetLatestHeight().GetRevisionHeight()) + initialHeight = clienttypes.NewHeight(newRevisionNumber, initialHeight.GetRevisionHeight()) + suite.chainA.App.IBCKeeper.ClientKeeper.SetClientConsensusState(suite.chainA.GetContext(), substitute, tmClientState.LatestHeight, consState) + suite.chainA.App.IBCKeeper.ClientKeeper.SetClientState(suite.chainA.GetContext(), substitute, tmClientState) + + content = clienttypes.NewClientUpdateProposal(ibctesting.Title, ibctesting.Description, subject, substitute, initialHeight) + }, true, + }, + { + "cannot use localhost as subject", func() { + content = clienttypes.NewClientUpdateProposal(ibctesting.Title, ibctesting.Description, exported.Localhost, substitute, initialHeight) + }, false, + }, + { + "cannot use localhost as substitute", func() { + content = clienttypes.NewClientUpdateProposal(ibctesting.Title, ibctesting.Description, subject, exported.Localhost, initialHeight) + }, false, + }, + { + "subject client does not exist", func() { + content = clienttypes.NewClientUpdateProposal(ibctesting.Title, ibctesting.Description, ibctesting.InvalidID, substitute, initialHeight) + }, false, + }, + { + "substitute client does not exist", func() { + content = clienttypes.NewClientUpdateProposal(ibctesting.Title, ibctesting.Description, subject, ibctesting.InvalidID, initialHeight) + }, false, + }, + { + "subject and substitute have equal latest height", func() { + tmClientState, ok := subjectClientState.(*ibctmtypes.ClientState) + suite.Require().True(ok) + tmClientState.LatestHeight = substituteClientState.GetLatestHeight().(clienttypes.Height) + suite.chainA.App.IBCKeeper.ClientKeeper.SetClientState(suite.chainA.GetContext(), subject, tmClientState) + + content = clienttypes.NewClientUpdateProposal(ibctesting.Title, ibctesting.Description, subject, substitute, initialHeight) + }, false, + }, + { + "update fails, client is not frozen or expired", func() { + tmClientState, ok := subjectClientState.(*ibctmtypes.ClientState) + suite.Require().True(ok) + tmClientState.FrozenHeight = clienttypes.ZeroHeight() + suite.chainA.App.IBCKeeper.ClientKeeper.SetClientState(suite.chainA.GetContext(), subject, tmClientState) + + content = clienttypes.NewClientUpdateProposal(ibctesting.Title, ibctesting.Description, subject, substitute, initialHeight) + }, false, + }, + } + + for _, tc := range testCases { + tc := tc + + suite.Run(tc.name, func() { + suite.SetupTest() // reset + + subject, _ = suite.coordinator.SetupClients(suite.chainA, suite.chainB, exported.Tendermint) + subjectClientState = suite.chainA.GetClientState(subject) + substitute, _ = suite.coordinator.SetupClients(suite.chainA, suite.chainB, exported.Tendermint) + initialHeight = clienttypes.NewHeight(subjectClientState.GetLatestHeight().GetRevisionNumber(), subjectClientState.GetLatestHeight().GetRevisionHeight()+1) + + // update substitute twice + suite.coordinator.UpdateClient(suite.chainA, suite.chainB, substitute, exported.Tendermint) + suite.coordinator.UpdateClient(suite.chainA, suite.chainB, substitute, exported.Tendermint) + substituteClientState = suite.chainA.GetClientState(substitute) + + tmClientState, ok := subjectClientState.(*ibctmtypes.ClientState) + suite.Require().True(ok) + tmClientState.AllowUpdateAfterMisbehaviour = true + tmClientState.AllowUpdateAfterExpiry = true + 
tmClientState.FrozenHeight = tmClientState.LatestHeight + suite.chainA.App.IBCKeeper.ClientKeeper.SetClientState(suite.chainA.GetContext(), subject, tmClientState) + + tmClientState, ok = substituteClientState.(*ibctmtypes.ClientState) + suite.Require().True(ok) + tmClientState.AllowUpdateAfterMisbehaviour = true + tmClientState.AllowUpdateAfterExpiry = true + tmClientState.FrozenHeight = tmClientState.LatestHeight + suite.chainA.App.IBCKeeper.ClientKeeper.SetClientState(suite.chainA.GetContext(), substitute, tmClientState) + + tc.malleate() + + err = suite.chainA.App.IBCKeeper.ClientKeeper.ClientUpdateProposal(suite.chainA.GetContext(), content) + + if tc.expPass { + suite.Require().NoError(err) + } else { + suite.Require().Error(err) + } + }) + } + +} diff --git a/core/02-client/module.go b/core/02-client/module.go new file mode 100644 index 0000000000..08efee8b1a --- /dev/null +++ b/core/02-client/module.go @@ -0,0 +1,29 @@ +package client + +import ( + "github.com/gogo/protobuf/grpc" + "github.com/spf13/cobra" + + "github.com/cosmos/cosmos-sdk/x/ibc/core/02-client/client/cli" + "github.com/cosmos/cosmos-sdk/x/ibc/core/02-client/types" +) + +// Name returns the IBC client name +func Name() string { + return types.SubModuleName +} + +// GetQueryCmd returns no root query command for the IBC client +func GetQueryCmd() *cobra.Command { + return cli.GetQueryCmd() +} + +// GetTxCmd returns the root tx command for 02-client. +func GetTxCmd() *cobra.Command { + return cli.NewTxCmd() +} + +// RegisterQueryService registers the gRPC query service for IBC client. +func RegisterQueryService(server grpc.Server, queryServer types.QueryServer) { + types.RegisterQueryServer(server, queryServer) +} diff --git a/core/02-client/proposal_handler.go b/core/02-client/proposal_handler.go new file mode 100644 index 0000000000..befa95df64 --- /dev/null +++ b/core/02-client/proposal_handler.go @@ -0,0 +1,22 @@ +package client + +import ( + sdk "github.com/cosmos/cosmos-sdk/types" + sdkerrors "github.com/cosmos/cosmos-sdk/types/errors" + govtypes "github.com/cosmos/cosmos-sdk/x/gov/types" + "github.com/cosmos/cosmos-sdk/x/ibc/core/02-client/keeper" + "github.com/cosmos/cosmos-sdk/x/ibc/core/02-client/types" +) + +// NewClientUpdateProposalHandler defines the client update proposal handler +func NewClientUpdateProposalHandler(k keeper.Keeper) govtypes.Handler { + return func(ctx sdk.Context, content govtypes.Content) error { + switch c := content.(type) { + case *types.ClientUpdateProposal: + return k.ClientUpdateProposal(ctx, c) + + default: + return sdkerrors.Wrapf(sdkerrors.ErrUnknownRequest, "unrecognized ibc proposal content type: %T", c) + } + } +} diff --git a/core/02-client/proposal_handler_test.go b/core/02-client/proposal_handler_test.go new file mode 100644 index 0000000000..41b893186d --- /dev/null +++ b/core/02-client/proposal_handler_test.go @@ -0,0 +1,84 @@ +package client_test + +import ( + sdk "github.com/cosmos/cosmos-sdk/types" + distributiontypes "github.com/cosmos/cosmos-sdk/x/distribution/types" + govtypes "github.com/cosmos/cosmos-sdk/x/gov/types" + client "github.com/cosmos/cosmos-sdk/x/ibc/core/02-client" + clienttypes "github.com/cosmos/cosmos-sdk/x/ibc/core/02-client/types" + "github.com/cosmos/cosmos-sdk/x/ibc/core/exported" + ibctmtypes "github.com/cosmos/cosmos-sdk/x/ibc/light-clients/07-tendermint/types" + ibctesting "github.com/cosmos/cosmos-sdk/x/ibc/testing" +) + +func (suite *ClientTestSuite) TestNewClientUpdateProposalHandler() { + var ( + content govtypes.Content + err error + 
) + + testCases := []struct { + name string + malleate func() + expPass bool + }{ + { + "valid update client proposal", func() { + subject, _ := suite.coordinator.SetupClients(suite.chainA, suite.chainB, exported.Tendermint) + subjectClientState := suite.chainA.GetClientState(subject) + substitute, _ := suite.coordinator.SetupClients(suite.chainA, suite.chainB, exported.Tendermint) + initialHeight := clienttypes.NewHeight(subjectClientState.GetLatestHeight().GetRevisionNumber(), subjectClientState.GetLatestHeight().GetRevisionHeight()+1) + + // update substitute twice + suite.coordinator.UpdateClient(suite.chainA, suite.chainB, substitute, exported.Tendermint) + suite.coordinator.UpdateClient(suite.chainA, suite.chainB, substitute, exported.Tendermint) + substituteClientState := suite.chainA.GetClientState(substitute) + + tmClientState, ok := subjectClientState.(*ibctmtypes.ClientState) + suite.Require().True(ok) + tmClientState.AllowUpdateAfterMisbehaviour = true + tmClientState.FrozenHeight = tmClientState.LatestHeight + suite.chainA.App.IBCKeeper.ClientKeeper.SetClientState(suite.chainA.GetContext(), subject, tmClientState) + + // replicate changes to substitute (they must match) + tmClientState, ok = substituteClientState.(*ibctmtypes.ClientState) + suite.Require().True(ok) + tmClientState.AllowUpdateAfterMisbehaviour = true + suite.chainA.App.IBCKeeper.ClientKeeper.SetClientState(suite.chainA.GetContext(), substitute, tmClientState) + + content = clienttypes.NewClientUpdateProposal(ibctesting.Title, ibctesting.Description, subject, substitute, initialHeight) + }, true, + }, + { + "nil proposal", func() { + content = nil + }, false, + }, + { + "unsupported proposal type", func() { + content = distributiontypes.NewCommunityPoolSpendProposal(ibctesting.Title, ibctesting.Description, suite.chainA.SenderAccount.GetAddress(), sdk.NewCoins(sdk.NewCoin("communityfunds", sdk.NewInt(10)))) + }, false, + }, + } + + for _, tc := range testCases { + tc := tc + + suite.Run(tc.name, func() { + suite.SetupTest() // reset + + tc.malleate() + + proposalHandler := client.NewClientUpdateProposalHandler(suite.chainA.App.IBCKeeper.ClientKeeper) + + err = proposalHandler(suite.chainA.GetContext(), content) + + if tc.expPass { + suite.Require().NoError(err) + } else { + suite.Require().Error(err) + } + }) + } + +} diff --git a/core/02-client/simulation/decoder.go b/core/02-client/simulation/decoder.go new file mode 100644 index 0000000000..03a803b1b1 --- /dev/null +++ b/core/02-client/simulation/decoder.go @@ -0,0 +1,38 @@ +package simulation + +import ( + "bytes" + "fmt" + + "github.com/cosmos/cosmos-sdk/types/kv" + "github.com/cosmos/cosmos-sdk/x/ibc/core/02-client/keeper" + host "github.com/cosmos/cosmos-sdk/x/ibc/core/24-host" + "github.com/cosmos/cosmos-sdk/x/ibc/core/exported" +) + +var _ ClientUnmarshaler = (*keeper.Keeper)(nil) + +// ClientUnmarshaler defines an interface for unmarshaling ICS02 interfaces. +type ClientUnmarshaler interface { + MustUnmarshalClientState([]byte) exported.ClientState + MustUnmarshalConsensusState([]byte) exported.ConsensusState +} + +// NewDecodeStore returns a decoder function closure that unmarshals the KVPair's +// Value to the corresponding client type. 
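+//
+// A short sketch of a direct call. The client identifier and the marshalled
+// values bzA/bzB are hypothetical, and clientKeeper stands for any
+// ClientUnmarshaler (e.g. the 02-client keeper); in the simulator the two
+// kv.Pairs come from the same key in two stores being compared:
+//
+//   kvA := kv.Pair{Key: host.FullClientStateKey("07-tendermint-0"), Value: bzA}
+//   kvB := kv.Pair{Key: host.FullClientStateKey("07-tendermint-0"), Value: bzB}
+//   if log, found := NewDecodeStore(clientKeeper, kvA, kvB); found {
+//       fmt.Println(log) // "ClientState A: ...\nClientState B: ..."
+//   }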
+func NewDecodeStore(cdc ClientUnmarshaler, kvA, kvB kv.Pair) (string, bool) { + switch { + case bytes.HasPrefix(kvA.Key, host.KeyClientStorePrefix) && bytes.HasSuffix(kvA.Key, []byte(host.KeyClientState)): + clientStateA := cdc.MustUnmarshalClientState(kvA.Value) + clientStateB := cdc.MustUnmarshalClientState(kvB.Value) + return fmt.Sprintf("ClientState A: %v\nClientState B: %v", clientStateA, clientStateB), true + + case bytes.HasPrefix(kvA.Key, host.KeyClientStorePrefix) && bytes.Contains(kvA.Key, []byte(host.KeyConsensusStatePrefix)): + consensusStateA := cdc.MustUnmarshalConsensusState(kvA.Value) + consensusStateB := cdc.MustUnmarshalConsensusState(kvB.Value) + return fmt.Sprintf("ConsensusState A: %v\nConsensusState B: %v", consensusStateA, consensusStateB), true + + default: + return "", false + } +} diff --git a/core/02-client/simulation/decoder_test.go b/core/02-client/simulation/decoder_test.go new file mode 100644 index 0000000000..095834ba0d --- /dev/null +++ b/core/02-client/simulation/decoder_test.go @@ -0,0 +1,70 @@ +package simulation_test + +import ( + "fmt" + "testing" + "time" + + "github.com/stretchr/testify/require" + + "github.com/cosmos/cosmos-sdk/simapp" + "github.com/cosmos/cosmos-sdk/types/kv" + "github.com/cosmos/cosmos-sdk/x/ibc/core/02-client/simulation" + "github.com/cosmos/cosmos-sdk/x/ibc/core/02-client/types" + host "github.com/cosmos/cosmos-sdk/x/ibc/core/24-host" + ibctmtypes "github.com/cosmos/cosmos-sdk/x/ibc/light-clients/07-tendermint/types" +) + +func TestDecodeStore(t *testing.T) { + app := simapp.Setup(false) + clientID := "clientidone" + + height := types.NewHeight(0, 10) + + clientState := &ibctmtypes.ClientState{ + FrozenHeight: height, + } + + consState := &ibctmtypes.ConsensusState{ + Timestamp: time.Now().UTC(), + } + + kvPairs := kv.Pairs{ + Pairs: []kv.Pair{ + { + Key: host.FullClientStateKey(clientID), + Value: app.IBCKeeper.ClientKeeper.MustMarshalClientState(clientState), + }, + { + Key: host.FullConsensusStateKey(clientID, height), + Value: app.IBCKeeper.ClientKeeper.MustMarshalConsensusState(consState), + }, + { + Key: []byte{0x99}, + Value: []byte{0x99}, + }, + }, + } + tests := []struct { + name string + expectedLog string + }{ + {"ClientState", fmt.Sprintf("ClientState A: %v\nClientState B: %v", clientState, clientState)}, + {"ConsensusState", fmt.Sprintf("ConsensusState A: %v\nConsensusState B: %v", consState, consState)}, + {"other", ""}, + } + + for i, tt := range tests { + i, tt := i, tt + t.Run(tt.name, func(t *testing.T) { + res, found := simulation.NewDecodeStore(app.IBCKeeper.ClientKeeper, kvPairs.Pairs[i], kvPairs.Pairs[i]) + if i == len(tests)-1 { + require.False(t, found, string(kvPairs.Pairs[i].Key)) + require.Empty(t, res, string(kvPairs.Pairs[i].Key)) + } else { + require.True(t, found, string(kvPairs.Pairs[i].Key)) + require.Equal(t, tt.expectedLog, res, string(kvPairs.Pairs[i].Key)) + } + }) + } +} diff --git a/core/02-client/simulation/genesis.go b/core/02-client/simulation/genesis.go new file mode 100644 index 0000000000..2f23197026 --- /dev/null +++ b/core/02-client/simulation/genesis.go @@ -0,0 +1,13 @@ +package simulation + +import ( + "math/rand" + + simtypes "github.com/cosmos/cosmos-sdk/types/simulation" + "github.com/cosmos/cosmos-sdk/x/ibc/core/02-client/types" +) + +// GenClientGenesis returns the default client genesis state. 
+func GenClientGenesis(_ *rand.Rand, _ []simtypes.Account) types.GenesisState { + return types.DefaultGenesisState() +} diff --git a/core/02-client/types/client.go b/core/02-client/types/client.go new file mode 100644 index 0000000000..6d51828af0 --- /dev/null +++ b/core/02-client/types/client.go @@ -0,0 +1,111 @@ +package types + +import ( + "fmt" + "math" + "sort" + "strings" + + proto "github.com/gogo/protobuf/proto" + + codectypes "github.com/cosmos/cosmos-sdk/codec/types" + sdkerrors "github.com/cosmos/cosmos-sdk/types/errors" + host "github.com/cosmos/cosmos-sdk/x/ibc/core/24-host" + "github.com/cosmos/cosmos-sdk/x/ibc/core/exported" +) + +var ( + _ codectypes.UnpackInterfacesMessage = IdentifiedClientState{} + _ codectypes.UnpackInterfacesMessage = ConsensusStateWithHeight{} +) + +// NewIdentifiedClientState creates a new IdentifiedClientState instance +func NewIdentifiedClientState(clientID string, clientState exported.ClientState) IdentifiedClientState { + msg, ok := clientState.(proto.Message) + if !ok { + panic(fmt.Errorf("cannot proto marshal %T", clientState)) + } + + anyClientState, err := codectypes.NewAnyWithValue(msg) + if err != nil { + panic(err) + } + + return IdentifiedClientState{ + ClientId: clientID, + ClientState: anyClientState, + } +} + +// UnpackInterfaces implements UnpackInterfacesMessage.UnpackInterfaces +func (ics IdentifiedClientState) UnpackInterfaces(unpacker codectypes.AnyUnpacker) error { + return unpacker.UnpackAny(ics.ClientState, new(exported.ClientState)) +} + +var _ sort.Interface = IdentifiedClientStates{} + +// IdentifiedClientStates defines a slice of IdentifiedClientState that supports the sort interface +type IdentifiedClientStates []IdentifiedClientState + +// Len implements sort.Interface +func (ics IdentifiedClientStates) Len() int { return len(ics) } + +// Less implements sort.Interface +func (ics IdentifiedClientStates) Less(i, j int) bool { return ics[i].ClientId < ics[j].ClientId } + +// Swap implements sort.Interface +func (ics IdentifiedClientStates) Swap(i, j int) { ics[i], ics[j] = ics[j], ics[i] } + +// Sort is a helper function to sort the set of IdentifiedClientStates in place +func (ics IdentifiedClientStates) Sort() IdentifiedClientStates { + sort.Sort(ics) + return ics +} + +// NewConsensusStateWithHeight creates a new ConsensusStateWithHeight instance +func NewConsensusStateWithHeight(height Height, consensusState exported.ConsensusState) ConsensusStateWithHeight { + msg, ok := consensusState.(proto.Message) + if !ok { + panic(fmt.Errorf("cannot proto marshal %T", consensusState)) + } + + anyConsensusState, err := codectypes.NewAnyWithValue(msg) + if err != nil { + panic(err) + } + + return ConsensusStateWithHeight{ + Height: height, + ConsensusState: anyConsensusState, + } +} + +// UnpackInterfaces implements UnpackInterfacesMessage.UnpackInterfaces +func (cswh ConsensusStateWithHeight) UnpackInterfaces(unpacker codectypes.AnyUnpacker) error { + return unpacker.UnpackAny(cswh.ConsensusState, new(exported.ConsensusState)) +} + +// ValidateClientType validates the client type. It cannot be blank or empty. It must be a valid +// client identifier when used with '0' or the maximum uint64 as the sequence.
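+//
+// For example (a sketch, assuming the usual "<client type>-<sequence>" form
+// produced by FormatClientIdentifier):
+//
+//   ValidateClientType("07-tendermint") // nil: both "07-tendermint-0" and
+//                                       // "07-tendermint-18446744073709551615" are valid identifiers
+//   ValidateClientType("  ")            // ErrInvalidClientType: blank client type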
+func ValidateClientType(clientType string) error { + if strings.TrimSpace(clientType) == "" { + return sdkerrors.Wrap(ErrInvalidClientType, "client type cannot be blank") + } + + smallestPossibleClientID := FormatClientIdentifier(clientType, 0) + largestPossibleClientID := FormatClientIdentifier(clientType, uint64(math.MaxUint64)) + + // IsValidClientID will check client type format and if the sequence is a uint64 + if !IsValidClientID(smallestPossibleClientID) { + return sdkerrors.Wrap(ErrInvalidClientType, "") + } + + if err := host.ClientIdentifierValidator(smallestPossibleClientID); err != nil { + return sdkerrors.Wrap(err, "client type results in smallest client identifier being invalid") + } + if err := host.ClientIdentifierValidator(largestPossibleClientID); err != nil { + return sdkerrors.Wrap(err, "client type results in largest client identifier being invalid") + } + + return nil +} diff --git a/core/02-client/types/client.pb.go b/core/02-client/types/client.pb.go new file mode 100644 index 0000000000..b63fce16ae --- /dev/null +++ b/core/02-client/types/client.pb.go @@ -0,0 +1,1598 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: ibcgo/core/client/v1/client.proto + +package types + +import ( + fmt "fmt" + types "github.com/cosmos/cosmos-sdk/codec/types" + _ "github.com/gogo/protobuf/gogoproto" + proto "github.com/gogo/protobuf/proto" + io "io" + math "math" + math_bits "math/bits" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +// IdentifiedClientState defines a client state with an additional client +// identifier field. 
+type IdentifiedClientState struct { + // client identifier + ClientId string `protobuf:"bytes,1,opt,name=client_id,json=clientId,proto3" json:"client_id,omitempty" yaml:"client_id"` + // client state + ClientState *types.Any `protobuf:"bytes,2,opt,name=client_state,json=clientState,proto3" json:"client_state,omitempty" yaml:"client_state"` +} + +func (m *IdentifiedClientState) Reset() { *m = IdentifiedClientState{} } +func (m *IdentifiedClientState) String() string { return proto.CompactTextString(m) } +func (*IdentifiedClientState) ProtoMessage() {} +func (*IdentifiedClientState) Descriptor() ([]byte, []int) { + return fileDescriptor_3cc2cf764ecc47af, []int{0} +} +func (m *IdentifiedClientState) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *IdentifiedClientState) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_IdentifiedClientState.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *IdentifiedClientState) XXX_Merge(src proto.Message) { + xxx_messageInfo_IdentifiedClientState.Merge(m, src) +} +func (m *IdentifiedClientState) XXX_Size() int { + return m.Size() +} +func (m *IdentifiedClientState) XXX_DiscardUnknown() { + xxx_messageInfo_IdentifiedClientState.DiscardUnknown(m) +} + +var xxx_messageInfo_IdentifiedClientState proto.InternalMessageInfo + +func (m *IdentifiedClientState) GetClientId() string { + if m != nil { + return m.ClientId + } + return "" +} + +func (m *IdentifiedClientState) GetClientState() *types.Any { + if m != nil { + return m.ClientState + } + return nil +} + +// ConsensusStateWithHeight defines a consensus state with an additional height +// field. 
+type ConsensusStateWithHeight struct { + // consensus state height + Height Height `protobuf:"bytes,1,opt,name=height,proto3" json:"height"` + // consensus state + ConsensusState *types.Any `protobuf:"bytes,2,opt,name=consensus_state,json=consensusState,proto3" json:"consensus_state,omitempty" yaml:"consensus_state"` +} + +func (m *ConsensusStateWithHeight) Reset() { *m = ConsensusStateWithHeight{} } +func (m *ConsensusStateWithHeight) String() string { return proto.CompactTextString(m) } +func (*ConsensusStateWithHeight) ProtoMessage() {} +func (*ConsensusStateWithHeight) Descriptor() ([]byte, []int) { + return fileDescriptor_3cc2cf764ecc47af, []int{1} +} +func (m *ConsensusStateWithHeight) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ConsensusStateWithHeight) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_ConsensusStateWithHeight.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *ConsensusStateWithHeight) XXX_Merge(src proto.Message) { + xxx_messageInfo_ConsensusStateWithHeight.Merge(m, src) +} +func (m *ConsensusStateWithHeight) XXX_Size() int { + return m.Size() +} +func (m *ConsensusStateWithHeight) XXX_DiscardUnknown() { + xxx_messageInfo_ConsensusStateWithHeight.DiscardUnknown(m) +} + +var xxx_messageInfo_ConsensusStateWithHeight proto.InternalMessageInfo + +func (m *ConsensusStateWithHeight) GetHeight() Height { + if m != nil { + return m.Height + } + return Height{} +} + +func (m *ConsensusStateWithHeight) GetConsensusState() *types.Any { + if m != nil { + return m.ConsensusState + } + return nil +} + +// ClientConsensusStates defines all the stored consensus states for a given +// client.
+type ClientConsensusStates struct { + // client identifier + ClientId string `protobuf:"bytes,1,opt,name=client_id,json=clientId,proto3" json:"client_id,omitempty" yaml:"client_id"` + // consensus states and their heights associated with the client + ConsensusStates []ConsensusStateWithHeight `protobuf:"bytes,2,rep,name=consensus_states,json=consensusStates,proto3" json:"consensus_states" yaml:"consensus_states"` +} + +func (m *ClientConsensusStates) Reset() { *m = ClientConsensusStates{} } +func (m *ClientConsensusStates) String() string { return proto.CompactTextString(m) } +func (*ClientConsensusStates) ProtoMessage() {} +func (*ClientConsensusStates) Descriptor() ([]byte, []int) { + return fileDescriptor_3cc2cf764ecc47af, []int{2} +} +func (m *ClientConsensusStates) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ClientConsensusStates) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_ClientConsensusStates.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *ClientConsensusStates) XXX_Merge(src proto.Message) { + xxx_messageInfo_ClientConsensusStates.Merge(m, src) +} +func (m *ClientConsensusStates) XXX_Size() int { + return m.Size() +} +func (m *ClientConsensusStates) XXX_DiscardUnknown() { + xxx_messageInfo_ClientConsensusStates.DiscardUnknown(m) +} + +var xxx_messageInfo_ClientConsensusStates proto.InternalMessageInfo + +func (m *ClientConsensusStates) GetClientId() string { + if m != nil { + return m.ClientId + } + return "" +} + +func (m *ClientConsensusStates) GetConsensusStates() []ConsensusStateWithHeight { + if m != nil { + return m.ConsensusStates + } + return nil +} + +// ClientUpdateProposal is a governance proposal. If it passes, the substitute +// client's consensus states starting from the 'initial height' are copied over +// to the subject's client state. The proposal handler may fail if the subject +// and the substitute do not match in client and chain parameters (with the +// exception of latest height, frozen height, and chain-id). The updated client +// must also be valid (cannot be expired).
+type ClientUpdateProposal struct { + // the title of the update proposal + Title string `protobuf:"bytes,1,opt,name=title,proto3" json:"title,omitempty"` + // the description of the proposal + Description string `protobuf:"bytes,2,opt,name=description,proto3" json:"description,omitempty"` + // the client identifier for the client to be updated if the proposal passes + SubjectClientId string `protobuf:"bytes,3,opt,name=subject_client_id,json=subjectClientId,proto3" json:"subject_client_id,omitempty" yaml:"subject_client_id"` + // the substitute client identifier for the client standing in for the subject + // client + SubstituteClientId string `protobuf:"bytes,4,opt,name=substitute_client_id,json=substituteClientId,proto3" json:"substitute_client_id,omitempty" yaml:"substitute_client_id"` + // the initial height to copy consensus states from the substitute to the + // subject + InitialHeight Height `protobuf:"bytes,5,opt,name=initial_height,json=initialHeight,proto3" json:"initial_height" yaml:"initial_height"` +} + +func (m *ClientUpdateProposal) Reset() { *m = ClientUpdateProposal{} } +func (m *ClientUpdateProposal) String() string { return proto.CompactTextString(m) } +func (*ClientUpdateProposal) ProtoMessage() {} +func (*ClientUpdateProposal) Descriptor() ([]byte, []int) { + return fileDescriptor_3cc2cf764ecc47af, []int{3} +} +func (m *ClientUpdateProposal) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ClientUpdateProposal) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_ClientUpdateProposal.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *ClientUpdateProposal) XXX_Merge(src proto.Message) { + xxx_messageInfo_ClientUpdateProposal.Merge(m, src) +} +func (m *ClientUpdateProposal) XXX_Size() int { + return m.Size() +} +func (m *ClientUpdateProposal) XXX_DiscardUnknown() { + xxx_messageInfo_ClientUpdateProposal.DiscardUnknown(m) +} + +var xxx_messageInfo_ClientUpdateProposal proto.InternalMessageInfo + +// Height is a monotonically increasing data type +// that can be compared against another Height for the purposes of updating and +// freezing clients +// +// Normally the RevisionHeight is incremented at each height while keeping +// RevisionNumber the same. However, some consensus algorithms may choose to +// reset the height in certain conditions e.g.
hard forks, state-machine +// breaking changes. In these cases, the RevisionNumber is incremented so that +// height continues to be monotonically increasing even as the RevisionHeight +// gets reset +type Height struct { + // the revision that the client is currently on + RevisionNumber uint64 `protobuf:"varint,1,opt,name=revision_number,json=revisionNumber,proto3" json:"revision_number,omitempty" yaml:"revision_number"` + // the height within the given revision + RevisionHeight uint64 `protobuf:"varint,2,opt,name=revision_height,json=revisionHeight,proto3" json:"revision_height,omitempty" yaml:"revision_height"` +} + +func (m *Height) Reset() { *m = Height{} } +func (*Height) ProtoMessage() {} +func (*Height) Descriptor() ([]byte, []int) { + return fileDescriptor_3cc2cf764ecc47af, []int{4} +} +func (m *Height) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Height) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Height.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Height) XXX_Merge(src proto.Message) { + xxx_messageInfo_Height.Merge(m, src) +} +func (m *Height) XXX_Size() int { + return m.Size() +} +func (m *Height) XXX_DiscardUnknown() { + xxx_messageInfo_Height.DiscardUnknown(m) +} + +var xxx_messageInfo_Height proto.InternalMessageInfo + +// Params defines the set of IBC light client parameters. +type Params struct { + // allowed_clients defines the list of allowed client state types. + AllowedClients []string `protobuf:"bytes,1,rep,name=allowed_clients,json=allowedClients,proto3" json:"allowed_clients,omitempty" yaml:"allowed_clients"` +} + +func (m *Params) Reset() { *m = Params{} } +func (m *Params) String() string { return proto.CompactTextString(m) } +func (*Params) ProtoMessage() {} +func (*Params) Descriptor() ([]byte, []int) { + return fileDescriptor_3cc2cf764ecc47af, []int{5} +} +func (m *Params) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Params) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Params.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Params) XXX_Merge(src proto.Message) { + xxx_messageInfo_Params.Merge(m, src) +} +func (m *Params) XXX_Size() int { + return m.Size() +} +func (m *Params) XXX_DiscardUnknown() { + xxx_messageInfo_Params.DiscardUnknown(m) +} + +var xxx_messageInfo_Params proto.InternalMessageInfo + +func (m *Params) GetAllowedClients() []string { + if m != nil { + return m.AllowedClients + } + return nil +} + +func init() { + proto.RegisterType((*IdentifiedClientState)(nil), "ibcgo.core.client.v1.IdentifiedClientState") + proto.RegisterType((*ConsensusStateWithHeight)(nil), "ibcgo.core.client.v1.ConsensusStateWithHeight") + proto.RegisterType((*ClientConsensusStates)(nil), "ibcgo.core.client.v1.ClientConsensusStates") + proto.RegisterType((*ClientUpdateProposal)(nil), "ibcgo.core.client.v1.ClientUpdateProposal") + proto.RegisterType((*Height)(nil), "ibcgo.core.client.v1.Height") + proto.RegisterType((*Params)(nil), "ibcgo.core.client.v1.Params") +} + +func init() { proto.RegisterFile("ibcgo/core/client/v1/client.proto", fileDescriptor_3cc2cf764ecc47af) } + +var fileDescriptor_3cc2cf764ecc47af = []byte{ + // 636 bytes of a gzipped
FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x9c, 0x94, 0x4f, 0x6f, 0xd3, 0x30, + 0x18, 0xc6, 0x9b, 0xae, 0xab, 0x56, 0x17, 0xda, 0x11, 0x52, 0xd6, 0x8d, 0xd1, 0x14, 0x9f, 0x7a, + 0x59, 0xc2, 0xca, 0x6d, 0x37, 0xda, 0x03, 0xdb, 0x01, 0x34, 0x8c, 0x10, 0x88, 0x4b, 0x95, 0x3f, + 0x5e, 0x6a, 0x94, 0xc6, 0x55, 0xec, 0x0c, 0x95, 0x4f, 0xc0, 0x91, 0x23, 0x07, 0x0e, 0x7c, 0x04, + 0x3e, 0x05, 0xda, 0x71, 0x17, 0x24, 0x4e, 0x11, 0xda, 0xbe, 0x41, 0x3e, 0x01, 0x8a, 0xed, 0x6c, + 0x6b, 0xd8, 0xa4, 0x89, 0x9b, 0xf3, 0xfa, 0xf1, 0xef, 0x7d, 0xde, 0x47, 0x8e, 0xc1, 0x63, 0xe2, + 0x7a, 0x01, 0xb5, 0x3d, 0x1a, 0x63, 0xdb, 0x0b, 0x09, 0x8e, 0xb8, 0x7d, 0xbc, 0xab, 0x56, 0xd6, + 0x3c, 0xa6, 0x9c, 0xea, 0x86, 0x90, 0x58, 0xb9, 0xc4, 0x52, 0x1b, 0xc7, 0xbb, 0x5b, 0x46, 0x40, + 0x03, 0x2a, 0x04, 0x76, 0xbe, 0x92, 0xda, 0xad, 0xcd, 0x80, 0xd2, 0x20, 0xc4, 0xb6, 0xf8, 0x72, + 0x93, 0x23, 0xdb, 0x89, 0x16, 0x72, 0x0b, 0x7e, 0xd3, 0x40, 0xe7, 0xc0, 0xc7, 0x11, 0x27, 0x47, + 0x04, 0xfb, 0x63, 0x01, 0x7a, 0xcd, 0x1d, 0x8e, 0xf5, 0x5d, 0xd0, 0x90, 0xdc, 0x09, 0xf1, 0xbb, + 0x5a, 0x5f, 0x1b, 0x34, 0x46, 0x46, 0x96, 0x9a, 0xeb, 0x0b, 0x67, 0x16, 0xee, 0xc1, 0x8b, 0x2d, + 0x88, 0xd6, 0xe4, 0xfa, 0xc0, 0xd7, 0x0f, 0xc1, 0x1d, 0x55, 0x67, 0x39, 0xa2, 0x5b, 0xed, 0x6b, + 0x83, 0xe6, 0xd0, 0xb0, 0x64, 0x7b, 0xab, 0x68, 0x6f, 0x3d, 0x8b, 0x16, 0xa3, 0x8d, 0x2c, 0x35, + 0xef, 0x2f, 0xb1, 0xc4, 0x19, 0x88, 0x9a, 0xde, 0xa5, 0x09, 0xf8, 0x43, 0x03, 0xdd, 0x31, 0x8d, + 0x18, 0x8e, 0x58, 0xc2, 0x44, 0xe9, 0x2d, 0xe1, 0xd3, 0x7d, 0x4c, 0x82, 0x29, 0xd7, 0xf7, 0x40, + 0x7d, 0x2a, 0x56, 0xc2, 0x5e, 0x73, 0xb8, 0x6d, 0x5d, 0x97, 0x89, 0x25, 0xd5, 0xa3, 0xda, 0x49, + 0x6a, 0x56, 0x90, 0x3a, 0xa1, 0xbf, 0x03, 0x6d, 0xaf, 0xe0, 0xde, 0xc2, 0xed, 0x66, 0x96, 0x9a, + 0x9d, 0xdc, 0x2d, 0x2c, 0x9d, 0x82, 0xa8, 0xe5, 0x2d, 0xf9, 0x83, 0x3f, 0x35, 0xd0, 0x91, 0x39, + 0x2e, 0x1b, 0x67, 0xff, 0x93, 0xe8, 0x27, 0xb0, 0x5e, 0x6a, 0xc8, 0xba, 0xd5, 0xfe, 0xca, 0xa0, + 0x39, 0xb4, 0xae, 0x1f, 0xf6, 0xa6, 0xb0, 0x46, 0x66, 0x3e, 0x7e, 0x96, 0x9a, 0x1b, 0xaa, 0x5b, + 0x89, 0x0a, 0x51, 0x7b, 0x79, 0x0e, 0x06, 0x7f, 0x55, 0x81, 0x21, 0x07, 0x79, 0x33, 0xf7, 0x1d, + 0x8e, 0x0f, 0x63, 0x3a, 0xa7, 0xcc, 0x09, 0x75, 0x03, 0xac, 0x72, 0xc2, 0x43, 0x2c, 0x67, 0x40, + 0xf2, 0x43, 0xef, 0x83, 0xa6, 0x8f, 0x99, 0x17, 0x93, 0x39, 0x27, 0x34, 0x12, 0x69, 0x36, 0xd0, + 0xd5, 0x92, 0xbe, 0x0f, 0xee, 0xb1, 0xc4, 0xfd, 0x80, 0x3d, 0x3e, 0xb9, 0xcc, 0x61, 0x45, 0xe4, + 0xb0, 0x9d, 0xa5, 0x66, 0x57, 0x3a, 0xfb, 0x47, 0x02, 0x51, 0x5b, 0xd5, 0xc6, 0x45, 0x2c, 0xaf, + 0x80, 0xc1, 0x12, 0x97, 0x71, 0xc2, 0x13, 0x8e, 0xaf, 0xc0, 0x6a, 0x02, 0x66, 0x66, 0xa9, 0xf9, + 0xb0, 0x80, 0x31, 0xb7, 0xac, 0x82, 0x48, 0xbf, 0x3c, 0x7c, 0x81, 0x74, 0x41, 0x8b, 0x44, 0x84, + 0x13, 0x27, 0x9c, 0xa8, 0x4b, 0xb5, 0x7a, 0x8b, 0x4b, 0xf5, 0x48, 0xa5, 0xda, 0x91, 0xed, 0x96, + 0x09, 0x10, 0xdd, 0x55, 0x05, 0xa9, 0xde, 0xab, 0x7d, 0xfe, 0x6e, 0x56, 0xf2, 0x5f, 0xae, 0xae, + 0x6e, 0xf0, 0x18, 0xb4, 0x63, 0x7c, 0x4c, 0x18, 0xa1, 0xd1, 0x24, 0x4a, 0x66, 0x2e, 0x8e, 0x45, + 0xa6, 0xb5, 0xd1, 0x56, 0x96, 0x9a, 0x0f, 0x24, 0xb3, 0x24, 0x80, 0xa8, 0x55, 0x54, 0x5e, 0x8a, + 0xc2, 0x12, 0x44, 0x59, 0xaf, 0xde, 0x08, 0x29, 0x9c, 0x5d, 0x40, 0x94, 0xb5, 0xb5, 0xdc, 0xda, + 0xd7, 0xdc, 0xde, 0x0b, 0x50, 0x3f, 0x74, 0x62, 0x67, 0xc6, 0x72, 0xb0, 0x13, 0x86, 0xf4, 0x23, + 0xf6, 0x55, 0x78, 0xac, 0xab, 0xf5, 0x57, 0x06, 0x8d, 0xab, 0xe0, 0x92, 0x00, 0xa2, 0x96, 0xaa, + 0xc8, 0x60, 0xd9, 0xe8, 0xf9, 0xc9, 0x59, 0x4f, 0x3b, 0x3d, 0xeb, 0x69, 0x7f, 0xce, 0x7a, 0xda, + 
0x97, 0xf3, 0x5e, 0xe5, 0xf4, 0xbc, 0x57, 0xf9, 0x7d, 0xde, 0xab, 0xbc, 0xdf, 0x09, 0x08, 0x9f, + 0x26, 0xae, 0xe5, 0xd1, 0x99, 0xed, 0x51, 0x36, 0xa3, 0xcc, 0x26, 0xae, 0xb7, 0x53, 0xbc, 0x7b, + 0x4f, 0x86, 0x3b, 0xea, 0xe9, 0xe3, 0x8b, 0x39, 0x66, 0x6e, 0x5d, 0xfc, 0x90, 0x4f, 0xff, 0x06, + 0x00, 0x00, 0xff, 0xff, 0x47, 0x7f, 0x5c, 0x7c, 0x1c, 0x05, 0x00, 0x00, +} + +func (m *IdentifiedClientState) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *IdentifiedClientState) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *IdentifiedClientState) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.ClientState != nil { + { + size, err := m.ClientState.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintClient(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if len(m.ClientId) > 0 { + i -= len(m.ClientId) + copy(dAtA[i:], m.ClientId) + i = encodeVarintClient(dAtA, i, uint64(len(m.ClientId))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *ConsensusStateWithHeight) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ConsensusStateWithHeight) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ConsensusStateWithHeight) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.ConsensusState != nil { + { + size, err := m.ConsensusState.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintClient(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + { + size, err := m.Height.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintClient(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *ClientConsensusStates) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ClientConsensusStates) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ClientConsensusStates) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.ConsensusStates) > 0 { + for iNdEx := len(m.ConsensusStates) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.ConsensusStates[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintClient(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + if len(m.ClientId) > 0 { + i -= len(m.ClientId) + copy(dAtA[i:], m.ClientId) + i = encodeVarintClient(dAtA, i, uint64(len(m.ClientId))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *ClientUpdateProposal) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ClientUpdateProposal) MarshalTo(dAtA []byte) (int, error) { + size 
:= m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ClientUpdateProposal) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.InitialHeight.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintClient(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2a + if len(m.SubstituteClientId) > 0 { + i -= len(m.SubstituteClientId) + copy(dAtA[i:], m.SubstituteClientId) + i = encodeVarintClient(dAtA, i, uint64(len(m.SubstituteClientId))) + i-- + dAtA[i] = 0x22 + } + if len(m.SubjectClientId) > 0 { + i -= len(m.SubjectClientId) + copy(dAtA[i:], m.SubjectClientId) + i = encodeVarintClient(dAtA, i, uint64(len(m.SubjectClientId))) + i-- + dAtA[i] = 0x1a + } + if len(m.Description) > 0 { + i -= len(m.Description) + copy(dAtA[i:], m.Description) + i = encodeVarintClient(dAtA, i, uint64(len(m.Description))) + i-- + dAtA[i] = 0x12 + } + if len(m.Title) > 0 { + i -= len(m.Title) + copy(dAtA[i:], m.Title) + i = encodeVarintClient(dAtA, i, uint64(len(m.Title))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *Height) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Height) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Height) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.RevisionHeight != 0 { + i = encodeVarintClient(dAtA, i, uint64(m.RevisionHeight)) + i-- + dAtA[i] = 0x10 + } + if m.RevisionNumber != 0 { + i = encodeVarintClient(dAtA, i, uint64(m.RevisionNumber)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *Params) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Params) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Params) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.AllowedClients) > 0 { + for iNdEx := len(m.AllowedClients) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.AllowedClients[iNdEx]) + copy(dAtA[i:], m.AllowedClients[iNdEx]) + i = encodeVarintClient(dAtA, i, uint64(len(m.AllowedClients[iNdEx]))) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func encodeVarintClient(dAtA []byte, offset int, v uint64) int { + offset -= sovClient(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *IdentifiedClientState) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.ClientId) + if l > 0 { + n += 1 + l + sovClient(uint64(l)) + } + if m.ClientState != nil { + l = m.ClientState.Size() + n += 1 + l + sovClient(uint64(l)) + } + return n +} + +func (m *ConsensusStateWithHeight) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.Height.Size() + n += 1 + l + sovClient(uint64(l)) + if m.ConsensusState != nil { + l = m.ConsensusState.Size() + n += 1 + l + sovClient(uint64(l)) + } + return n +} + +func (m *ClientConsensusStates) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.ClientId) + if l > 0 
{ + n += 1 + l + sovClient(uint64(l)) + } + if len(m.ConsensusStates) > 0 { + for _, e := range m.ConsensusStates { + l = e.Size() + n += 1 + l + sovClient(uint64(l)) + } + } + return n +} + +func (m *ClientUpdateProposal) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Title) + if l > 0 { + n += 1 + l + sovClient(uint64(l)) + } + l = len(m.Description) + if l > 0 { + n += 1 + l + sovClient(uint64(l)) + } + l = len(m.SubjectClientId) + if l > 0 { + n += 1 + l + sovClient(uint64(l)) + } + l = len(m.SubstituteClientId) + if l > 0 { + n += 1 + l + sovClient(uint64(l)) + } + l = m.InitialHeight.Size() + n += 1 + l + sovClient(uint64(l)) + return n +} + +func (m *Height) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.RevisionNumber != 0 { + n += 1 + sovClient(uint64(m.RevisionNumber)) + } + if m.RevisionHeight != 0 { + n += 1 + sovClient(uint64(m.RevisionHeight)) + } + return n +} + +func (m *Params) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.AllowedClients) > 0 { + for _, s := range m.AllowedClients { + l = len(s) + n += 1 + l + sovClient(uint64(l)) + } + } + return n +} + +func sovClient(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozClient(x uint64) (n int) { + return sovClient(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *IdentifiedClientState) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowClient + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: IdentifiedClientState: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: IdentifiedClientState: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ClientId", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowClient + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthClient + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthClient + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ClientId = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ClientState", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowClient + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthClient + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthClient + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ClientState == nil { + m.ClientState = &types.Any{} + } + if err := m.ClientState.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := 
skipClient(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthClient + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ConsensusStateWithHeight) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowClient + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ConsensusStateWithHeight: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ConsensusStateWithHeight: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Height", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowClient + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthClient + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthClient + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Height.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ConsensusState", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowClient + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthClient + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthClient + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ConsensusState == nil { + m.ConsensusState = &types.Any{} + } + if err := m.ConsensusState.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipClient(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthClient + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ClientConsensusStates) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowClient + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ClientConsensusStates: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ClientConsensusStates: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for 
field ClientId", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowClient + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthClient + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthClient + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ClientId = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ConsensusStates", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowClient + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthClient + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthClient + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ConsensusStates = append(m.ConsensusStates, ConsensusStateWithHeight{}) + if err := m.ConsensusStates[len(m.ConsensusStates)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipClient(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthClient + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ClientUpdateProposal) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowClient + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ClientUpdateProposal: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ClientUpdateProposal: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Title", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowClient + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthClient + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthClient + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Title = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Description", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowClient + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + 
return ErrInvalidLengthClient + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthClient + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Description = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SubjectClientId", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowClient + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthClient + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthClient + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.SubjectClientId = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SubstituteClientId", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowClient + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthClient + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthClient + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.SubstituteClientId = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field InitialHeight", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowClient + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthClient + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthClient + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.InitialHeight.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipClient(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthClient + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Height) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowClient + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Height: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Height: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field RevisionNumber", wireType) + } + m.RevisionNumber = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 
ErrIntOverflowClient + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.RevisionNumber |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field RevisionHeight", wireType) + } + m.RevisionHeight = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowClient + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.RevisionHeight |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipClient(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthClient + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Params) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowClient + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Params: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Params: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AllowedClients", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowClient + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthClient + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthClient + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.AllowedClients = append(m.AllowedClients, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipClient(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthClient + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipClient(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowClient + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowClient + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowClient + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 
0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthClient + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupClient + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthClient + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthClient = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowClient = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupClient = fmt.Errorf("proto: unexpected end of group") +) diff --git a/core/02-client/types/client_test.go b/core/02-client/types/client_test.go new file mode 100644 index 0000000000..2dfd3967d2 --- /dev/null +++ b/core/02-client/types/client_test.go @@ -0,0 +1,87 @@ +package types_test + +import ( + "testing" + + "github.com/stretchr/testify/require" + + "github.com/cosmos/cosmos-sdk/x/ibc/core/02-client/types" + "github.com/cosmos/cosmos-sdk/x/ibc/core/exported" + ibctesting "github.com/cosmos/cosmos-sdk/x/ibc/testing" +) + +func (suite *TypesTestSuite) TestMarshalConsensusStateWithHeight() { + var ( + cswh types.ConsensusStateWithHeight + ) + + testCases := []struct { + name string + malleate func() + }{ + { + "solo machine client", func() { + soloMachine := ibctesting.NewSolomachine(suite.T(), suite.chainA.Codec, "solomachine", "", 1) + cswh = types.NewConsensusStateWithHeight(types.NewHeight(0, soloMachine.Sequence), soloMachine.ConsensusState()) + }, + }, + { + "tendermint client", func() { + clientA, _ := suite.coordinator.SetupClients(suite.chainA, suite.chainB, exported.Tendermint) + clientState := suite.chainA.GetClientState(clientA) + consensusState, ok := suite.chainA.GetConsensusState(clientA, clientState.GetLatestHeight()) + suite.Require().True(ok) + + cswh = types.NewConsensusStateWithHeight(clientState.GetLatestHeight().(types.Height), consensusState) + }, + }, + } + + for _, tc := range testCases { + tc := tc + + suite.Run(tc.name, func() { + suite.SetupTest() + + tc.malleate() + + cdc := suite.chainA.App.AppCodec() + + // marshal message + bz, err := cdc.MarshalJSON(&cswh) + suite.Require().NoError(err) + + // unmarshal message + newCswh := &types.ConsensusStateWithHeight{} + err = cdc.UnmarshalJSON(bz, newCswh) + suite.Require().NoError(err) + }) + } +} + +func TestValidateClientType(t *testing.T) { + testCases := []struct { + name string + clientType string + expPass bool + }{ + {"valid", "tendermint", true}, + {"valid solomachine", "solomachine-v1", true}, + {"too large", "tenderminttenderminttenderminttenderminttendermintt", false}, + {"too short", "t", false}, + {"blank id", " ", false}, + {"empty id", "", false}, + {"ends with dash", "tendermint-", false}, + } + + for _, tc := range testCases { + + err := types.ValidateClientType(tc.clientType) + + if tc.expPass { + require.NoError(t, err, tc.name) + } else { + require.Error(t, err, tc.name) + } + } +} diff --git a/core/02-client/types/codec.go b/core/02-client/types/codec.go new file mode 100644 index 0000000000..59a15832be --- /dev/null +++ b/core/02-client/types/codec.go @@ -0,0 +1,188 @@ +package types + +import ( + proto "github.com/gogo/protobuf/proto" + + codectypes "github.com/cosmos/cosmos-sdk/codec/types" + sdk "github.com/cosmos/cosmos-sdk/types" + sdkerrors "github.com/cosmos/cosmos-sdk/types/errors" + "github.com/cosmos/cosmos-sdk/types/msgservice" + govtypes 
"github.com/cosmos/cosmos-sdk/x/gov/types" + "github.com/cosmos/cosmos-sdk/x/ibc/core/exported" +) + +// RegisterInterfaces registers the client interfaces to protobuf Any. +func RegisterInterfaces(registry codectypes.InterfaceRegistry) { + registry.RegisterInterface( + "ibc.core.client.v1.ClientState", + (*exported.ClientState)(nil), + ) + registry.RegisterInterface( + "ibc.core.client.v1.ConsensusState", + (*exported.ConsensusState)(nil), + ) + registry.RegisterInterface( + "ibc.core.client.v1.Header", + (*exported.Header)(nil), + ) + registry.RegisterInterface( + "ibc.core.client.v1.Height", + (*exported.Height)(nil), + &Height{}, + ) + registry.RegisterInterface( + "ibc.core.client.v1.Misbehaviour", + (*exported.Misbehaviour)(nil), + ) + registry.RegisterImplementations( + (*govtypes.Content)(nil), + &ClientUpdateProposal{}, + ) + registry.RegisterImplementations( + (*sdk.Msg)(nil), + &MsgCreateClient{}, + &MsgUpdateClient{}, + &MsgUpgradeClient{}, + &MsgSubmitMisbehaviour{}, + ) + + msgservice.RegisterMsgServiceDesc(registry, &_Msg_serviceDesc) +} + +// PackClientState constructs a new Any packed with the given client state value. It returns +// an error if the client state can't be casted to a protobuf message or if the concrete +// implemention is not registered to the protobuf codec. +func PackClientState(clientState exported.ClientState) (*codectypes.Any, error) { + msg, ok := clientState.(proto.Message) + if !ok { + return nil, sdkerrors.Wrapf(sdkerrors.ErrPackAny, "cannot proto marshal %T", clientState) + } + + anyClientState, err := codectypes.NewAnyWithValue(msg) + if err != nil { + return nil, sdkerrors.Wrap(sdkerrors.ErrPackAny, err.Error()) + } + + return anyClientState, nil +} + +// UnpackClientState unpacks an Any into a ClientState. It returns an error if the +// client state can't be unpacked into a ClientState. +func UnpackClientState(any *codectypes.Any) (exported.ClientState, error) { + if any == nil { + return nil, sdkerrors.Wrap(sdkerrors.ErrUnpackAny, "protobuf Any message cannot be nil") + } + + clientState, ok := any.GetCachedValue().(exported.ClientState) + if !ok { + return nil, sdkerrors.Wrapf(sdkerrors.ErrUnpackAny, "cannot unpack Any into ClientState %T", any) + } + + return clientState, nil +} + +// PackConsensusState constructs a new Any packed with the given consensus state value. It returns +// an error if the consensus state can't be casted to a protobuf message or if the concrete +// implemention is not registered to the protobuf codec. +func PackConsensusState(consensusState exported.ConsensusState) (*codectypes.Any, error) { + msg, ok := consensusState.(proto.Message) + if !ok { + return nil, sdkerrors.Wrapf(sdkerrors.ErrPackAny, "cannot proto marshal %T", consensusState) + } + + anyConsensusState, err := codectypes.NewAnyWithValue(msg) + if err != nil { + return nil, sdkerrors.Wrap(sdkerrors.ErrPackAny, err.Error()) + } + + return anyConsensusState, nil +} + +// MustPackConsensusState calls PackConsensusState and panics on error. +func MustPackConsensusState(consensusState exported.ConsensusState) *codectypes.Any { + anyConsensusState, err := PackConsensusState(consensusState) + if err != nil { + panic(err) + } + + return anyConsensusState +} + +// UnpackConsensusState unpacks an Any into a ConsensusState. It returns an error if the +// consensus state can't be unpacked into a ConsensusState. 
+func UnpackConsensusState(any *codectypes.Any) (exported.ConsensusState, error) { + if any == nil { + return nil, sdkerrors.Wrap(sdkerrors.ErrUnpackAny, "protobuf Any message cannot be nil") + } + + consensusState, ok := any.GetCachedValue().(exported.ConsensusState) + if !ok { + return nil, sdkerrors.Wrapf(sdkerrors.ErrUnpackAny, "cannot unpack Any into ConsensusState %T", any) + } + + return consensusState, nil +} + +// PackHeader constructs a new Any packed with the given header value. It returns +// an error if the header can't be casted to a protobuf message or if the concrete +// implemention is not registered to the protobuf codec. +func PackHeader(header exported.Header) (*codectypes.Any, error) { + msg, ok := header.(proto.Message) + if !ok { + return nil, sdkerrors.Wrapf(sdkerrors.ErrPackAny, "cannot proto marshal %T", header) + } + + anyHeader, err := codectypes.NewAnyWithValue(msg) + if err != nil { + return nil, sdkerrors.Wrap(sdkerrors.ErrPackAny, err.Error()) + } + + return anyHeader, nil +} + +// UnpackHeader unpacks an Any into a Header. It returns an error if the +// consensus state can't be unpacked into a Header. +func UnpackHeader(any *codectypes.Any) (exported.Header, error) { + if any == nil { + return nil, sdkerrors.Wrap(sdkerrors.ErrUnpackAny, "protobuf Any message cannot be nil") + } + + header, ok := any.GetCachedValue().(exported.Header) + if !ok { + return nil, sdkerrors.Wrapf(sdkerrors.ErrUnpackAny, "cannot unpack Any into Header %T", any) + } + + return header, nil +} + +// PackMisbehaviour constructs a new Any packed with the given misbehaviour value. It returns +// an error if the misbehaviour can't be casted to a protobuf message or if the concrete +// implemention is not registered to the protobuf codec. +func PackMisbehaviour(misbehaviour exported.Misbehaviour) (*codectypes.Any, error) { + msg, ok := misbehaviour.(proto.Message) + if !ok { + return nil, sdkerrors.Wrapf(sdkerrors.ErrPackAny, "cannot proto marshal %T", misbehaviour) + } + + anyMisbhaviour, err := codectypes.NewAnyWithValue(msg) + if err != nil { + return nil, sdkerrors.Wrap(sdkerrors.ErrPackAny, err.Error()) + } + + return anyMisbhaviour, nil +} + +// UnpackMisbehaviour unpacks an Any into a Misbehaviour. It returns an error if the +// Any can't be unpacked into a Misbehaviour. 
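Both halves of the pattern guard their inputs: packing fails when the value does not implement proto.Message (a nil interface is the common case), and unpacking rejects a nil Any before the cached value is consulted. A short sketch of those error paths, reusing the header helpers defined here:

package main

import (
	"fmt"

	clienttypes "github.com/cosmos/cosmos-sdk/x/ibc/core/02-client/types"
)

func main() {
	// A nil exported.Header fails the proto.Message type assertion in PackHeader.
	if _, err := clienttypes.PackHeader(nil); err != nil {
		fmt.Println("pack:", err)
	}

	// A nil Any is rejected by UnpackHeader before GetCachedValue is called.
	if _, err := clienttypes.UnpackHeader(nil); err != nil {
		fmt.Println("unpack:", err)
	}
}

Both failures come back wrapped around the standard ErrPackAny / ErrUnpackAny sentinel errors, so callers can distinguish them from decoding problems.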
+func UnpackMisbehaviour(any *codectypes.Any) (exported.Misbehaviour, error) { + if any == nil { + return nil, sdkerrors.Wrap(sdkerrors.ErrUnpackAny, "protobuf Any message cannot be nil") + } + + misbehaviour, ok := any.GetCachedValue().(exported.Misbehaviour) + if !ok { + return nil, sdkerrors.Wrapf(sdkerrors.ErrUnpackAny, "cannot unpack Any into Misbehaviour %T", any) + } + + return misbehaviour, nil +} diff --git a/core/02-client/types/codec_test.go b/core/02-client/types/codec_test.go new file mode 100644 index 0000000000..75cfc97eb0 --- /dev/null +++ b/core/02-client/types/codec_test.go @@ -0,0 +1,210 @@ +package types_test + +import ( + codectypes "github.com/cosmos/cosmos-sdk/codec/types" + "github.com/cosmos/cosmos-sdk/x/ibc/core/02-client/types" + commitmenttypes "github.com/cosmos/cosmos-sdk/x/ibc/core/23-commitment/types" + "github.com/cosmos/cosmos-sdk/x/ibc/core/exported" + ibctmtypes "github.com/cosmos/cosmos-sdk/x/ibc/light-clients/07-tendermint/types" + localhosttypes "github.com/cosmos/cosmos-sdk/x/ibc/light-clients/09-localhost/types" + ibctesting "github.com/cosmos/cosmos-sdk/x/ibc/testing" +) + +type caseAny struct { + name string + any *codectypes.Any + expPass bool +} + +func (suite *TypesTestSuite) TestPackClientState() { + + testCases := []struct { + name string + clientState exported.ClientState + expPass bool + }{ + { + "solo machine client", + ibctesting.NewSolomachine(suite.T(), suite.chainA.Codec, "solomachine", "", 2).ClientState(), + true, + }, + { + "tendermint client", + ibctmtypes.NewClientState(chainID, ibctesting.DefaultTrustLevel, ibctesting.TrustingPeriod, ibctesting.UnbondingPeriod, ibctesting.MaxClockDrift, clientHeight, commitmenttypes.GetSDKSpecs(), ibctesting.UpgradePath, false, false), + true, + }, + { + "localhost client", + localhosttypes.NewClientState(chainID, clientHeight), + true, + }, + { + "nil", + nil, + false, + }, + } + + testCasesAny := []caseAny{} + + for _, tc := range testCases { + clientAny, err := types.PackClientState(tc.clientState) + if tc.expPass { + suite.Require().NoError(err, tc.name) + } else { + suite.Require().Error(err, tc.name) + } + + testCasesAny = append(testCasesAny, caseAny{tc.name, clientAny, tc.expPass}) + } + + for i, tc := range testCasesAny { + cs, err := types.UnpackClientState(tc.any) + if tc.expPass { + suite.Require().NoError(err, tc.name) + suite.Require().Equal(testCases[i].clientState, cs, tc.name) + } else { + suite.Require().Error(err, tc.name) + } + } +} + +func (suite *TypesTestSuite) TestPackConsensusState() { + testCases := []struct { + name string + consensusState exported.ConsensusState + expPass bool + }{ + { + "solo machine consensus", + ibctesting.NewSolomachine(suite.T(), suite.chainA.Codec, "solomachine", "", 2).ConsensusState(), + true, + }, + { + "tendermint consensus", + suite.chainA.LastHeader.ConsensusState(), + true, + }, + { + "nil", + nil, + false, + }, + } + + testCasesAny := []caseAny{} + + for _, tc := range testCases { + clientAny, err := types.PackConsensusState(tc.consensusState) + if tc.expPass { + suite.Require().NoError(err, tc.name) + } else { + suite.Require().Error(err, tc.name) + } + testCasesAny = append(testCasesAny, caseAny{tc.name, clientAny, tc.expPass}) + } + + for i, tc := range testCasesAny { + cs, err := types.UnpackConsensusState(tc.any) + if tc.expPass { + suite.Require().NoError(err, tc.name) + suite.Require().Equal(testCases[i].consensusState, cs, tc.name) + } else { + suite.Require().Error(err, tc.name) + } + } +} + +func (suite *TypesTestSuite) 
TestPackHeader() { + testCases := []struct { + name string + header exported.Header + expPass bool + }{ + { + "solo machine header", + ibctesting.NewSolomachine(suite.T(), suite.chainA.Codec, "solomachine", "", 2).CreateHeader(), + true, + }, + { + "tendermint header", + suite.chainA.LastHeader, + true, + }, + { + "nil", + nil, + false, + }, + } + + testCasesAny := []caseAny{} + + for _, tc := range testCases { + clientAny, err := types.PackHeader(tc.header) + if tc.expPass { + suite.Require().NoError(err, tc.name) + } else { + suite.Require().Error(err, tc.name) + } + + testCasesAny = append(testCasesAny, caseAny{tc.name, clientAny, tc.expPass}) + } + + for i, tc := range testCasesAny { + cs, err := types.UnpackHeader(tc.any) + if tc.expPass { + suite.Require().NoError(err, tc.name) + suite.Require().Equal(testCases[i].header, cs, tc.name) + } else { + suite.Require().Error(err, tc.name) + } + } +} + +func (suite *TypesTestSuite) TestPackMisbehaviour() { + testCases := []struct { + name string + misbehaviour exported.Misbehaviour + expPass bool + }{ + { + "solo machine misbehaviour", + ibctesting.NewSolomachine(suite.T(), suite.chainA.Codec, "solomachine", "", 2).CreateMisbehaviour(), + true, + }, + { + "tendermint misbehaviour", + ibctmtypes.NewMisbehaviour("tendermint", suite.chainA.LastHeader, suite.chainA.LastHeader), + true, + }, + { + "nil", + nil, + false, + }, + } + + testCasesAny := []caseAny{} + + for _, tc := range testCases { + clientAny, err := types.PackMisbehaviour(tc.misbehaviour) + if tc.expPass { + suite.Require().NoError(err, tc.name) + } else { + suite.Require().Error(err, tc.name) + } + + testCasesAny = append(testCasesAny, caseAny{tc.name, clientAny, tc.expPass}) + } + + for i, tc := range testCasesAny { + cs, err := types.UnpackMisbehaviour(tc.any) + if tc.expPass { + suite.Require().NoError(err, tc.name) + suite.Require().Equal(testCases[i].misbehaviour, cs, tc.name) + } else { + suite.Require().Error(err, tc.name) + } + } +} diff --git a/core/02-client/types/encoding.go b/core/02-client/types/encoding.go new file mode 100644 index 0000000000..a912b13abd --- /dev/null +++ b/core/02-client/types/encoding.go @@ -0,0 +1,86 @@ +package types + +import ( + "fmt" + + "github.com/cosmos/cosmos-sdk/codec" + "github.com/cosmos/cosmos-sdk/x/ibc/core/exported" +) + +// MustUnmarshalClientState attempts to decode and return an ClientState object from +// raw encoded bytes. It panics on error. +func MustUnmarshalClientState(cdc codec.BinaryMarshaler, bz []byte) exported.ClientState { + clientState, err := UnmarshalClientState(cdc, bz) + if err != nil { + panic(fmt.Errorf("failed to decode client state: %w", err)) + } + + return clientState +} + +// MustMarshalClientState attempts to encode an ClientState object and returns the +// raw encoded bytes. It panics on error. +func MustMarshalClientState(cdc codec.BinaryMarshaler, clientState exported.ClientState) []byte { + bz, err := MarshalClientState(cdc, clientState) + if err != nil { + panic(fmt.Errorf("failed to encode client state: %w", err)) + } + + return bz +} + +// MarshalClientState protobuf serializes an ClientState interface +func MarshalClientState(cdc codec.BinaryMarshaler, clientStateI exported.ClientState) ([]byte, error) { + return cdc.MarshalInterface(clientStateI) +} + +// UnmarshalClientState returns an ClientState interface from raw encoded clientState +// bytes of a Proto-based ClientState type. An error is returned upon decoding +// failure. 
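encoding.go delegates to the codec's MarshalInterface/UnmarshalInterface, so a usable round trip needs a ProtoCodec whose registry knows both the 02-client interfaces and at least one concrete light client. The sketch below is illustrative only: it assumes the SDK import paths used in this patch, that the Tendermint light-client package exposes RegisterInterfaces and NewConsensusState as in the SDK this code was copied from, and it exercises the consensus-state counterparts defined just below (the client-state helpers above behave identically):

package main

import (
	"fmt"
	"time"

	"github.com/cosmos/cosmos-sdk/codec"
	codectypes "github.com/cosmos/cosmos-sdk/codec/types"
	clienttypes "github.com/cosmos/cosmos-sdk/x/ibc/core/02-client/types"
	commitmenttypes "github.com/cosmos/cosmos-sdk/x/ibc/core/23-commitment/types"
	ibctmtypes "github.com/cosmos/cosmos-sdk/x/ibc/light-clients/07-tendermint/types"
)

func main() {
	// Wire up a codec that can resolve the Any type URLs used by IBC clients.
	registry := codectypes.NewInterfaceRegistry()
	clienttypes.RegisterInterfaces(registry)
	ibctmtypes.RegisterInterfaces(registry)
	cdc := codec.NewProtoCodec(registry) // satisfies codec.BinaryMarshaler

	// A throwaway Tendermint consensus state with placeholder hashes.
	consensusState := ibctmtypes.NewConsensusState(
		time.Now(),
		commitmenttypes.NewMerkleRoot([]byte("app_hash")),
		[]byte("next_validators_hash"),
	)

	// Interface value -> Any-wrapped bytes -> interface value again.
	bz := clienttypes.MustMarshalConsensusState(cdc, consensusState)
	decoded := clienttypes.MustUnmarshalConsensusState(cdc, bz)
	fmt.Println(decoded.ClientType())
}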
+func UnmarshalClientState(cdc codec.BinaryMarshaler, bz []byte) (exported.ClientState, error) { + var clientState exported.ClientState + if err := cdc.UnmarshalInterface(bz, &clientState); err != nil { + return nil, err + } + + return clientState, nil +} + +// MustUnmarshalConsensusState attempts to decode and return an ConsensusState object from +// raw encoded bytes. It panics on error. +func MustUnmarshalConsensusState(cdc codec.BinaryMarshaler, bz []byte) exported.ConsensusState { + consensusState, err := UnmarshalConsensusState(cdc, bz) + if err != nil { + panic(fmt.Errorf("failed to decode consensus state: %w", err)) + } + + return consensusState +} + +// MustMarshalConsensusState attempts to encode an ConsensusState object and returns the +// raw encoded bytes. It panics on error. +func MustMarshalConsensusState(cdc codec.BinaryMarshaler, consensusState exported.ConsensusState) []byte { + bz, err := MarshalConsensusState(cdc, consensusState) + if err != nil { + panic(fmt.Errorf("failed to encode consensus state: %w", err)) + } + + return bz +} + +// MarshalConsensusState protobuf serializes an ConsensusState interface +func MarshalConsensusState(cdc codec.BinaryMarshaler, cs exported.ConsensusState) ([]byte, error) { + return cdc.MarshalInterface(cs) +} + +// UnmarshalConsensusState returns an ConsensusState interface from raw encoded clientState +// bytes of a Proto-based ConsensusState type. An error is returned upon decoding +// failure. +func UnmarshalConsensusState(cdc codec.BinaryMarshaler, bz []byte) (exported.ConsensusState, error) { + var consensusState exported.ConsensusState + if err := cdc.UnmarshalInterface(bz, &consensusState); err != nil { + return nil, err + } + + return consensusState, nil +} diff --git a/core/02-client/types/errors.go b/core/02-client/types/errors.go new file mode 100644 index 0000000000..5b44cd5222 --- /dev/null +++ b/core/02-client/types/errors.go @@ -0,0 +1,35 @@ +package types + +import ( + sdkerrors "github.com/cosmos/cosmos-sdk/types/errors" +) + +// IBC client sentinel errors +var ( + ErrClientExists = sdkerrors.Register(SubModuleName, 2, "light client already exists") + ErrInvalidClient = sdkerrors.Register(SubModuleName, 3, "light client is invalid") + ErrClientNotFound = sdkerrors.Register(SubModuleName, 4, "light client not found") + ErrClientFrozen = sdkerrors.Register(SubModuleName, 5, "light client is frozen due to misbehaviour") + ErrInvalidClientMetadata = sdkerrors.Register(SubModuleName, 6, "invalid client metadata") + ErrConsensusStateNotFound = sdkerrors.Register(SubModuleName, 7, "consensus state not found") + ErrInvalidConsensus = sdkerrors.Register(SubModuleName, 8, "invalid consensus state") + ErrClientTypeNotFound = sdkerrors.Register(SubModuleName, 9, "client type not found") + ErrInvalidClientType = sdkerrors.Register(SubModuleName, 10, "invalid client type") + ErrRootNotFound = sdkerrors.Register(SubModuleName, 11, "commitment root not found") + ErrInvalidHeader = sdkerrors.Register(SubModuleName, 12, "invalid client header") + ErrInvalidMisbehaviour = sdkerrors.Register(SubModuleName, 13, "invalid light client misbehaviour") + ErrFailedClientStateVerification = sdkerrors.Register(SubModuleName, 14, "client state verification failed") + ErrFailedClientConsensusStateVerification = sdkerrors.Register(SubModuleName, 15, "client consensus state verification failed") + ErrFailedConnectionStateVerification = sdkerrors.Register(SubModuleName, 16, "connection state verification failed") + ErrFailedChannelStateVerification = 
sdkerrors.Register(SubModuleName, 17, "channel state verification failed") + ErrFailedPacketCommitmentVerification = sdkerrors.Register(SubModuleName, 18, "packet commitment verification failed") + ErrFailedPacketAckVerification = sdkerrors.Register(SubModuleName, 19, "packet acknowledgement verification failed") + ErrFailedPacketReceiptVerification = sdkerrors.Register(SubModuleName, 20, "packet receipt verification failed") + ErrFailedNextSeqRecvVerification = sdkerrors.Register(SubModuleName, 21, "next sequence receive verification failed") + ErrSelfConsensusStateNotFound = sdkerrors.Register(SubModuleName, 22, "self consensus state not found") + ErrUpdateClientFailed = sdkerrors.Register(SubModuleName, 23, "unable to update light client") + ErrInvalidUpdateClientProposal = sdkerrors.Register(SubModuleName, 24, "invalid update client proposal") + ErrInvalidUpgradeClient = sdkerrors.Register(SubModuleName, 25, "invalid client upgrade") + ErrInvalidHeight = sdkerrors.Register(SubModuleName, 26, "invalid height") + ErrInvalidSubstitute = sdkerrors.Register(SubModuleName, 27, "invalid client state substitute") +) diff --git a/core/02-client/types/events.go b/core/02-client/types/events.go new file mode 100644 index 0000000000..d0760ba89c --- /dev/null +++ b/core/02-client/types/events.go @@ -0,0 +1,26 @@ +package types + +import ( + "fmt" + + host "github.com/cosmos/cosmos-sdk/x/ibc/core/24-host" +) + +// IBC client events +const ( + AttributeKeyClientID = "client_id" + AttributeKeySubjectClientID = "subject_client_id" + AttributeKeyClientType = "client_type" + AttributeKeyConsensusHeight = "consensus_height" +) + +// IBC client events vars +var ( + EventTypeCreateClient = "create_client" + EventTypeUpdateClient = "update_client" + EventTypeUpgradeClient = "upgrade_client" + EventTypeSubmitMisbehaviour = "client_misbehaviour" + EventTypeUpdateClientProposal = "update_client_proposal" + + AttributeValueCategory = fmt.Sprintf("%s_%s", host.ModuleName, SubModuleName) +) diff --git a/core/02-client/types/expected_keepers.go b/core/02-client/types/expected_keepers.go new file mode 100644 index 0000000000..defc81506b --- /dev/null +++ b/core/02-client/types/expected_keepers.go @@ -0,0 +1,14 @@ +package types + +import ( + "time" + + sdk "github.com/cosmos/cosmos-sdk/types" + stakingtypes "github.com/cosmos/cosmos-sdk/x/staking/types" +) + +// StakingKeeper expected staking keeper +type StakingKeeper interface { + GetHistoricalInfo(ctx sdk.Context, height int64) (stakingtypes.HistoricalInfo, bool) + UnbondingTime(ctx sdk.Context) time.Duration +} diff --git a/core/02-client/types/genesis.go b/core/02-client/types/genesis.go new file mode 100644 index 0000000000..3f197208e3 --- /dev/null +++ b/core/02-client/types/genesis.go @@ -0,0 +1,250 @@ +package types + +import ( + "fmt" + "sort" + + codectypes "github.com/cosmos/cosmos-sdk/codec/types" + host "github.com/cosmos/cosmos-sdk/x/ibc/core/24-host" + "github.com/cosmos/cosmos-sdk/x/ibc/core/exported" +) + +var ( + _ codectypes.UnpackInterfacesMessage = IdentifiedClientState{} + _ codectypes.UnpackInterfacesMessage = ClientsConsensusStates{} + _ codectypes.UnpackInterfacesMessage = ClientConsensusStates{} + _ codectypes.UnpackInterfacesMessage = GenesisState{} +) + +var ( + _ sort.Interface = ClientsConsensusStates{} + _ exported.GenesisMetadata = GenesisMetadata{} +) + +// ClientsConsensusStates defines a slice of ClientConsensusStates that supports the sort interface +type ClientsConsensusStates []ClientConsensusStates + +// Len implements 
sort.Interface +func (ccs ClientsConsensusStates) Len() int { return len(ccs) } + +// Less implements sort.Interface +func (ccs ClientsConsensusStates) Less(i, j int) bool { return ccs[i].ClientId < ccs[j].ClientId } + +// Swap implements sort.Interface +func (ccs ClientsConsensusStates) Swap(i, j int) { ccs[i], ccs[j] = ccs[j], ccs[i] } + +// Sort is a helper function to sort the set of ClientsConsensusStates in place +func (ccs ClientsConsensusStates) Sort() ClientsConsensusStates { + sort.Sort(ccs) + return ccs +} + +// UnpackInterfaces implements UnpackInterfacesMessage.UnpackInterfaces +func (ccs ClientsConsensusStates) UnpackInterfaces(unpacker codectypes.AnyUnpacker) error { + for _, clientConsensus := range ccs { + if err := clientConsensus.UnpackInterfaces(unpacker); err != nil { + return err + } + } + return nil +} + +// NewClientConsensusStates creates a new ClientConsensusStates instance. +func NewClientConsensusStates(clientID string, consensusStates []ConsensusStateWithHeight) ClientConsensusStates { + return ClientConsensusStates{ + ClientId: clientID, + ConsensusStates: consensusStates, + } +} + +// UnpackInterfaces implements UnpackInterfacesMessage.UnpackInterfaces +func (ccs ClientConsensusStates) UnpackInterfaces(unpacker codectypes.AnyUnpacker) error { + for _, consStateWithHeight := range ccs.ConsensusStates { + if err := consStateWithHeight.UnpackInterfaces(unpacker); err != nil { + return err + } + } + return nil +} + +// NewGenesisState creates a GenesisState instance. +func NewGenesisState( + clients []IdentifiedClientState, clientsConsensus ClientsConsensusStates, clientsMetadata []IdentifiedGenesisMetadata, + params Params, createLocalhost bool, nextClientSequence uint64, +) GenesisState { + return GenesisState{ + Clients: clients, + ClientsConsensus: clientsConsensus, + ClientsMetadata: clientsMetadata, + Params: params, + CreateLocalhost: createLocalhost, + NextClientSequence: nextClientSequence, + } +} + +// DefaultGenesisState returns the ibc client submodule's default genesis state. +func DefaultGenesisState() GenesisState { + return GenesisState{ + Clients: []IdentifiedClientState{}, + ClientsConsensus: ClientsConsensusStates{}, + Params: DefaultParams(), + CreateLocalhost: false, + NextClientSequence: 0, + } +} + +// UnpackInterfaces implements UnpackInterfacesMessage.UnpackInterfaces +func (gs GenesisState) UnpackInterfaces(unpacker codectypes.AnyUnpacker) error { + for _, client := range gs.Clients { + if err := client.UnpackInterfaces(unpacker); err != nil { + return err + } + } + + return gs.ClientsConsensus.UnpackInterfaces(unpacker) +} + +// Validate performs basic genesis state validation returning an error upon any +// failure. +func (gs GenesisState) Validate() error { + // keep track of the max sequence to ensure it is less than + // the next sequence used in creating client identifers. 
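ClientsConsensusStates implements sort.Interface so the per-client consensus states can be ordered by client identifier, which is typically used to keep genesis export deterministic; Sort orders the slice in place and returns it for chaining. A small sketch using only the constructors defined in this file (the client identifiers are made up):

package main

import (
	"fmt"

	clienttypes "github.com/cosmos/cosmos-sdk/x/ibc/core/02-client/types"
)

func main() {
	ccs := clienttypes.ClientsConsensusStates{
		clienttypes.NewClientConsensusStates("07-tendermint-1", nil),
		clienttypes.NewClientConsensusStates("07-tendermint-0", nil),
	}

	// Sort mutates the slice in place and returns it for chaining.
	for _, cs := range ccs.Sort() {
		fmt.Println(cs.ClientId) // 07-tendermint-0, then 07-tendermint-1
	}
}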
+ var maxSequence uint64 = 0 + + if err := gs.Params.Validate(); err != nil { + return err + } + + validClients := make(map[string]string) + + for i, client := range gs.Clients { + if err := host.ClientIdentifierValidator(client.ClientId); err != nil { + return fmt.Errorf("invalid client consensus state identifier %s index %d: %w", client.ClientId, i, err) + } + + clientState, ok := client.ClientState.GetCachedValue().(exported.ClientState) + if !ok { + return fmt.Errorf("invalid client state with ID %s", client.ClientId) + } + + if !gs.Params.IsAllowedClient(clientState.ClientType()) { + return fmt.Errorf("client type %s not allowed by genesis params", clientState.ClientType()) + } + if err := clientState.Validate(); err != nil { + return fmt.Errorf("invalid client %v index %d: %w", client, i, err) + } + + clientType, sequence, err := ParseClientIdentifier(client.ClientId) + if err != nil { + return err + } + + if clientType != clientState.ClientType() { + return fmt.Errorf("client state type %s does not equal client type in client identifier %s", clientState.ClientType(), clientType) + } + + if err := ValidateClientType(clientType); err != nil { + return err + } + + if sequence > maxSequence { + maxSequence = sequence + } + + // add client id to validClients map + validClients[client.ClientId] = clientState.ClientType() + } + + for _, cc := range gs.ClientsConsensus { + // check that consensus state is for a client in the genesis clients list + clientType, ok := validClients[cc.ClientId] + if !ok { + return fmt.Errorf("consensus state in genesis has a client id %s that does not map to a genesis client", cc.ClientId) + } + + for i, consensusState := range cc.ConsensusStates { + if consensusState.Height.IsZero() { + return fmt.Errorf("consensus state height cannot be zero") + } + + cs, ok := consensusState.ConsensusState.GetCachedValue().(exported.ConsensusState) + if !ok { + return fmt.Errorf("invalid consensus state with client ID %s at height %s", cc.ClientId, consensusState.Height) + } + + if err := cs.ValidateBasic(); err != nil { + return fmt.Errorf("invalid client consensus state %v clientID %s index %d: %w", cs, cc.ClientId, i, err) + } + + // ensure consensus state type matches client state type + if clientType != cs.ClientType() { + return fmt.Errorf("consensus state client type %s does not equal client state client type %s", cs.ClientType(), clientType) + } + + } + } + + for _, clientMetadata := range gs.ClientsMetadata { + // check that metadata is for a client in the genesis clients list + _, ok := validClients[clientMetadata.ClientId] + if !ok { + return fmt.Errorf("metadata in genesis has a client id %s that does not map to a genesis client", clientMetadata.ClientId) + } + + for i, gm := range clientMetadata.ClientMetadata { + if err := gm.Validate(); err != nil { + return fmt.Errorf("invalid client metadata %v clientID %s index %d: %w", gm, clientMetadata.ClientId, i, err) + } + + } + + } + + if gs.CreateLocalhost && !gs.Params.IsAllowedClient(exported.Localhost) { + return fmt.Errorf("localhost client is not registered on the allowlist") + } + + if maxSequence != 0 && maxSequence >= gs.NextClientSequence { + return fmt.Errorf("next client identifier sequence %d must be greater than the maximum sequence used in the provided client identifiers %d", gs.NextClientSequence, maxSequence) + } + + return nil +} + +// NewGenesisMetadata is a constructor for GenesisMetadata +func NewGenesisMetadata(key, val []byte) GenesisMetadata { + return GenesisMetadata{ + Key: key, + Value: 
val, + } +} + +// GetKey returns the key of metadata. Implements exported.GenesisMetadata interface. +func (gm GenesisMetadata) GetKey() []byte { + return gm.Key +} + +// GetValue returns the value of metadata. Implements exported.GenesisMetadata interface. +func (gm GenesisMetadata) GetValue() []byte { + return gm.Value +} + +// Validate ensures key and value of metadata are not empty +func (gm GenesisMetadata) Validate() error { + if len(gm.Key) == 0 { + return fmt.Errorf("genesis metadata key cannot be empty") + } + if len(gm.Value) == 0 { + return fmt.Errorf("genesis metadata value cannot be empty") + } + return nil +} + +// NewIdentifiedGenesisMetadata takes in a client ID and list of genesis metadata for that client +// and constructs a new IdentifiedGenesisMetadata. +func NewIdentifiedGenesisMetadata(clientID string, gms []GenesisMetadata) IdentifiedGenesisMetadata { + return IdentifiedGenesisMetadata{ + ClientId: clientID, + ClientMetadata: gms, + } +} diff --git a/core/02-client/types/genesis.pb.go b/core/02-client/types/genesis.pb.go new file mode 100644 index 0000000000..e4246f5c65 --- /dev/null +++ b/core/02-client/types/genesis.pb.go @@ -0,0 +1,1060 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: ibcgo/core/client/v1/genesis.proto + +package types + +import ( + fmt "fmt" + _ "github.com/gogo/protobuf/gogoproto" + proto "github.com/gogo/protobuf/proto" + io "io" + math "math" + math_bits "math/bits" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +// GenesisState defines the ibc client submodule's genesis state. 
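Genesis metadata is exported as raw key/value byte pairs grouped per client, and Validate only requires that neither side is empty. A minimal usage sketch; the key and client identifier are illustrative placeholders, not keys the module necessarily writes:

package main

import (
	"fmt"

	clienttypes "github.com/cosmos/cosmos-sdk/x/ibc/core/02-client/types"
)

func main() {
	gm := clienttypes.NewGenesisMetadata([]byte("processedTime/1-10"), []byte{0x01})
	if err := gm.Validate(); err != nil {
		panic(err) // an empty key or value would land here
	}

	// Group the metadata under the client that owns it for genesis export.
	igm := clienttypes.NewIdentifiedGenesisMetadata(
		"07-tendermint-0",
		[]clienttypes.GenesisMetadata{gm},
	)
	fmt.Println(igm.ClientId, len(igm.ClientMetadata))
}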
+type GenesisState struct { + // client states with their corresponding identifiers + Clients IdentifiedClientStates `protobuf:"bytes,1,rep,name=clients,proto3,castrepeated=IdentifiedClientStates" json:"clients"` + // consensus states from each client + ClientsConsensus ClientsConsensusStates `protobuf:"bytes,2,rep,name=clients_consensus,json=clientsConsensus,proto3,castrepeated=ClientsConsensusStates" json:"clients_consensus" yaml:"clients_consensus"` + // metadata from each client + ClientsMetadata []IdentifiedGenesisMetadata `protobuf:"bytes,3,rep,name=clients_metadata,json=clientsMetadata,proto3" json:"clients_metadata" yaml:"clients_metadata"` + Params Params `protobuf:"bytes,4,opt,name=params,proto3" json:"params"` + // create localhost on initialization + CreateLocalhost bool `protobuf:"varint,5,opt,name=create_localhost,json=createLocalhost,proto3" json:"create_localhost,omitempty" yaml:"create_localhost"` + // the sequence for the next generated client identifier + NextClientSequence uint64 `protobuf:"varint,6,opt,name=next_client_sequence,json=nextClientSequence,proto3" json:"next_client_sequence,omitempty" yaml:"next_client_sequence"` +} + +func (m *GenesisState) Reset() { *m = GenesisState{} } +func (m *GenesisState) String() string { return proto.CompactTextString(m) } +func (*GenesisState) ProtoMessage() {} +func (*GenesisState) Descriptor() ([]byte, []int) { + return fileDescriptor_a1110e97fc5e4abf, []int{0} +} +func (m *GenesisState) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *GenesisState) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_GenesisState.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *GenesisState) XXX_Merge(src proto.Message) { + xxx_messageInfo_GenesisState.Merge(m, src) +} +func (m *GenesisState) XXX_Size() int { + return m.Size() +} +func (m *GenesisState) XXX_DiscardUnknown() { + xxx_messageInfo_GenesisState.DiscardUnknown(m) +} + +var xxx_messageInfo_GenesisState proto.InternalMessageInfo + +func (m *GenesisState) GetClients() IdentifiedClientStates { + if m != nil { + return m.Clients + } + return nil +} + +func (m *GenesisState) GetClientsConsensus() ClientsConsensusStates { + if m != nil { + return m.ClientsConsensus + } + return nil +} + +func (m *GenesisState) GetClientsMetadata() []IdentifiedGenesisMetadata { + if m != nil { + return m.ClientsMetadata + } + return nil +} + +func (m *GenesisState) GetParams() Params { + if m != nil { + return m.Params + } + return Params{} +} + +func (m *GenesisState) GetCreateLocalhost() bool { + if m != nil { + return m.CreateLocalhost + } + return false +} + +func (m *GenesisState) GetNextClientSequence() uint64 { + if m != nil { + return m.NextClientSequence + } + return 0 +} + +// GenesisMetadata defines the genesis type for metadata that clients may return +// with ExportMetadata +type GenesisMetadata struct { + // store key of metadata without clientID-prefix + Key []byte `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"` + // metadata value + Value []byte `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` +} + +func (m *GenesisMetadata) Reset() { *m = GenesisMetadata{} } +func (m *GenesisMetadata) String() string { return proto.CompactTextString(m) } +func (*GenesisMetadata) ProtoMessage() {} +func (*GenesisMetadata) Descriptor() ([]byte, []int) { + return 
fileDescriptor_a1110e97fc5e4abf, []int{1} +} +func (m *GenesisMetadata) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *GenesisMetadata) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_GenesisMetadata.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *GenesisMetadata) XXX_Merge(src proto.Message) { + xxx_messageInfo_GenesisMetadata.Merge(m, src) +} +func (m *GenesisMetadata) XXX_Size() int { + return m.Size() +} +func (m *GenesisMetadata) XXX_DiscardUnknown() { + xxx_messageInfo_GenesisMetadata.DiscardUnknown(m) +} + +var xxx_messageInfo_GenesisMetadata proto.InternalMessageInfo + +// IdentifiedGenesisMetadata has the client metadata with the corresponding +// client id. +type IdentifiedGenesisMetadata struct { + ClientId string `protobuf:"bytes,1,opt,name=client_id,json=clientId,proto3" json:"client_id,omitempty" yaml:"client_id"` + ClientMetadata []GenesisMetadata `protobuf:"bytes,2,rep,name=client_metadata,json=clientMetadata,proto3" json:"client_metadata" yaml:"client_metadata"` +} + +func (m *IdentifiedGenesisMetadata) Reset() { *m = IdentifiedGenesisMetadata{} } +func (m *IdentifiedGenesisMetadata) String() string { return proto.CompactTextString(m) } +func (*IdentifiedGenesisMetadata) ProtoMessage() {} +func (*IdentifiedGenesisMetadata) Descriptor() ([]byte, []int) { + return fileDescriptor_a1110e97fc5e4abf, []int{2} +} +func (m *IdentifiedGenesisMetadata) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *IdentifiedGenesisMetadata) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_IdentifiedGenesisMetadata.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *IdentifiedGenesisMetadata) XXX_Merge(src proto.Message) { + xxx_messageInfo_IdentifiedGenesisMetadata.Merge(m, src) +} +func (m *IdentifiedGenesisMetadata) XXX_Size() int { + return m.Size() +} +func (m *IdentifiedGenesisMetadata) XXX_DiscardUnknown() { + xxx_messageInfo_IdentifiedGenesisMetadata.DiscardUnknown(m) +} + +var xxx_messageInfo_IdentifiedGenesisMetadata proto.InternalMessageInfo + +func (m *IdentifiedGenesisMetadata) GetClientId() string { + if m != nil { + return m.ClientId + } + return "" +} + +func (m *IdentifiedGenesisMetadata) GetClientMetadata() []GenesisMetadata { + if m != nil { + return m.ClientMetadata + } + return nil +} + +func init() { + proto.RegisterType((*GenesisState)(nil), "ibcgo.core.client.v1.GenesisState") + proto.RegisterType((*GenesisMetadata)(nil), "ibcgo.core.client.v1.GenesisMetadata") + proto.RegisterType((*IdentifiedGenesisMetadata)(nil), "ibcgo.core.client.v1.IdentifiedGenesisMetadata") +} + +func init() { + proto.RegisterFile("ibcgo/core/client/v1/genesis.proto", fileDescriptor_a1110e97fc5e4abf) +} + +var fileDescriptor_a1110e97fc5e4abf = []byte{ + // 531 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x84, 0x53, 0xc1, 0x6e, 0xd3, 0x40, + 0x14, 0xcc, 0xb6, 0x69, 0x68, 0xb7, 0x15, 0x0d, 0xab, 0xa8, 0x98, 0x16, 0xd9, 0xc1, 0x12, 0x92, + 0x25, 0x14, 0x9b, 0x84, 0x5b, 0x2e, 0x48, 0xae, 0x44, 0x55, 0x09, 0x24, 0x30, 0x37, 0x2e, 0xd6, + 0x66, 0xbd, 0xb8, 0x16, 0xb6, 0x37, 0x64, 0x37, 0x11, 0x11, 0x3f, 0xc0, 0x91, 0x03, 0x1f, 0xc0, + 0x99, 0x8f, 0xe0, 
0xdc, 0x63, 0x8f, 0x9c, 0x42, 0x95, 0xfc, 0x41, 0xbe, 0x00, 0x79, 0x77, 0x4d, + 0x5b, 0x63, 0xe0, 0xf6, 0x32, 0x9e, 0x99, 0x37, 0x9a, 0x97, 0x85, 0x76, 0x32, 0x22, 0x31, 0xf3, + 0x08, 0x9b, 0x50, 0x8f, 0xa4, 0x09, 0xcd, 0x85, 0x37, 0xeb, 0x7b, 0x31, 0xcd, 0x29, 0x4f, 0xb8, + 0x3b, 0x9e, 0x30, 0xc1, 0x50, 0x47, 0x72, 0xdc, 0x82, 0xe3, 0x2a, 0x8e, 0x3b, 0xeb, 0x1f, 0x3e, + 0xa8, 0x55, 0xea, 0xef, 0x52, 0x78, 0xd8, 0x89, 0x59, 0xcc, 0xe4, 0xe8, 0x15, 0x93, 0x42, 0xed, + 0xcb, 0x26, 0xdc, 0x3b, 0x51, 0x0b, 0x5e, 0x0b, 0x2c, 0x28, 0xa2, 0xf0, 0x96, 0x92, 0x71, 0x03, + 0x74, 0x37, 0x9d, 0xdd, 0xc1, 0x23, 0xb7, 0x6e, 0xa3, 0x7b, 0x1a, 0xd1, 0x5c, 0x24, 0x6f, 0x13, + 0x1a, 0x1d, 0x4b, 0x4c, 0xaa, 0x7d, 0xf3, 0x7c, 0x61, 0x35, 0xbe, 0xfd, 0xb4, 0x0e, 0x6a, 0x3f, + 0xf3, 0xa0, 0xf4, 0x46, 0x5f, 0x00, 0xbc, 0xa3, 0xe7, 0x90, 0xb0, 0x9c, 0xd3, 0x9c, 0x4f, 0xb9, + 0xb1, 0xf1, 0xaf, 0x8d, 0xca, 0xe8, 0xb8, 0x24, 0x2b, 0x47, 0x7f, 0x58, 0x6c, 0x5c, 0x2f, 0x2c, + 0x63, 0x8e, 0xb3, 0x74, 0x68, 0xff, 0xe1, 0x69, 0x17, 0x69, 0x94, 0x94, 0x57, 0xb4, 0x41, 0x9b, + 0x54, 0x70, 0xf4, 0x11, 0x96, 0x58, 0x98, 0x51, 0x81, 0x23, 0x2c, 0xb0, 0xb1, 0x29, 0x43, 0x79, + 0xff, 0xab, 0x41, 0xb7, 0xf8, 0x42, 0xcb, 0x7c, 0x4b, 0x07, 0xbb, 0x7b, 0x33, 0x58, 0x69, 0x6b, + 0x07, 0xfb, 0x1a, 0x2a, 0x15, 0x68, 0x08, 0x5b, 0x63, 0x3c, 0xc1, 0x19, 0x37, 0x9a, 0x5d, 0xe0, + 0xec, 0x0e, 0xee, 0xd7, 0xaf, 0x7c, 0x29, 0x39, 0x7e, 0xb3, 0xf0, 0x0f, 0xb4, 0x02, 0x3d, 0x83, + 0x6d, 0x32, 0xa1, 0x58, 0xd0, 0x30, 0x65, 0x04, 0xa7, 0x67, 0x8c, 0x0b, 0x63, 0xab, 0x0b, 0x9c, + 0x6d, 0xff, 0xe8, 0x5a, 0x86, 0x0a, 0xa3, 0xc8, 0x20, 0xa1, 0xe7, 0x25, 0x82, 0x5e, 0xc1, 0x4e, + 0x4e, 0x3f, 0x88, 0x50, 0xad, 0x0b, 0x39, 0x7d, 0x3f, 0xa5, 0x39, 0xa1, 0x46, 0xab, 0x0b, 0x9c, + 0xa6, 0x6f, 0xad, 0x17, 0xd6, 0x91, 0xf2, 0xaa, 0x63, 0xd9, 0x01, 0x2a, 0x60, 0x7d, 0xf1, 0x12, + 0x7c, 0x0a, 0xf7, 0x2b, 0xdd, 0xa0, 0x36, 0xdc, 0x7c, 0x47, 0xe7, 0x06, 0xe8, 0x02, 0x67, 0x2f, + 0x28, 0x46, 0xd4, 0x81, 0x5b, 0x33, 0x9c, 0x4e, 0xa9, 0xb1, 0x21, 0x31, 0xf5, 0x63, 0xd8, 0xfc, + 0xf4, 0xd5, 0x6a, 0xd8, 0xdf, 0x01, 0xbc, 0xf7, 0xd7, 0x9e, 0x51, 0x1f, 0xee, 0xe8, 0x18, 0x49, + 0x24, 0x1d, 0x77, 0xfc, 0xce, 0x7a, 0x61, 0xb5, 0xaf, 0xd7, 0x1e, 0x26, 0x91, 0x1d, 0x6c, 0xab, + 0xf9, 0x34, 0x42, 0x39, 0xd4, 0xdd, 0x5f, 0x1d, 0x59, 0xfd, 0xf3, 0x1e, 0xd6, 0x37, 0x5e, 0x3d, + 0xad, 0xa9, 0x4f, 0x7b, 0x70, 0x63, 0xc7, 0xd5, 0x65, 0x6f, 0x2b, 0xe4, 0x37, 0xff, 0xe4, 0x7c, + 0x69, 0x82, 0x8b, 0xa5, 0x09, 0x2e, 0x97, 0x26, 0xf8, 0xbc, 0x32, 0x1b, 0x17, 0x2b, 0xb3, 0xf1, + 0x63, 0x65, 0x36, 0xde, 0xf4, 0xe2, 0x44, 0x9c, 0x4d, 0x47, 0x2e, 0x61, 0x99, 0x47, 0x18, 0xcf, + 0x18, 0xf7, 0x92, 0x11, 0xe9, 0x95, 0x4f, 0xf9, 0xf1, 0xa0, 0xa7, 0x5f, 0xb3, 0x98, 0x8f, 0x29, + 0x1f, 0xb5, 0xe4, 0xa3, 0x7d, 0xf2, 0x2b, 0x00, 0x00, 0xff, 0xff, 0x7e, 0x8f, 0x24, 0x27, 0x29, + 0x04, 0x00, 0x00, +} + +func (m *GenesisState) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *GenesisState) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *GenesisState) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.NextClientSequence != 0 { + i = encodeVarintGenesis(dAtA, i, uint64(m.NextClientSequence)) + i-- + dAtA[i] = 0x30 + } + if m.CreateLocalhost { + i-- + if m.CreateLocalhost { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + 
dAtA[i] = 0x28 + } + { + size, err := m.Params.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenesis(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + if len(m.ClientsMetadata) > 0 { + for iNdEx := len(m.ClientsMetadata) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.ClientsMetadata[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenesis(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + } + if len(m.ClientsConsensus) > 0 { + for iNdEx := len(m.ClientsConsensus) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.ClientsConsensus[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenesis(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + if len(m.Clients) > 0 { + for iNdEx := len(m.Clients) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Clients[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenesis(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *GenesisMetadata) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *GenesisMetadata) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *GenesisMetadata) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Value) > 0 { + i -= len(m.Value) + copy(dAtA[i:], m.Value) + i = encodeVarintGenesis(dAtA, i, uint64(len(m.Value))) + i-- + dAtA[i] = 0x12 + } + if len(m.Key) > 0 { + i -= len(m.Key) + copy(dAtA[i:], m.Key) + i = encodeVarintGenesis(dAtA, i, uint64(len(m.Key))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *IdentifiedGenesisMetadata) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *IdentifiedGenesisMetadata) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *IdentifiedGenesisMetadata) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.ClientMetadata) > 0 { + for iNdEx := len(m.ClientMetadata) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.ClientMetadata[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenesis(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + if len(m.ClientId) > 0 { + i -= len(m.ClientId) + copy(dAtA[i:], m.ClientId) + i = encodeVarintGenesis(dAtA, i, uint64(len(m.ClientId))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func encodeVarintGenesis(dAtA []byte, offset int, v uint64) int { + offset -= sovGenesis(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *GenesisState) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Clients) > 0 { + for _, e := range m.Clients { + l = e.Size() + n += 1 + l + sovGenesis(uint64(l)) + } + } + if len(m.ClientsConsensus) > 0 { + for _, e := range m.ClientsConsensus { + l = e.Size() + n += 1 + l + sovGenesis(uint64(l)) + } + } + if 
len(m.ClientsMetadata) > 0 { + for _, e := range m.ClientsMetadata { + l = e.Size() + n += 1 + l + sovGenesis(uint64(l)) + } + } + l = m.Params.Size() + n += 1 + l + sovGenesis(uint64(l)) + if m.CreateLocalhost { + n += 2 + } + if m.NextClientSequence != 0 { + n += 1 + sovGenesis(uint64(m.NextClientSequence)) + } + return n +} + +func (m *GenesisMetadata) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Key) + if l > 0 { + n += 1 + l + sovGenesis(uint64(l)) + } + l = len(m.Value) + if l > 0 { + n += 1 + l + sovGenesis(uint64(l)) + } + return n +} + +func (m *IdentifiedGenesisMetadata) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.ClientId) + if l > 0 { + n += 1 + l + sovGenesis(uint64(l)) + } + if len(m.ClientMetadata) > 0 { + for _, e := range m.ClientMetadata { + l = e.Size() + n += 1 + l + sovGenesis(uint64(l)) + } + } + return n +} + +func sovGenesis(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozGenesis(x uint64) (n int) { + return sovGenesis(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *GenesisState) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenesis + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: GenesisState: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: GenesisState: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Clients", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenesis + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenesis + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenesis + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Clients = append(m.Clients, IdentifiedClientState{}) + if err := m.Clients[len(m.Clients)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ClientsConsensus", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenesis + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenesis + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenesis + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ClientsConsensus = append(m.ClientsConsensus, ClientConsensusStates{}) + if err := m.ClientsConsensus[len(m.ClientsConsensus)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ClientsMetadata", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenesis + } + if 
iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenesis + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenesis + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ClientsMetadata = append(m.ClientsMetadata, IdentifiedGenesisMetadata{}) + if err := m.ClientsMetadata[len(m.ClientsMetadata)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Params", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenesis + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenesis + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenesis + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Params.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field CreateLocalhost", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenesis + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.CreateLocalhost = bool(v != 0) + case 6: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field NextClientSequence", wireType) + } + m.NextClientSequence = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenesis + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.NextClientSequence |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipGenesis(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenesis + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *GenesisMetadata) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenesis + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: GenesisMetadata: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: GenesisMetadata: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenesis + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthGenesis + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return 
ErrInvalidLengthGenesis + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Key = append(m.Key[:0], dAtA[iNdEx:postIndex]...) + if m.Key == nil { + m.Key = []byte{} + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenesis + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthGenesis + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthGenesis + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Value = append(m.Value[:0], dAtA[iNdEx:postIndex]...) + if m.Value == nil { + m.Value = []byte{} + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenesis(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenesis + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *IdentifiedGenesisMetadata) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenesis + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: IdentifiedGenesisMetadata: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: IdentifiedGenesisMetadata: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ClientId", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenesis + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenesis + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenesis + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ClientId = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ClientMetadata", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenesis + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenesis + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenesis + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ClientMetadata = append(m.ClientMetadata, GenesisMetadata{}) + if err := m.ClientMetadata[len(m.ClientMetadata)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenesis(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || 
(iNdEx+skippy) < 0 { + return ErrInvalidLengthGenesis + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipGenesis(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenesis + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenesis + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenesis + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthGenesis + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupGenesis + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthGenesis + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthGenesis = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowGenesis = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupGenesis = fmt.Errorf("proto: unexpected end of group") +) diff --git a/core/02-client/types/genesis_test.go b/core/02-client/types/genesis_test.go new file mode 100644 index 0000000000..d57b8d1ba5 --- /dev/null +++ b/core/02-client/types/genesis_test.go @@ -0,0 +1,549 @@ +package types_test + +import ( + "time" + + tmtypes "github.com/tendermint/tendermint/types" + + client "github.com/cosmos/cosmos-sdk/x/ibc/core/02-client" + "github.com/cosmos/cosmos-sdk/x/ibc/core/02-client/types" + channeltypes "github.com/cosmos/cosmos-sdk/x/ibc/core/04-channel/types" + commitmenttypes "github.com/cosmos/cosmos-sdk/x/ibc/core/23-commitment/types" + "github.com/cosmos/cosmos-sdk/x/ibc/core/exported" + ibctmtypes "github.com/cosmos/cosmos-sdk/x/ibc/light-clients/07-tendermint/types" + localhosttypes "github.com/cosmos/cosmos-sdk/x/ibc/light-clients/09-localhost/types" + ibctesting "github.com/cosmos/cosmos-sdk/x/ibc/testing" + ibctestingmock "github.com/cosmos/cosmos-sdk/x/ibc/testing/mock" +) + +const ( + chainID = "chainID" + tmClientID0 = "07-tendermint-0" + tmClientID1 = "07-tendermint-1" + invalidClientID = "myclient-0" + clientID = tmClientID0 + + height = 10 +) + +var clientHeight = types.NewHeight(0, 10) + +func (suite *TypesTestSuite) TestMarshalGenesisState() { + cdc := suite.chainA.App.AppCodec() + clientA, _, _, _, _, _ := suite.coordinator.Setup(suite.chainA, suite.chainB, channeltypes.ORDERED) + suite.coordinator.UpdateClient(suite.chainA, suite.chainB, clientA, exported.Tendermint) + + genesis := client.ExportGenesis(suite.chainA.GetContext(), suite.chainA.App.IBCKeeper.ClientKeeper) + + bz, err := cdc.MarshalJSON(&genesis) + suite.Require().NoError(err) + suite.Require().NotNil(bz) + + var gs types.GenesisState + err = cdc.UnmarshalJSON(bz, &gs) + 
suite.Require().NoError(err) +} + +func (suite *TypesTestSuite) TestValidateGenesis() { + privVal := ibctestingmock.NewPV() + pubKey, err := privVal.GetPubKey() + suite.Require().NoError(err) + + now := time.Now().UTC() + + val := tmtypes.NewValidator(pubKey, 10) + valSet := tmtypes.NewValidatorSet([]*tmtypes.Validator{val}) + + heightMinus1 := types.NewHeight(0, height-1) + header := suite.chainA.CreateTMClientHeader(chainID, int64(clientHeight.RevisionHeight), heightMinus1, now, valSet, valSet, []tmtypes.PrivValidator{privVal}) + + testCases := []struct { + name string + genState types.GenesisState + expPass bool + }{ + { + name: "default", + genState: types.DefaultGenesisState(), + expPass: true, + }, + { + name: "valid custom genesis", + genState: types.NewGenesisState( + []types.IdentifiedClientState{ + types.NewIdentifiedClientState( + tmClientID0, ibctmtypes.NewClientState(chainID, ibctesting.DefaultTrustLevel, ibctesting.TrustingPeriod, ibctesting.UnbondingPeriod, ibctesting.MaxClockDrift, clientHeight, commitmenttypes.GetSDKSpecs(), ibctesting.UpgradePath, false, false), + ), + types.NewIdentifiedClientState( + exported.Localhost+"-1", localhosttypes.NewClientState("chainID", clientHeight), + ), + }, + []types.ClientConsensusStates{ + types.NewClientConsensusStates( + tmClientID0, + []types.ConsensusStateWithHeight{ + types.NewConsensusStateWithHeight( + header.GetHeight().(types.Height), + ibctmtypes.NewConsensusState( + header.GetTime(), commitmenttypes.NewMerkleRoot(header.Header.GetAppHash()), header.Header.NextValidatorsHash, + ), + ), + }, + ), + }, + []types.IdentifiedGenesisMetadata{ + types.NewIdentifiedGenesisMetadata( + clientID, + []types.GenesisMetadata{ + types.NewGenesisMetadata([]byte("key1"), []byte("val1")), + types.NewGenesisMetadata([]byte("key2"), []byte("val2")), + }, + ), + }, + types.NewParams(exported.Tendermint, exported.Localhost), + false, + 2, + ), + expPass: true, + }, + { + name: "invalid clientid", + genState: types.NewGenesisState( + []types.IdentifiedClientState{ + types.NewIdentifiedClientState( + invalidClientID, ibctmtypes.NewClientState(chainID, ibctmtypes.DefaultTrustLevel, ibctesting.TrustingPeriod, ibctesting.UnbondingPeriod, ibctesting.MaxClockDrift, clientHeight, commitmenttypes.GetSDKSpecs(), ibctesting.UpgradePath, false, false), + ), + types.NewIdentifiedClientState( + exported.Localhost, localhosttypes.NewClientState("chainID", clientHeight), + ), + }, + []types.ClientConsensusStates{ + types.NewClientConsensusStates( + invalidClientID, + []types.ConsensusStateWithHeight{ + types.NewConsensusStateWithHeight( + header.GetHeight().(types.Height), + ibctmtypes.NewConsensusState( + header.GetTime(), commitmenttypes.NewMerkleRoot(header.Header.GetAppHash()), header.Header.NextValidatorsHash, + ), + ), + }, + ), + }, + nil, + types.NewParams(exported.Tendermint), + false, + 0, + ), + expPass: false, + }, + { + name: "invalid client", + genState: types.NewGenesisState( + []types.IdentifiedClientState{ + types.NewIdentifiedClientState( + tmClientID0, ibctmtypes.NewClientState(chainID, ibctmtypes.DefaultTrustLevel, ibctesting.TrustingPeriod, ibctesting.UnbondingPeriod, ibctesting.MaxClockDrift, clientHeight, commitmenttypes.GetSDKSpecs(), ibctesting.UpgradePath, false, false), + ), + types.NewIdentifiedClientState(exported.Localhost, localhosttypes.NewClientState("chaindID", types.ZeroHeight())), + }, + nil, + nil, + types.NewParams(exported.Tendermint), + false, + 0, + ), + expPass: false, + }, + { + name: "consensus state client id does not 
match client id in genesis clients", + genState: types.NewGenesisState( + []types.IdentifiedClientState{ + types.NewIdentifiedClientState( + tmClientID0, ibctmtypes.NewClientState(chainID, ibctmtypes.DefaultTrustLevel, ibctesting.TrustingPeriod, ibctesting.UnbondingPeriod, ibctesting.MaxClockDrift, clientHeight, commitmenttypes.GetSDKSpecs(), ibctesting.UpgradePath, false, false), + ), + types.NewIdentifiedClientState( + exported.Localhost, localhosttypes.NewClientState("chaindID", clientHeight), + ), + }, + []types.ClientConsensusStates{ + types.NewClientConsensusStates( + tmClientID1, + []types.ConsensusStateWithHeight{ + types.NewConsensusStateWithHeight( + types.NewHeight(0, 1), + ibctmtypes.NewConsensusState( + header.GetTime(), commitmenttypes.NewMerkleRoot(header.Header.GetAppHash()), header.Header.NextValidatorsHash, + ), + ), + }, + ), + }, + nil, + types.NewParams(exported.Tendermint), + false, + 0, + ), + expPass: false, + }, + { + name: "invalid consensus state height", + genState: types.NewGenesisState( + []types.IdentifiedClientState{ + types.NewIdentifiedClientState( + tmClientID0, ibctmtypes.NewClientState(chainID, ibctmtypes.DefaultTrustLevel, ibctesting.TrustingPeriod, ibctesting.UnbondingPeriod, ibctesting.MaxClockDrift, clientHeight, commitmenttypes.GetSDKSpecs(), ibctesting.UpgradePath, false, false), + ), + types.NewIdentifiedClientState( + exported.Localhost, localhosttypes.NewClientState("chaindID", clientHeight), + ), + }, + []types.ClientConsensusStates{ + types.NewClientConsensusStates( + tmClientID0, + []types.ConsensusStateWithHeight{ + types.NewConsensusStateWithHeight( + types.ZeroHeight(), + ibctmtypes.NewConsensusState( + header.GetTime(), commitmenttypes.NewMerkleRoot(header.Header.GetAppHash()), header.Header.NextValidatorsHash, + ), + ), + }, + ), + }, + nil, + types.NewParams(exported.Tendermint), + false, + 0, + ), + expPass: false, + }, + { + name: "invalid consensus state", + genState: types.NewGenesisState( + []types.IdentifiedClientState{ + types.NewIdentifiedClientState( + tmClientID0, ibctmtypes.NewClientState(chainID, ibctmtypes.DefaultTrustLevel, ibctesting.TrustingPeriod, ibctesting.UnbondingPeriod, ibctesting.MaxClockDrift, clientHeight, commitmenttypes.GetSDKSpecs(), ibctesting.UpgradePath, false, false), + ), + types.NewIdentifiedClientState( + exported.Localhost, localhosttypes.NewClientState("chaindID", clientHeight), + ), + }, + []types.ClientConsensusStates{ + types.NewClientConsensusStates( + tmClientID0, + []types.ConsensusStateWithHeight{ + types.NewConsensusStateWithHeight( + types.NewHeight(0, 1), + ibctmtypes.NewConsensusState( + time.Time{}, commitmenttypes.NewMerkleRoot(header.Header.GetAppHash()), header.Header.NextValidatorsHash, + ), + ), + }, + ), + }, + nil, + types.NewParams(exported.Tendermint), + false, + 0, + ), + expPass: false, + }, + { + name: "client in genesis clients is disallowed by params", + genState: types.NewGenesisState( + []types.IdentifiedClientState{ + types.NewIdentifiedClientState( + tmClientID0, ibctmtypes.NewClientState(chainID, ibctesting.DefaultTrustLevel, ibctesting.TrustingPeriod, ibctesting.UnbondingPeriod, ibctesting.MaxClockDrift, clientHeight, commitmenttypes.GetSDKSpecs(), ibctesting.UpgradePath, false, false), + ), + types.NewIdentifiedClientState( + exported.Localhost, localhosttypes.NewClientState("chainID", clientHeight), + ), + }, + []types.ClientConsensusStates{ + types.NewClientConsensusStates( + tmClientID0, + []types.ConsensusStateWithHeight{ + types.NewConsensusStateWithHeight( + 
header.GetHeight().(types.Height), + ibctmtypes.NewConsensusState( + header.GetTime(), commitmenttypes.NewMerkleRoot(header.Header.GetAppHash()), header.Header.NextValidatorsHash, + ), + ), + }, + ), + }, + nil, + types.NewParams(exported.Solomachine), + false, + 0, + ), + expPass: false, + }, + { + name: "metadata client-id does not match a genesis client", + genState: types.NewGenesisState( + []types.IdentifiedClientState{ + types.NewIdentifiedClientState( + clientID, ibctmtypes.NewClientState(chainID, ibctesting.DefaultTrustLevel, ibctesting.TrustingPeriod, ibctesting.UnbondingPeriod, ibctesting.MaxClockDrift, clientHeight, commitmenttypes.GetSDKSpecs(), ibctesting.UpgradePath, false, false), + ), + types.NewIdentifiedClientState( + exported.Localhost, localhosttypes.NewClientState("chainID", clientHeight), + ), + }, + []types.ClientConsensusStates{ + types.NewClientConsensusStates( + clientID, + []types.ConsensusStateWithHeight{ + types.NewConsensusStateWithHeight( + header.GetHeight().(types.Height), + ibctmtypes.NewConsensusState( + header.GetTime(), commitmenttypes.NewMerkleRoot(header.Header.GetAppHash()), header.Header.NextValidatorsHash, + ), + ), + }, + ), + }, + []types.IdentifiedGenesisMetadata{ + types.NewIdentifiedGenesisMetadata( + "wrongclientid", + []types.GenesisMetadata{ + types.NewGenesisMetadata([]byte("key1"), []byte("val1")), + types.NewGenesisMetadata([]byte("key2"), []byte("val2")), + }, + ), + }, + types.NewParams(exported.Tendermint, exported.Localhost), + false, + 0, + ), + expPass: false, + }, + { + name: "invalid metadata", + genState: types.NewGenesisState( + []types.IdentifiedClientState{ + types.NewIdentifiedClientState( + clientID, ibctmtypes.NewClientState(chainID, ibctmtypes.DefaultTrustLevel, ibctesting.TrustingPeriod, ibctesting.UnbondingPeriod, ibctesting.MaxClockDrift, clientHeight, commitmenttypes.GetSDKSpecs(), ibctesting.UpgradePath, false, false), + ), + }, + []types.ClientConsensusStates{ + types.NewClientConsensusStates( + clientID, + []types.ConsensusStateWithHeight{ + types.NewConsensusStateWithHeight( + header.GetHeight().(types.Height), + ibctmtypes.NewConsensusState( + header.GetTime(), commitmenttypes.NewMerkleRoot(header.Header.GetAppHash()), header.Header.NextValidatorsHash, + ), + ), + }, + ), + }, + []types.IdentifiedGenesisMetadata{ + types.NewIdentifiedGenesisMetadata( + clientID, + []types.GenesisMetadata{ + types.NewGenesisMetadata([]byte(""), []byte("val1")), + types.NewGenesisMetadata([]byte("key2"), []byte("val2")), + }, + ), + }, + types.NewParams(exported.Tendermint), + false, + 0, + ), + }, + { + name: "invalid params", + genState: types.NewGenesisState( + []types.IdentifiedClientState{ + types.NewIdentifiedClientState( + tmClientID0, ibctmtypes.NewClientState(chainID, ibctesting.DefaultTrustLevel, ibctesting.TrustingPeriod, ibctesting.UnbondingPeriod, ibctesting.MaxClockDrift, clientHeight, commitmenttypes.GetSDKSpecs(), ibctesting.UpgradePath, false, false), + ), + types.NewIdentifiedClientState( + exported.Localhost, localhosttypes.NewClientState("chainID", clientHeight), + ), + }, + []types.ClientConsensusStates{ + types.NewClientConsensusStates( + tmClientID0, + []types.ConsensusStateWithHeight{ + types.NewConsensusStateWithHeight( + header.GetHeight().(types.Height), + ibctmtypes.NewConsensusState( + header.GetTime(), commitmenttypes.NewMerkleRoot(header.Header.GetAppHash()), header.Header.NextValidatorsHash, + ), + ), + }, + ), + }, + nil, + types.NewParams(" "), + false, + 0, + ), + expPass: false, + }, + { + name: 
"invalid param", + genState: types.NewGenesisState( + []types.IdentifiedClientState{ + types.NewIdentifiedClientState( + tmClientID0, ibctmtypes.NewClientState(chainID, ibctesting.DefaultTrustLevel, ibctesting.TrustingPeriod, ibctesting.UnbondingPeriod, ibctesting.MaxClockDrift, clientHeight, commitmenttypes.GetSDKSpecs(), ibctesting.UpgradePath, false, false), + ), + types.NewIdentifiedClientState( + exported.Localhost, localhosttypes.NewClientState("chainID", clientHeight), + ), + }, + []types.ClientConsensusStates{ + types.NewClientConsensusStates( + tmClientID0, + []types.ConsensusStateWithHeight{ + types.NewConsensusStateWithHeight( + header.GetHeight().(types.Height), + ibctmtypes.NewConsensusState( + header.GetTime(), commitmenttypes.NewMerkleRoot(header.Header.GetAppHash()), header.Header.NextValidatorsHash, + ), + ), + }, + ), + }, + nil, + types.NewParams(" "), + true, + 0, + ), + expPass: false, + }, + { + name: "localhost client not registered on allowlist", + genState: types.NewGenesisState( + []types.IdentifiedClientState{ + types.NewIdentifiedClientState( + tmClientID1, ibctmtypes.NewClientState(chainID, ibctesting.DefaultTrustLevel, ibctesting.TrustingPeriod, ibctesting.UnbondingPeriod, ibctesting.MaxClockDrift, clientHeight, commitmenttypes.GetSDKSpecs(), ibctesting.UpgradePath, false, false), + ), + types.NewIdentifiedClientState( + exported.Localhost+"-0", localhosttypes.NewClientState("chainID", clientHeight), + ), + }, + []types.ClientConsensusStates{ + types.NewClientConsensusStates( + tmClientID1, + []types.ConsensusStateWithHeight{ + types.NewConsensusStateWithHeight( + header.GetHeight().(types.Height), + ibctmtypes.NewConsensusState( + header.GetTime(), commitmenttypes.NewMerkleRoot(header.Header.GetAppHash()), header.Header.NextValidatorsHash, + ), + ), + }, + ), + }, + nil, + types.NewParams(exported.Tendermint), + true, + 2, + ), + expPass: false, + }, + { + name: "next sequence too small", + genState: types.NewGenesisState( + []types.IdentifiedClientState{ + types.NewIdentifiedClientState( + tmClientID0, ibctmtypes.NewClientState(chainID, ibctesting.DefaultTrustLevel, ibctesting.TrustingPeriod, ibctesting.UnbondingPeriod, ibctesting.MaxClockDrift, clientHeight, commitmenttypes.GetSDKSpecs(), ibctesting.UpgradePath, false, false), + ), + types.NewIdentifiedClientState( + exported.Localhost+"-1", localhosttypes.NewClientState("chainID", clientHeight), + ), + }, + []types.ClientConsensusStates{ + types.NewClientConsensusStates( + tmClientID0, + []types.ConsensusStateWithHeight{ + types.NewConsensusStateWithHeight( + header.GetHeight().(types.Height), + ibctmtypes.NewConsensusState( + header.GetTime(), commitmenttypes.NewMerkleRoot(header.Header.GetAppHash()), header.Header.NextValidatorsHash, + ), + ), + }, + ), + }, + nil, + types.NewParams(exported.Tendermint, exported.Localhost), + false, + 0, + ), + expPass: false, + }, + { + name: "failed to parse client identifier in client state loop", + genState: types.NewGenesisState( + []types.IdentifiedClientState{ + types.NewIdentifiedClientState( + "my-client", ibctmtypes.NewClientState(chainID, ibctesting.DefaultTrustLevel, ibctesting.TrustingPeriod, ibctesting.UnbondingPeriod, ibctesting.MaxClockDrift, clientHeight, commitmenttypes.GetSDKSpecs(), ibctesting.UpgradePath, false, false), + ), + types.NewIdentifiedClientState( + exported.Localhost+"-1", localhosttypes.NewClientState("chainID", clientHeight), + ), + }, + []types.ClientConsensusStates{ + types.NewClientConsensusStates( + tmClientID0, + 
[]types.ConsensusStateWithHeight{ + types.NewConsensusStateWithHeight( + header.GetHeight().(types.Height), + ibctmtypes.NewConsensusState( + header.GetTime(), commitmenttypes.NewMerkleRoot(header.Header.GetAppHash()), header.Header.NextValidatorsHash, + ), + ), + }, + ), + }, + nil, + types.NewParams(exported.Tendermint, exported.Localhost), + false, + 5, + ), + expPass: false, + }, + { + name: "consensus state different than client state type", + genState: types.NewGenesisState( + []types.IdentifiedClientState{ + types.NewIdentifiedClientState( + exported.Localhost+"-1", localhosttypes.NewClientState("chainID", clientHeight), + ), + }, + []types.ClientConsensusStates{ + types.NewClientConsensusStates( + exported.Localhost+"-1", + []types.ConsensusStateWithHeight{ + types.NewConsensusStateWithHeight( + header.GetHeight().(types.Height), + ibctmtypes.NewConsensusState( + header.GetTime(), commitmenttypes.NewMerkleRoot(header.Header.GetAppHash()), header.Header.NextValidatorsHash, + ), + ), + }, + ), + }, + nil, + types.NewParams(exported.Tendermint, exported.Localhost), + false, + 5, + ), + expPass: false, + }, + } + + for _, tc := range testCases { + tc := tc + err := tc.genState.Validate() + if tc.expPass { + suite.Require().NoError(err, tc.name) + } else { + suite.Require().Error(err, tc.name) + } + } +} diff --git a/core/02-client/types/height.go b/core/02-client/types/height.go new file mode 100644 index 0000000000..4216d54e66 --- /dev/null +++ b/core/02-client/types/height.go @@ -0,0 +1,188 @@ +package types + +import ( + "fmt" + "math/big" + "regexp" + "strconv" + "strings" + + sdk "github.com/cosmos/cosmos-sdk/types" + sdkerrors "github.com/cosmos/cosmos-sdk/types/errors" + "github.com/cosmos/cosmos-sdk/x/ibc/core/exported" +) + +var _ exported.Height = (*Height)(nil) + +// IsRevisionFormat checks if a chainID is in the format required for parsing revisions +// The chainID must be in the form: `{chainID}-{revision} +// 24-host may enforce stricter checks on chainID +var IsRevisionFormat = regexp.MustCompile(`^.*[^-]-{1}[1-9][0-9]*$`).MatchString + +// ZeroHeight is a helper function which returns an uninitialized height. +func ZeroHeight() Height { + return Height{} +} + +// NewHeight is a constructor for the IBC height type +func NewHeight(revisionNumber, revisionHeight uint64) Height { + return Height{ + RevisionNumber: revisionNumber, + RevisionHeight: revisionHeight, + } +} + +// GetRevisionNumber returns the revision-number of the height +func (h Height) GetRevisionNumber() uint64 { + return h.RevisionNumber +} + +// GetRevisionHeight returns the revision-height of the height +func (h Height) GetRevisionHeight() uint64 { + return h.RevisionHeight +} + +// Compare implements a method to compare two heights. When comparing two heights a, b +// we can call a.Compare(b) which will return +// -1 if a < b +// 0 if a = b +// 1 if a > b +// +// It first compares based on revision numbers, whichever has the higher revision number is the higher height +// If revision number is the same, then the revision height is compared +func (h Height) Compare(other exported.Height) int64 { + height, ok := other.(Height) + if !ok { + panic(fmt.Sprintf("cannot compare against invalid height type: %T. 
expected height type: %T", other, h))
+	}
+	var a, b big.Int
+	if h.RevisionNumber != height.RevisionNumber {
+		a.SetUint64(h.RevisionNumber)
+		b.SetUint64(height.RevisionNumber)
+	} else {
+		a.SetUint64(h.RevisionHeight)
+		b.SetUint64(height.RevisionHeight)
+	}
+	return int64(a.Cmp(&b))
+}
+
+// LT Helper comparison function returns true if h < other
+func (h Height) LT(other exported.Height) bool {
+	return h.Compare(other) == -1
+}
+
+// LTE Helper comparison function returns true if h <= other
+func (h Height) LTE(other exported.Height) bool {
+	cmp := h.Compare(other)
+	return cmp <= 0
+}
+
+// GT Helper comparison function returns true if h > other
+func (h Height) GT(other exported.Height) bool {
+	return h.Compare(other) == 1
+}
+
+// GTE Helper comparison function returns true if h >= other
+func (h Height) GTE(other exported.Height) bool {
+	cmp := h.Compare(other)
+	return cmp >= 0
+}
+
+// EQ Helper comparison function returns true if h == other
+func (h Height) EQ(other exported.Height) bool {
+	return h.Compare(other) == 0
+}
+
+// String returns a string representation of Height
+func (h Height) String() string {
+	return fmt.Sprintf("%d-%d", h.RevisionNumber, h.RevisionHeight)
+}
+
+// Decrement will return a new height with the RevisionHeight decremented.
+// If the RevisionHeight is already at its lowest value (0), then a false success flag is returned.
+func (h Height) Decrement() (decremented exported.Height, success bool) {
+	if h.RevisionHeight == 0 {
+		return Height{}, false
+	}
+	return NewHeight(h.RevisionNumber, h.RevisionHeight-1), true
+}
+
+// Increment will return a height with the same revision number but an
+// incremented revision height
+func (h Height) Increment() exported.Height {
+	return NewHeight(h.RevisionNumber, h.RevisionHeight+1)
+}
+
+// IsZero returns true if height revision and revision-height are both 0
+func (h Height) IsZero() bool {
+	return h.RevisionNumber == 0 && h.RevisionHeight == 0
+}
+
+// MustParseHeight will attempt to parse a string representation of a height and panic if
+// parsing fails.
+func MustParseHeight(heightStr string) Height {
+	height, err := ParseHeight(heightStr)
+	if err != nil {
+		panic(err)
+	}
+
+	return height
+}
+
+// ParseHeight is a utility function that takes a string representation of the height
+// and returns a Height struct
+func ParseHeight(heightStr string) (Height, error) {
+	splitStr := strings.Split(heightStr, "-")
+	if len(splitStr) != 2 {
+		return Height{}, sdkerrors.Wrapf(sdkerrors.ErrInvalidHeight, "expected height string format: {revision}-{height}. Got: %s", heightStr)
+	}
+	revisionNumber, err := strconv.ParseUint(splitStr[0], 10, 64)
+	if err != nil {
+		return Height{}, sdkerrors.Wrapf(sdkerrors.ErrInvalidHeight, "invalid revision number. parse err: %s", err)
+	}
+	revisionHeight, err := strconv.ParseUint(splitStr[1], 10, 64)
+	if err != nil {
+		return Height{}, sdkerrors.Wrapf(sdkerrors.ErrInvalidHeight, "invalid revision height. parse err: %s", err)
+	}
+	return NewHeight(revisionNumber, revisionHeight), nil
+}
+
+// SetRevisionNumber takes a chainID in valid revision format and swaps the revision number
+// in the chainID with the given revision number.
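+// For example, calling SetRevisionNumber with chainID "gaiamainnet-3" and revision 4
+// returns "gaiamainnet-4", while a chainID that is not in revision format
+// (e.g. "gaiamainnet") returns an error.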
+func SetRevisionNumber(chainID string, revision uint64) (string, error) {
+	if !IsRevisionFormat(chainID) {
+		return "", sdkerrors.Wrapf(
+			sdkerrors.ErrInvalidChainID, "chainID is not in revision format: %s", chainID,
+		)
+	}
+
+	splitStr := strings.Split(chainID, "-")
+	// swap out revision number with given revision
+	splitStr[len(splitStr)-1] = strconv.Itoa(int(revision))
+	return strings.Join(splitStr, "-"), nil
+}
+
+// ParseChainID is a utility function that returns a revision number from the given ChainID.
+// ParseChainID attempts to parse a chain id in the format: `{chainID}-{revision}`
+// and returns the revision number as a uint64.
+// If the chainID is not in the expected format, a default revision value of 0 is returned.
+func ParseChainID(chainID string) uint64 {
+	if !IsRevisionFormat(chainID) {
+		// chainID is not in revision format, return 0 as default
+		return 0
+	}
+	splitStr := strings.Split(chainID, "-")
+	revision, err := strconv.ParseUint(splitStr[len(splitStr)-1], 10, 64)
+	// sanity check: error should always be nil since regex only allows numbers in last element
+	if err != nil {
+		panic(fmt.Sprintf("regex allowed non-number value as last split element for chainID: %s", chainID))
+	}
+	return revision
+}
+
+// GetSelfHeight is a utility function that returns self height given context
+// Revision number is retrieved from ctx.ChainID()
+func GetSelfHeight(ctx sdk.Context) Height {
+	revision := ParseChainID(ctx.ChainID())
+	return NewHeight(revision, uint64(ctx.BlockHeight()))
+}
diff --git a/core/02-client/types/height_test.go b/core/02-client/types/height_test.go
new file mode 100644
index 0000000000..a455b7f58d
--- /dev/null
+++ b/core/02-client/types/height_test.go
@@ -0,0 +1,155 @@
+package types_test
+
+import (
+	"math"
+	"testing"
+
+	"github.com/stretchr/testify/require"
+
+	"github.com/cosmos/cosmos-sdk/x/ibc/core/02-client/types"
+)
+
+func TestZeroHeight(t *testing.T) {
+	require.Equal(t, types.Height{}, types.ZeroHeight())
+}
+
+func TestCompareHeights(t *testing.T) {
+	testCases := []struct {
+		name        string
+		height1     types.Height
+		height2     types.Height
+		compareSign int64
+	}{
+		{"revision number 1 is lesser", types.NewHeight(1, 3), types.NewHeight(3, 4), -1},
+		{"revision number 1 is greater", types.NewHeight(7, 5), types.NewHeight(4, 5), 1},
+		{"revision height 1 is lesser", types.NewHeight(3, 4), types.NewHeight(3, 9), -1},
+		{"revision height 1 is greater", types.NewHeight(3, 8), types.NewHeight(3, 3), 1},
+		{"revision number is MaxUint64", types.NewHeight(math.MaxUint64, 1), types.NewHeight(0, 1), 1},
+		{"revision height is MaxUint64", types.NewHeight(1, math.MaxUint64), types.NewHeight(1, 0), 1},
+		{"height is equal", types.NewHeight(4, 4), types.NewHeight(4, 4), 0},
+	}
+
+	for i, tc := range testCases {
+		compare := tc.height1.Compare(tc.height2)
+
+		switch tc.compareSign {
+		case -1:
+			require.True(t, compare == -1, "case %d: %s should return negative value on comparison, got: %d",
+				i, tc.name, compare)
+		case 0:
+			require.True(t, compare == 0, "case %d: %s should return zero on comparison, got: %d",
+				i, tc.name, compare)
+		case 1:
+			require.True(t, compare == 1, "case %d: %s should return positive value on comparison, got: %d",
+				i, tc.name, compare)
+		}
+	}
+}
+
+func TestDecrement(t *testing.T) {
+	validDecrement := types.NewHeight(3, 3)
+	expected := types.NewHeight(3, 2)
+
+	actual, success := validDecrement.Decrement()
+	require.Equal(t, expected, actual, "decrementing %s did not return expected height: %s.
got %s", + validDecrement, expected, actual) + require.True(t, success, "decrement failed unexpectedly") + + invalidDecrement := types.NewHeight(3, 0) + actual, success = invalidDecrement.Decrement() + + require.Equal(t, types.ZeroHeight(), actual, "invalid decrement returned non-zero height: %s", actual) + require.False(t, success, "invalid decrement passed") +} + +func TestString(t *testing.T) { + _, err := types.ParseHeight("height") + require.Error(t, err, "invalid height string passed") + + _, err = types.ParseHeight("revision-10") + require.Error(t, err, "invalid revision string passed") + + _, err = types.ParseHeight("3-height") + require.Error(t, err, "invalid revision-height string passed") + + height := types.NewHeight(3, 4) + recovered, err := types.ParseHeight(height.String()) + + require.NoError(t, err, "valid height string could not be parsed") + require.Equal(t, height, recovered, "recovered height not equal to original height") + + parse, err := types.ParseHeight("3-10") + require.NoError(t, err, "parse err") + require.Equal(t, types.NewHeight(3, 10), parse, "parse height returns wrong height") +} + +func (suite *TypesTestSuite) TestMustParseHeight() { + suite.Require().Panics(func() { + types.MustParseHeight("height") + }) + + suite.Require().NotPanics(func() { + types.MustParseHeight("111-1") + }) + + suite.Require().NotPanics(func() { + types.MustParseHeight("0-0") + }) +} + +func TestParseChainID(t *testing.T) { + cases := []struct { + chainID string + revision uint64 + formatted bool + }{ + {"gaiamainnet-3", 3, true}, + {"a-1", 1, true}, + {"gaia-mainnet-40", 40, true}, + {"gaiamainnet-3-39", 39, true}, + {"gaiamainnet--", 0, false}, + {"gaiamainnet-03", 0, false}, + {"gaiamainnet--4", 0, false}, + {"gaiamainnet-3.4", 0, false}, + {"gaiamainnet", 0, false}, + {"a--1", 0, false}, + {"-1", 0, false}, + {"--1", 0, false}, + } + + for i, tc := range cases { + require.Equal(t, tc.formatted, types.IsRevisionFormat(tc.chainID), "id %s does not match expected format", tc.chainID) + + revision := types.ParseChainID(tc.chainID) + require.Equal(t, tc.revision, revision, "case %d returns incorrect revision", i) + } + +} + +func TestSetRevisionNumber(t *testing.T) { + // Test SetRevisionNumber + chainID, err := types.SetRevisionNumber("gaiamainnet", 3) + require.Error(t, err, "invalid revision format passed SetRevisionNumber") + require.Equal(t, "", chainID, "invalid revision format returned non-empty string on SetRevisionNumber") + chainID = "gaiamainnet-3" + + chainID, err = types.SetRevisionNumber(chainID, 4) + require.NoError(t, err, "valid revision format failed SetRevisionNumber") + require.Equal(t, "gaiamainnet-4", chainID, "valid revision format returned incorrect string on SetRevisionNumber") +} + +func (suite *TypesTestSuite) TestSelfHeight() { + ctx := suite.chainA.GetContext() + + // Test default revision + ctx = ctx.WithChainID("gaiamainnet") + ctx = ctx.WithBlockHeight(10) + height := types.GetSelfHeight(ctx) + suite.Require().Equal(types.NewHeight(0, 10), height, "default self height failed") + + // Test successful revision format + ctx = ctx.WithChainID("gaiamainnet-3") + ctx = ctx.WithBlockHeight(18) + height = types.GetSelfHeight(ctx) + suite.Require().Equal(types.NewHeight(3, 18), height, "valid self height failed") +} diff --git a/core/02-client/types/keys.go b/core/02-client/types/keys.go new file mode 100644 index 0000000000..321f5e3ffa --- /dev/null +++ b/core/02-client/types/keys.go @@ -0,0 +1,65 @@ +package types + +import ( + "fmt" + "regexp" + "strconv" + 
"strings" + + sdkerrors "github.com/cosmos/cosmos-sdk/types/errors" + host "github.com/cosmos/cosmos-sdk/x/ibc/core/24-host" +) + +const ( + // SubModuleName defines the IBC client name + SubModuleName string = "client" + + // RouterKey is the message route for IBC client + RouterKey string = SubModuleName + + // QuerierRoute is the querier route for IBC client + QuerierRoute string = SubModuleName + + // KeyNextClientSequence is the key used to store the next client sequence in + // the keeper. + KeyNextClientSequence = "nextClientSequence" +) + +// FormatClientIdentifier returns the client identifier with the sequence appended. +// This is a SDK specific format not enforced by IBC protocol. +func FormatClientIdentifier(clientType string, sequence uint64) string { + return fmt.Sprintf("%s-%d", clientType, sequence) +} + +// IsClientIDFormat checks if a clientID is in the format required on the SDK for +// parsing client identifiers. The client identifier must be in the form: `{client-type}-{N} +var IsClientIDFormat = regexp.MustCompile(`^.*[^-]-[0-9]{1,20}$`).MatchString + +// IsValidClientID checks if the clientID is valid and can be parsed into the client +// identifier format. +func IsValidClientID(clientID string) bool { + _, _, err := ParseClientIdentifier(clientID) + return err == nil +} + +// ParseClientIdentifier parses the client type and sequence from the client identifier. +func ParseClientIdentifier(clientID string) (string, uint64, error) { + if !IsClientIDFormat(clientID) { + return "", 0, sdkerrors.Wrapf(host.ErrInvalidID, "invalid client identifier %s is not in format: `{client-type}-{N}`", clientID) + } + + splitStr := strings.Split(clientID, "-") + lastIndex := len(splitStr) - 1 + + clientType := strings.Join(splitStr[:lastIndex], "-") + if strings.TrimSpace(clientType) == "" { + return "", 0, sdkerrors.Wrap(host.ErrInvalidID, "client identifier must be in format: `{client-type}-{N}` and client type cannot be blank") + } + + sequence, err := strconv.ParseUint(splitStr[lastIndex], 10, 64) + if err != nil { + return "", 0, sdkerrors.Wrap(err, "failed to parse client identifier sequence") + } + + return clientType, sequence, nil +} diff --git a/core/02-client/types/keys_test.go b/core/02-client/types/keys_test.go new file mode 100644 index 0000000000..4938145236 --- /dev/null +++ b/core/02-client/types/keys_test.go @@ -0,0 +1,54 @@ +package types_test + +import ( + "math" + "testing" + + "github.com/stretchr/testify/require" + + "github.com/cosmos/cosmos-sdk/x/ibc/core/02-client/types" +) + +// tests ParseClientIdentifier and IsValidClientID +func TestParseClientIdentifier(t *testing.T) { + testCases := []struct { + name string + clientID string + clientType string + expSeq uint64 + expPass bool + }{ + {"valid 0", "tendermint-0", "tendermint", 0, true}, + {"valid 1", "tendermint-1", "tendermint", 1, true}, + {"valid solemachine", "solomachine-v1-1", "solomachine-v1", 1, true}, + {"valid large sequence", types.FormatClientIdentifier("tendermint", math.MaxUint64), "tendermint", math.MaxUint64, true}, + {"valid short client type", "t-0", "t", 0, true}, + // one above uint64 max + {"invalid uint64", "tendermint-18446744073709551616", "tendermint", 0, false}, + // uint64 == 20 characters + {"invalid large sequence", "tendermint-2345682193567182931243", "tendermint", 0, false}, + {"missing dash", "tendermint0", "tendermint", 0, false}, + {"blank id", " ", " ", 0, false}, + {"empty id", "", "", 0, false}, + {"negative sequence", "tendermint--1", "tendermint", 0, false}, + 
{"invalid format", "tendermint-tm", "tendermint", 0, false}, + {"empty clientype", " -100", "tendermint", 0, false}, + } + + for _, tc := range testCases { + + clientType, seq, err := types.ParseClientIdentifier(tc.clientID) + valid := types.IsValidClientID(tc.clientID) + require.Equal(t, tc.expSeq, seq, tc.clientID) + + if tc.expPass { + require.NoError(t, err, tc.name) + require.True(t, valid) + require.Equal(t, tc.clientType, clientType) + } else { + require.Error(t, err, tc.name, tc.clientID) + require.False(t, valid) + require.Equal(t, "", clientType) + } + } +} diff --git a/core/02-client/types/msgs.go b/core/02-client/types/msgs.go new file mode 100644 index 0000000000..1e884123d7 --- /dev/null +++ b/core/02-client/types/msgs.go @@ -0,0 +1,343 @@ +package types + +import ( + codectypes "github.com/cosmos/cosmos-sdk/codec/types" + sdk "github.com/cosmos/cosmos-sdk/types" + sdkerrors "github.com/cosmos/cosmos-sdk/types/errors" + host "github.com/cosmos/cosmos-sdk/x/ibc/core/24-host" + "github.com/cosmos/cosmos-sdk/x/ibc/core/exported" +) + +// message types for the IBC client +const ( + TypeMsgCreateClient string = "create_client" + TypeMsgUpdateClient string = "update_client" + TypeMsgUpgradeClient string = "upgrade_client" + TypeMsgSubmitMisbehaviour string = "submit_misbehaviour" +) + +var ( + _ sdk.Msg = &MsgCreateClient{} + _ sdk.Msg = &MsgUpdateClient{} + _ sdk.Msg = &MsgSubmitMisbehaviour{} + _ sdk.Msg = &MsgUpgradeClient{} + + _ codectypes.UnpackInterfacesMessage = MsgCreateClient{} + _ codectypes.UnpackInterfacesMessage = MsgUpdateClient{} + _ codectypes.UnpackInterfacesMessage = MsgSubmitMisbehaviour{} + _ codectypes.UnpackInterfacesMessage = MsgUpgradeClient{} +) + +// NewMsgCreateClient creates a new MsgCreateClient instance +//nolint:interfacer +func NewMsgCreateClient( + clientState exported.ClientState, consensusState exported.ConsensusState, signer sdk.AccAddress, +) (*MsgCreateClient, error) { + + anyClientState, err := PackClientState(clientState) + if err != nil { + return nil, err + } + + anyConsensusState, err := PackConsensusState(consensusState) + if err != nil { + return nil, err + } + + return &MsgCreateClient{ + ClientState: anyClientState, + ConsensusState: anyConsensusState, + Signer: signer.String(), + }, nil +} + +// Route implements sdk.Msg +func (msg MsgCreateClient) Route() string { + return host.RouterKey +} + +// Type implements sdk.Msg +func (msg MsgCreateClient) Type() string { + return TypeMsgCreateClient +} + +// ValidateBasic implements sdk.Msg +func (msg MsgCreateClient) ValidateBasic() error { + _, err := sdk.AccAddressFromBech32(msg.Signer) + if err != nil { + return sdkerrors.Wrapf(sdkerrors.ErrInvalidAddress, "string could not be parsed as address: %v", err) + } + clientState, err := UnpackClientState(msg.ClientState) + if err != nil { + return err + } + if err := clientState.Validate(); err != nil { + return err + } + if clientState.ClientType() == exported.Localhost { + return sdkerrors.Wrap(ErrInvalidClient, "localhost client can only be created on chain initialization") + } + consensusState, err := UnpackConsensusState(msg.ConsensusState) + if err != nil { + return err + } + if clientState.ClientType() != consensusState.ClientType() { + return sdkerrors.Wrap(ErrInvalidClientType, "client type for client state and consensus state do not match") + } + if err := ValidateClientType(clientState.ClientType()); err != nil { + return sdkerrors.Wrap(err, "client type does not meet naming constraints") + } + return consensusState.ValidateBasic() 
+} + +// GetSignBytes implements sdk.Msg. The function will panic since it is used +// for amino transaction verification which IBC does not support. +func (msg MsgCreateClient) GetSignBytes() []byte { + panic("IBC messages do not support amino") +} + +// GetSigners implements sdk.Msg +func (msg MsgCreateClient) GetSigners() []sdk.AccAddress { + accAddr, err := sdk.AccAddressFromBech32(msg.Signer) + if err != nil { + panic(err) + } + return []sdk.AccAddress{accAddr} +} + +// UnpackInterfaces implements UnpackInterfacesMessage.UnpackInterfaces +func (msg MsgCreateClient) UnpackInterfaces(unpacker codectypes.AnyUnpacker) error { + var clientState exported.ClientState + err := unpacker.UnpackAny(msg.ClientState, &clientState) + if err != nil { + return err + } + + var consensusState exported.ConsensusState + return unpacker.UnpackAny(msg.ConsensusState, &consensusState) +} + +// NewMsgUpdateClient creates a new MsgUpdateClient instance +//nolint:interfacer +func NewMsgUpdateClient(id string, header exported.Header, signer sdk.AccAddress) (*MsgUpdateClient, error) { + anyHeader, err := PackHeader(header) + if err != nil { + return nil, err + } + + return &MsgUpdateClient{ + ClientId: id, + Header: anyHeader, + Signer: signer.String(), + }, nil +} + +// Route implements sdk.Msg +func (msg MsgUpdateClient) Route() string { + return host.RouterKey +} + +// Type implements sdk.Msg +func (msg MsgUpdateClient) Type() string { + return TypeMsgUpdateClient +} + +// ValidateBasic implements sdk.Msg +func (msg MsgUpdateClient) ValidateBasic() error { + _, err := sdk.AccAddressFromBech32(msg.Signer) + if err != nil { + return sdkerrors.Wrapf(sdkerrors.ErrInvalidAddress, "string could not be parsed as address: %v", err) + } + header, err := UnpackHeader(msg.Header) + if err != nil { + return err + } + if err := header.ValidateBasic(); err != nil { + return err + } + if msg.ClientId == exported.Localhost { + return sdkerrors.Wrap(ErrInvalidClient, "localhost client is only updated on ABCI BeginBlock") + } + return host.ClientIdentifierValidator(msg.ClientId) +} + +// GetSignBytes implements sdk.Msg. The function will panic since it is used +// for amino transaction verification which IBC does not support. 
+func (msg MsgUpdateClient) GetSignBytes() []byte {
+	panic("IBC messages do not support amino")
+}
+
+// GetSigners implements sdk.Msg
+func (msg MsgUpdateClient) GetSigners() []sdk.AccAddress {
+	accAddr, err := sdk.AccAddressFromBech32(msg.Signer)
+	if err != nil {
+		panic(err)
+	}
+	return []sdk.AccAddress{accAddr}
+}
+
+// UnpackInterfaces implements UnpackInterfacesMessage.UnpackInterfaces
+func (msg MsgUpdateClient) UnpackInterfaces(unpacker codectypes.AnyUnpacker) error {
+	var header exported.Header
+	return unpacker.UnpackAny(msg.Header, &header)
+}
+
+// NewMsgUpgradeClient creates a new MsgUpgradeClient instance
+// nolint: interfacer
+func NewMsgUpgradeClient(clientID string, clientState exported.ClientState, consState exported.ConsensusState,
+	proofUpgradeClient, proofUpgradeConsState []byte, signer sdk.AccAddress) (*MsgUpgradeClient, error) {
+	anyClient, err := PackClientState(clientState)
+	if err != nil {
+		return nil, err
+	}
+	anyConsState, err := PackConsensusState(consState)
+	if err != nil {
+		return nil, err
+	}
+
+	return &MsgUpgradeClient{
+		ClientId:                   clientID,
+		ClientState:                anyClient,
+		ConsensusState:             anyConsState,
+		ProofUpgradeClient:         proofUpgradeClient,
+		ProofUpgradeConsensusState: proofUpgradeConsState,
+		Signer:                     signer.String(),
+	}, nil
+}
+
+// Route implements sdk.Msg
+func (msg MsgUpgradeClient) Route() string {
+	return host.RouterKey
+}
+
+// Type implements sdk.Msg
+func (msg MsgUpgradeClient) Type() string {
+	return TypeMsgUpgradeClient
+}
+
+// ValidateBasic implements sdk.Msg
+func (msg MsgUpgradeClient) ValidateBasic() error {
+	// will not validate client state as committed client may not form a valid client state.
+	// client implementations are responsible for ensuring final upgraded client is valid.
+	clientState, err := UnpackClientState(msg.ClientState)
+	if err != nil {
+		return err
+	}
+	// will not validate consensus state here since the trusted kernel may not form a valid consensus state.
+	// client implementations are responsible for ensuring client can submit new headers against this consensus state.
+	consensusState, err := UnpackConsensusState(msg.ConsensusState)
+	if err != nil {
+		return err
+	}
+
+	if clientState.ClientType() != consensusState.ClientType() {
+		return sdkerrors.Wrapf(ErrInvalidUpgradeClient, "consensus state's client-type does not match client. expected: %s, got: %s",
+			clientState.ClientType(), consensusState.ClientType())
+	}
+	if len(msg.ProofUpgradeClient) == 0 {
+		return sdkerrors.Wrap(ErrInvalidUpgradeClient, "proof of upgrade client cannot be empty")
+	}
+	if len(msg.ProofUpgradeConsensusState) == 0 {
+		return sdkerrors.Wrap(ErrInvalidUpgradeClient, "proof of upgrade consensus state cannot be empty")
+	}
+	_, err = sdk.AccAddressFromBech32(msg.Signer)
+	if err != nil {
+		return sdkerrors.Wrapf(sdkerrors.ErrInvalidAddress, "string could not be parsed as address: %v", err)
+	}
+	return host.ClientIdentifierValidator(msg.ClientId)
+}
+
+// GetSignBytes implements sdk.Msg. The function will panic since it is used
+// for amino transaction verification which IBC does not support.
+func (msg MsgUpgradeClient) GetSignBytes() []byte {
+	panic("IBC messages do not support amino")
+}
+
+// GetSigners implements sdk.Msg
+func (msg MsgUpgradeClient) GetSigners() []sdk.AccAddress {
+	accAddr, err := sdk.AccAddressFromBech32(msg.Signer)
+	if err != nil {
+		panic(err)
+	}
+	return []sdk.AccAddress{accAddr}
+}
+
+// UnpackInterfaces implements UnpackInterfacesMessage.UnpackInterfaces
+func (msg MsgUpgradeClient) UnpackInterfaces(unpacker codectypes.AnyUnpacker) error {
+	var (
+		clientState exported.ClientState
+		consState   exported.ConsensusState
+	)
+	if err := unpacker.UnpackAny(msg.ClientState, &clientState); err != nil {
+		return err
+	}
+	return unpacker.UnpackAny(msg.ConsensusState, &consState)
+}
+
+// NewMsgSubmitMisbehaviour creates a new MsgSubmitMisbehaviour instance.
+//nolint:interfacer
+func NewMsgSubmitMisbehaviour(clientID string, misbehaviour exported.Misbehaviour, signer sdk.AccAddress) (*MsgSubmitMisbehaviour, error) {
+	anyMisbehaviour, err := PackMisbehaviour(misbehaviour)
+	if err != nil {
+		return nil, err
+	}
+
+	return &MsgSubmitMisbehaviour{
+		ClientId:     clientID,
+		Misbehaviour: anyMisbehaviour,
+		Signer:       signer.String(),
+	}, nil
+}
+
+// Route returns the MsgSubmitMisbehaviour's route.
+func (msg MsgSubmitMisbehaviour) Route() string { return host.RouterKey }
+
+// Type returns the MsgSubmitMisbehaviour's type.
+func (msg MsgSubmitMisbehaviour) Type() string {
+	return TypeMsgSubmitMisbehaviour
+}
+
+// ValidateBasic performs basic (non-state-dependent) validation on a MsgSubmitMisbehaviour.
+func (msg MsgSubmitMisbehaviour) ValidateBasic() error {
+	_, err := sdk.AccAddressFromBech32(msg.Signer)
+	if err != nil {
+		return sdkerrors.Wrapf(sdkerrors.ErrInvalidAddress, "string could not be parsed as address: %v", err)
+	}
+	misbehaviour, err := UnpackMisbehaviour(msg.Misbehaviour)
+	if err != nil {
+		return err
+	}
+	if err := misbehaviour.ValidateBasic(); err != nil {
+		return err
+	}
+	if misbehaviour.GetClientID() != msg.ClientId {
+		return sdkerrors.Wrapf(
+			ErrInvalidMisbehaviour,
+			"misbehaviour client-id doesn't match client-id from message (%s ≠ %s)",
+			misbehaviour.GetClientID(), msg.ClientId,
+		)
+	}
+
+	return host.ClientIdentifierValidator(msg.ClientId)
+}
+
+// GetSignBytes implements sdk.Msg. The function will panic since it is used
+// for amino transaction verification which IBC does not support.
+func (msg MsgSubmitMisbehaviour) GetSignBytes() []byte {
+	panic("IBC messages do not support amino")
+}
+
+// GetSigners returns the single expected signer for a MsgSubmitMisbehaviour.
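+// The address is parsed from the Bech32-encoded Signer field; GetSigners panics if the
+// address cannot be parsed, mirroring the other client message types above.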
+func (msg MsgSubmitMisbehaviour) GetSigners() []sdk.AccAddress { + accAddr, err := sdk.AccAddressFromBech32(msg.Signer) + if err != nil { + panic(err) + } + return []sdk.AccAddress{accAddr} +} + +// UnpackInterfaces implements UnpackInterfacesMessage.UnpackInterfaces +func (msg MsgSubmitMisbehaviour) UnpackInterfaces(unpacker codectypes.AnyUnpacker) error { + var misbehaviour exported.Misbehaviour + return unpacker.UnpackAny(msg.Misbehaviour, &misbehaviour) +} diff --git a/core/02-client/types/msgs_test.go b/core/02-client/types/msgs_test.go new file mode 100644 index 0000000000..e42725bae2 --- /dev/null +++ b/core/02-client/types/msgs_test.go @@ -0,0 +1,619 @@ +package types_test + +import ( + "testing" + "time" + + "github.com/golang/protobuf/proto" + "github.com/stretchr/testify/suite" + + "github.com/cosmos/cosmos-sdk/x/ibc/core/02-client/types" + commitmenttypes "github.com/cosmos/cosmos-sdk/x/ibc/core/23-commitment/types" + "github.com/cosmos/cosmos-sdk/x/ibc/core/exported" + solomachinetypes "github.com/cosmos/cosmos-sdk/x/ibc/light-clients/06-solomachine/types" + ibctmtypes "github.com/cosmos/cosmos-sdk/x/ibc/light-clients/07-tendermint/types" + ibctesting "github.com/cosmos/cosmos-sdk/x/ibc/testing" +) + +type TypesTestSuite struct { + suite.Suite + + coordinator *ibctesting.Coordinator + + chainA *ibctesting.TestChain + chainB *ibctesting.TestChain +} + +func (suite *TypesTestSuite) SetupTest() { + suite.coordinator = ibctesting.NewCoordinator(suite.T(), 2) + suite.chainA = suite.coordinator.GetChain(ibctesting.GetChainID(0)) + suite.chainB = suite.coordinator.GetChain(ibctesting.GetChainID(1)) +} + +func TestTypesTestSuite(t *testing.T) { + suite.Run(t, new(TypesTestSuite)) +} + +// tests that different clients within MsgCreateClient can be marshaled +// and unmarshaled. 
+func (suite *TypesTestSuite) TestMarshalMsgCreateClient() { + var ( + msg *types.MsgCreateClient + err error + ) + + testCases := []struct { + name string + malleate func() + }{ + { + "solo machine client", func() { + soloMachine := ibctesting.NewSolomachine(suite.T(), suite.chainA.Codec, "solomachine", "", 2) + msg, err = types.NewMsgCreateClient(soloMachine.ClientState(), soloMachine.ConsensusState(), suite.chainA.SenderAccount.GetAddress()) + suite.Require().NoError(err) + }, + }, + { + "tendermint client", func() { + tendermintClient := ibctmtypes.NewClientState(suite.chainA.ChainID, ibctesting.DefaultTrustLevel, ibctesting.TrustingPeriod, ibctesting.UnbondingPeriod, ibctesting.MaxClockDrift, clientHeight, commitmenttypes.GetSDKSpecs(), ibctesting.UpgradePath, false, false) + msg, err = types.NewMsgCreateClient(tendermintClient, suite.chainA.CurrentTMClientHeader().ConsensusState(), suite.chainA.SenderAccount.GetAddress()) + suite.Require().NoError(err) + }, + }, + } + + for _, tc := range testCases { + tc := tc + + suite.Run(tc.name, func() { + suite.SetupTest() + + tc.malleate() + + cdc := suite.chainA.App.AppCodec() + + // marshal message + bz, err := cdc.MarshalJSON(msg) + suite.Require().NoError(err) + + // unmarshal message + newMsg := &types.MsgCreateClient{} + err = cdc.UnmarshalJSON(bz, newMsg) + suite.Require().NoError(err) + + suite.Require().True(proto.Equal(msg, newMsg)) + }) + } +} + +func (suite *TypesTestSuite) TestMsgCreateClient_ValidateBasic() { + var ( + msg = &types.MsgCreateClient{} + err error + ) + + cases := []struct { + name string + malleate func() + expPass bool + }{ + { + "valid - tendermint client", + func() { + tendermintClient := ibctmtypes.NewClientState(suite.chainA.ChainID, ibctesting.DefaultTrustLevel, ibctesting.TrustingPeriod, ibctesting.UnbondingPeriod, ibctesting.MaxClockDrift, clientHeight, commitmenttypes.GetSDKSpecs(), ibctesting.UpgradePath, false, false) + msg, err = types.NewMsgCreateClient(tendermintClient, suite.chainA.CurrentTMClientHeader().ConsensusState(), suite.chainA.SenderAccount.GetAddress()) + suite.Require().NoError(err) + }, + true, + }, + { + "invalid tendermint client", + func() { + msg, err = types.NewMsgCreateClient(&ibctmtypes.ClientState{}, suite.chainA.CurrentTMClientHeader().ConsensusState(), suite.chainA.SenderAccount.GetAddress()) + suite.Require().NoError(err) + }, + false, + }, + { + "failed to unpack client", + func() { + msg.ClientState = nil + }, + false, + }, + { + "failed to unpack consensus state", + func() { + tendermintClient := ibctmtypes.NewClientState(suite.chainA.ChainID, ibctesting.DefaultTrustLevel, ibctesting.TrustingPeriod, ibctesting.UnbondingPeriod, ibctesting.MaxClockDrift, clientHeight, commitmenttypes.GetSDKSpecs(), ibctesting.UpgradePath, false, false) + msg, err = types.NewMsgCreateClient(tendermintClient, suite.chainA.CurrentTMClientHeader().ConsensusState(), suite.chainA.SenderAccount.GetAddress()) + suite.Require().NoError(err) + msg.ConsensusState = nil + }, + false, + }, + { + "invalid signer", + func() { + msg.Signer = "" + }, + false, + }, + { + "valid - solomachine client", + func() { + soloMachine := ibctesting.NewSolomachine(suite.T(), suite.chainA.Codec, "solomachine", "", 2) + msg, err = types.NewMsgCreateClient(soloMachine.ClientState(), soloMachine.ConsensusState(), suite.chainA.SenderAccount.GetAddress()) + suite.Require().NoError(err) + }, + true, + }, + { + "invalid solomachine client", + func() { + soloMachine := ibctesting.NewSolomachine(suite.T(), suite.chainA.Codec, 
"solomachine", "", 2) + msg, err = types.NewMsgCreateClient(&solomachinetypes.ClientState{}, soloMachine.ConsensusState(), suite.chainA.SenderAccount.GetAddress()) + suite.Require().NoError(err) + }, + false, + }, + { + "invalid solomachine consensus state", + func() { + soloMachine := ibctesting.NewSolomachine(suite.T(), suite.chainA.Codec, "solomachine", "", 2) + msg, err = types.NewMsgCreateClient(soloMachine.ClientState(), &solomachinetypes.ConsensusState{}, suite.chainA.SenderAccount.GetAddress()) + suite.Require().NoError(err) + }, + false, + }, + { + "invalid - client state and consensus state client types do not match", + func() { + tendermintClient := ibctmtypes.NewClientState(suite.chainA.ChainID, ibctesting.DefaultTrustLevel, ibctesting.TrustingPeriod, ibctesting.UnbondingPeriod, ibctesting.MaxClockDrift, clientHeight, commitmenttypes.GetSDKSpecs(), ibctesting.UpgradePath, false, false) + soloMachine := ibctesting.NewSolomachine(suite.T(), suite.chainA.Codec, "solomachine", "", 2) + msg, err = types.NewMsgCreateClient(tendermintClient, soloMachine.ConsensusState(), suite.chainA.SenderAccount.GetAddress()) + suite.Require().NoError(err) + }, + false, + }, + } + + for _, tc := range cases { + tc.malleate() + err = msg.ValidateBasic() + if tc.expPass { + suite.Require().NoError(err, tc.name) + } else { + suite.Require().Error(err, tc.name) + } + } +} + +// tests that different header within MsgUpdateClient can be marshaled +// and unmarshaled. +func (suite *TypesTestSuite) TestMarshalMsgUpdateClient() { + var ( + msg *types.MsgUpdateClient + err error + ) + + testCases := []struct { + name string + malleate func() + }{ + { + "solo machine client", func() { + soloMachine := ibctesting.NewSolomachine(suite.T(), suite.chainA.Codec, "solomachine", "", 2) + msg, err = types.NewMsgUpdateClient(soloMachine.ClientID, soloMachine.CreateHeader(), suite.chainA.SenderAccount.GetAddress()) + suite.Require().NoError(err) + }, + }, + { + "tendermint client", func() { + msg, err = types.NewMsgUpdateClient("tendermint", suite.chainA.CurrentTMClientHeader(), suite.chainA.SenderAccount.GetAddress()) + suite.Require().NoError(err) + + }, + }, + } + + for _, tc := range testCases { + tc := tc + + suite.Run(tc.name, func() { + suite.SetupTest() + + tc.malleate() + + cdc := suite.chainA.App.AppCodec() + + // marshal message + bz, err := cdc.MarshalJSON(msg) + suite.Require().NoError(err) + + // unmarshal message + newMsg := &types.MsgUpdateClient{} + err = cdc.UnmarshalJSON(bz, newMsg) + suite.Require().NoError(err) + + suite.Require().True(proto.Equal(msg, newMsg)) + }) + } +} + +func (suite *TypesTestSuite) TestMsgUpdateClient_ValidateBasic() { + var ( + msg = &types.MsgUpdateClient{} + err error + ) + + cases := []struct { + name string + malleate func() + expPass bool + }{ + { + "invalid client-id", + func() { + msg.ClientId = "" + }, + false, + }, + { + "valid - tendermint header", + func() { + msg, err = types.NewMsgUpdateClient("tendermint", suite.chainA.CurrentTMClientHeader(), suite.chainA.SenderAccount.GetAddress()) + suite.Require().NoError(err) + }, + true, + }, + { + "invalid tendermint header", + func() { + msg, err = types.NewMsgUpdateClient("tendermint", &ibctmtypes.Header{}, suite.chainA.SenderAccount.GetAddress()) + suite.Require().NoError(err) + }, + false, + }, + { + "failed to unpack header", + func() { + msg.Header = nil + }, + false, + }, + { + "invalid signer", + func() { + msg.Signer = "" + }, + false, + }, + { + "valid - solomachine header", + func() { + soloMachine := 
ibctesting.NewSolomachine(suite.T(), suite.chainA.Codec, "solomachine", "", 2) + msg, err = types.NewMsgUpdateClient(soloMachine.ClientID, soloMachine.CreateHeader(), suite.chainA.SenderAccount.GetAddress()) + suite.Require().NoError(err) + }, + true, + }, + { + "invalid solomachine header", + func() { + msg, err = types.NewMsgUpdateClient("solomachine", &solomachinetypes.Header{}, suite.chainA.SenderAccount.GetAddress()) + suite.Require().NoError(err) + }, + false, + }, + { + "unsupported - localhost", + func() { + msg, err = types.NewMsgUpdateClient(exported.Localhost, suite.chainA.CurrentTMClientHeader(), suite.chainA.SenderAccount.GetAddress()) + suite.Require().NoError(err) + }, + false, + }, + } + + for _, tc := range cases { + tc.malleate() + err = msg.ValidateBasic() + if tc.expPass { + suite.Require().NoError(err, tc.name) + } else { + suite.Require().Error(err, tc.name) + } + } +} + +func (suite *TypesTestSuite) TestMarshalMsgUpgradeClient() { + var ( + msg *types.MsgUpgradeClient + err error + ) + + testCases := []struct { + name string + malleate func() + }{ + { + "client upgrades to new tendermint client", + func() { + tendermintClient := ibctmtypes.NewClientState(suite.chainA.ChainID, ibctesting.DefaultTrustLevel, ibctesting.TrustingPeriod, ibctesting.UnbondingPeriod, ibctesting.MaxClockDrift, clientHeight, commitmenttypes.GetSDKSpecs(), ibctesting.UpgradePath, false, false) + tendermintConsState := &ibctmtypes.ConsensusState{NextValidatorsHash: []byte("nextValsHash")} + msg, err = types.NewMsgUpgradeClient("clientid", tendermintClient, tendermintConsState, []byte("proofUpgradeClient"), []byte("proofUpgradeConsState"), suite.chainA.SenderAccount.GetAddress()) + suite.Require().NoError(err) + }, + }, + { + "client upgrades to new solomachine client", + func() { + soloMachine := ibctesting.NewSolomachine(suite.T(), suite.chainA.Codec, "solomachine", "", 1) + msg, err = types.NewMsgUpgradeClient("clientid", soloMachine.ClientState(), soloMachine.ConsensusState(), []byte("proofUpgradeClient"), []byte("proofUpgradeConsState"), suite.chainA.SenderAccount.GetAddress()) + suite.Require().NoError(err) + }, + }, + } + + for _, tc := range testCases { + tc := tc + + suite.Run(tc.name, func() { + suite.SetupTest() + + tc.malleate() + + cdc := suite.chainA.App.AppCodec() + + // marshal message + bz, err := cdc.MarshalJSON(msg) + suite.Require().NoError(err) + + // unmarshal message + newMsg := &types.MsgUpgradeClient{} + err = cdc.UnmarshalJSON(bz, newMsg) + suite.Require().NoError(err) + }) + } +} + +func (suite *TypesTestSuite) TestMsgUpgradeClient_ValidateBasic() { + cases := []struct { + name string + malleate func(*types.MsgUpgradeClient) + expPass bool + }{ + { + name: "success", + malleate: func(msg *types.MsgUpgradeClient) {}, + expPass: true, + }, + { + name: "client id empty", + malleate: func(msg *types.MsgUpgradeClient) { + msg.ClientId = "" + }, + expPass: false, + }, + { + name: "invalid client id", + malleate: func(msg *types.MsgUpgradeClient) { + msg.ClientId = "invalid~chain/id" + }, + expPass: false, + }, + { + name: "unpacking clientstate fails", + malleate: func(msg *types.MsgUpgradeClient) { + msg.ClientState = nil + }, + expPass: false, + }, + { + name: "unpacking consensus state fails", + malleate: func(msg *types.MsgUpgradeClient) { + msg.ConsensusState = nil + }, + expPass: false, + }, + { + name: "client and consensus type does not match", + malleate: func(msg *types.MsgUpgradeClient) { + soloMachine := ibctesting.NewSolomachine(suite.T(), suite.chainA.Codec, 
"solomachine", "", 2) + soloConsensus, err := types.PackConsensusState(soloMachine.ConsensusState()) + suite.Require().NoError(err) + msg.ConsensusState = soloConsensus + }, + expPass: false, + }, + { + name: "empty client proof", + malleate: func(msg *types.MsgUpgradeClient) { + msg.ProofUpgradeClient = nil + }, + expPass: false, + }, + { + name: "empty consensus state proof", + malleate: func(msg *types.MsgUpgradeClient) { + msg.ProofUpgradeConsensusState = nil + }, + expPass: false, + }, + { + name: "empty signer", + malleate: func(msg *types.MsgUpgradeClient) { + msg.Signer = " " + }, + expPass: false, + }, + } + + for _, tc := range cases { + tc := tc + + clientState := ibctmtypes.NewClientState(suite.chainA.ChainID, ibctesting.DefaultTrustLevel, ibctesting.TrustingPeriod, ibctesting.UnbondingPeriod, ibctesting.MaxClockDrift, clientHeight, commitmenttypes.GetSDKSpecs(), ibctesting.UpgradePath, false, false) + consState := &ibctmtypes.ConsensusState{NextValidatorsHash: []byte("nextValsHash")} + msg, err := types.NewMsgUpgradeClient("testclientid", clientState, consState, []byte("proofUpgradeClient"), []byte("proofUpgradeConsState"), suite.chainA.SenderAccount.GetAddress()) + suite.Require().NoError(err) + + tc.malleate(msg) + err = msg.ValidateBasic() + if tc.expPass { + suite.Require().NoError(err, "valid case %s failed", tc.name) + } else { + suite.Require().Error(err, "invalid case %s passed", tc.name) + } + } +} + +// tests that different misbehaviours within MsgSubmitMisbehaviour can be marshaled +// and unmarshaled. +func (suite *TypesTestSuite) TestMarshalMsgSubmitMisbehaviour() { + var ( + msg *types.MsgSubmitMisbehaviour + err error + ) + + testCases := []struct { + name string + malleate func() + }{ + { + "solo machine client", func() { + soloMachine := ibctesting.NewSolomachine(suite.T(), suite.chainA.Codec, "solomachine", "", 2) + msg, err = types.NewMsgSubmitMisbehaviour(soloMachine.ClientID, soloMachine.CreateMisbehaviour(), suite.chainA.SenderAccount.GetAddress()) + suite.Require().NoError(err) + }, + }, + { + "tendermint client", func() { + height := types.NewHeight(0, uint64(suite.chainA.CurrentHeader.Height)) + heightMinus1 := types.NewHeight(0, uint64(suite.chainA.CurrentHeader.Height)-1) + header1 := suite.chainA.CreateTMClientHeader(suite.chainA.ChainID, int64(height.RevisionHeight), heightMinus1, suite.chainA.CurrentHeader.Time, suite.chainA.Vals, suite.chainA.Vals, suite.chainA.Signers) + header2 := suite.chainA.CreateTMClientHeader(suite.chainA.ChainID, int64(height.RevisionHeight), heightMinus1, suite.chainA.CurrentHeader.Time.Add(time.Minute), suite.chainA.Vals, suite.chainA.Vals, suite.chainA.Signers) + + misbehaviour := ibctmtypes.NewMisbehaviour("tendermint", header1, header2) + msg, err = types.NewMsgSubmitMisbehaviour("tendermint", misbehaviour, suite.chainA.SenderAccount.GetAddress()) + suite.Require().NoError(err) + + }, + }, + } + + for _, tc := range testCases { + tc := tc + + suite.Run(tc.name, func() { + suite.SetupTest() + + tc.malleate() + + cdc := suite.chainA.App.AppCodec() + + // marshal message + bz, err := cdc.MarshalJSON(msg) + suite.Require().NoError(err) + + // unmarshal message + newMsg := &types.MsgSubmitMisbehaviour{} + err = cdc.UnmarshalJSON(bz, newMsg) + suite.Require().NoError(err) + + suite.Require().True(proto.Equal(msg, newMsg)) + }) + } +} + +func (suite *TypesTestSuite) TestMsgSubmitMisbehaviour_ValidateBasic() { + var ( + msg = &types.MsgSubmitMisbehaviour{} + err error + ) + + cases := []struct { + name string + malleate 
func() + expPass bool + }{ + { + "invalid client-id", + func() { + msg.ClientId = "" + }, + false, + }, + { + "valid - tendermint misbehaviour", + func() { + height := types.NewHeight(0, uint64(suite.chainA.CurrentHeader.Height)) + heightMinus1 := types.NewHeight(0, uint64(suite.chainA.CurrentHeader.Height)-1) + header1 := suite.chainA.CreateTMClientHeader(suite.chainA.ChainID, int64(height.RevisionHeight), heightMinus1, suite.chainA.CurrentHeader.Time, suite.chainA.Vals, suite.chainA.Vals, suite.chainA.Signers) + header2 := suite.chainA.CreateTMClientHeader(suite.chainA.ChainID, int64(height.RevisionHeight), heightMinus1, suite.chainA.CurrentHeader.Time.Add(time.Minute), suite.chainA.Vals, suite.chainA.Vals, suite.chainA.Signers) + + misbehaviour := ibctmtypes.NewMisbehaviour("tendermint", header1, header2) + msg, err = types.NewMsgSubmitMisbehaviour("tendermint", misbehaviour, suite.chainA.SenderAccount.GetAddress()) + suite.Require().NoError(err) + }, + true, + }, + { + "invalid tendermint misbehaviour", + func() { + msg, err = types.NewMsgSubmitMisbehaviour("tendermint", &ibctmtypes.Misbehaviour{}, suite.chainA.SenderAccount.GetAddress()) + suite.Require().NoError(err) + }, + false, + }, + { + "failed to unpack misbehaviour", + func() { + msg.Misbehaviour = nil + }, + false, + }, + { + "invalid signer", + func() { + msg.Signer = "" + }, + false, + }, + { + "valid - solomachine misbehaviour", + func() { + soloMachine := ibctesting.NewSolomachine(suite.T(), suite.chainA.Codec, "solomachine", "", 2) + msg, err = types.NewMsgSubmitMisbehaviour(soloMachine.ClientID, soloMachine.CreateMisbehaviour(), suite.chainA.SenderAccount.GetAddress()) + suite.Require().NoError(err) + }, + true, + }, + { + "invalid solomachine misbehaviour", + func() { + msg, err = types.NewMsgSubmitMisbehaviour("solomachine", &solomachinetypes.Misbehaviour{}, suite.chainA.SenderAccount.GetAddress()) + suite.Require().NoError(err) + }, + false, + }, + { + "client-id mismatch", + func() { + soloMachineMisbehaviour := ibctesting.NewSolomachine(suite.T(), suite.chainA.Codec, "solomachine", "", 2).CreateMisbehaviour() + msg, err = types.NewMsgSubmitMisbehaviour("external", soloMachineMisbehaviour, suite.chainA.SenderAccount.GetAddress()) + suite.Require().NoError(err) + }, + false, + }, + } + + for _, tc := range cases { + tc.malleate() + err = msg.ValidateBasic() + if tc.expPass { + suite.Require().NoError(err, tc.name) + } else { + suite.Require().Error(err, tc.name) + } + } +} diff --git a/core/02-client/types/params.go b/core/02-client/types/params.go new file mode 100644 index 0000000000..6477e3f6f4 --- /dev/null +++ b/core/02-client/types/params.go @@ -0,0 +1,71 @@ +package types + +import ( + "fmt" + "strings" + + "github.com/cosmos/cosmos-sdk/x/ibc/core/exported" + paramtypes "github.com/cosmos/cosmos-sdk/x/params/types" +) + +var ( + // DefaultAllowedClients are "06-solomachine" and "07-tendermint" + DefaultAllowedClients = []string{exported.Solomachine, exported.Tendermint} + + // KeyAllowedClients is store's key for AllowedClients Params + KeyAllowedClients = []byte("AllowedClients") +) + +// ParamKeyTable type declaration for parameters +func ParamKeyTable() paramtypes.KeyTable { + return paramtypes.NewKeyTable().RegisterParamSet(&Params{}) +} + +// NewParams creates a new parameter configuration for the ibc client module +func NewParams(allowedClients ...string) Params { + return Params{ + AllowedClients: allowedClients, + } +} + +// DefaultParams is the default parameter configuration for the ibc client
module +func DefaultParams() Params { + return NewParams(DefaultAllowedClients...) +} + +// Validate all ibc client module parameters +func (p Params) Validate() error { + return validateClients(p.AllowedClients) +} + +// ParamSetPairs implements params.ParamSet +func (p *Params) ParamSetPairs() paramtypes.ParamSetPairs { + return paramtypes.ParamSetPairs{ + paramtypes.NewParamSetPair(KeyAllowedClients, p.AllowedClients, validateClients), + } +} + +// IsAllowedClient checks if the given client type is registered on the allowlist. +func (p Params) IsAllowedClient(clientType string) bool { + for _, allowedClient := range p.AllowedClients { + if allowedClient == clientType { + return true + } + } + return false +} + +func validateClients(i interface{}) error { + clients, ok := i.([]string) + if !ok { + return fmt.Errorf("invalid parameter type: %T", i) + } + + for i, clientType := range clients { + if strings.TrimSpace(clientType) == "" { + return fmt.Errorf("client type %d cannot be blank", i) + } + } + + return nil +} diff --git a/core/02-client/types/params_test.go b/core/02-client/types/params_test.go new file mode 100644 index 0000000000..dac80a4b42 --- /dev/null +++ b/core/02-client/types/params_test.go @@ -0,0 +1,30 @@ +package types + +import ( + "testing" + + "github.com/stretchr/testify/require" + + "github.com/cosmos/cosmos-sdk/x/ibc/core/exported" +) + +func TestValidateParams(t *testing.T) { + testCases := []struct { + name string + params Params + expPass bool + }{ + {"default params", DefaultParams(), true}, + {"custom params", NewParams(exported.Tendermint), true}, + {"blank client", NewParams(" "), false}, + } + + for _, tc := range testCases { + err := tc.params.Validate() + if tc.expPass { + require.NoError(t, err, tc.name) + } else { + require.Error(t, err, tc.name) + } + } +} diff --git a/core/02-client/types/proposal.go b/core/02-client/types/proposal.go new file mode 100644 index 0000000000..95b10aaf40 --- /dev/null +++ b/core/02-client/types/proposal.go @@ -0,0 +1,64 @@ +package types + +import ( + sdkerrors "github.com/cosmos/cosmos-sdk/types/errors" + govtypes "github.com/cosmos/cosmos-sdk/x/gov/types" +) + +const ( + // ProposalTypeClientUpdate defines the type for a ClientUpdateProposal + ProposalTypeClientUpdate = "ClientUpdate" +) + +var _ govtypes.Content = &ClientUpdateProposal{} + +func init() { + govtypes.RegisterProposalType(ProposalTypeClientUpdate) +} + +// NewClientUpdateProposal creates a new client update proposal. +func NewClientUpdateProposal(title, description, subjectClientID, substituteClientID string, initialHeight Height) *ClientUpdateProposal { + return &ClientUpdateProposal{ + Title: title, + Description: description, + SubjectClientId: subjectClientID, + SubstituteClientId: substituteClientID, + InitialHeight: initialHeight, + } +} + +// GetTitle returns the title of a client update proposal. +func (cup *ClientUpdateProposal) GetTitle() string { return cup.Title } + +// GetDescription returns the description of a client update proposal. +func (cup *ClientUpdateProposal) GetDescription() string { return cup.Description } + +// ProposalRoute returns the routing key of a client update proposal. +func (cup *ClientUpdateProposal) ProposalRoute() string { return RouterKey } + +// ProposalType returns the type of a client update proposal.
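// Editorial sketch, not part of this patch: the Params helpers from params.go
// above as seen by a caller. DefaultParams allows the solo machine and
// Tendermint client types; anything else (exported.Localhost, for example)
// must be added explicitly, and Validate only rejects blank entries.
func allowedClientsExample() error {
	p := types.DefaultParams()
	_ = p.IsAllowedClient(exported.Tendermint) // true by default
	_ = p.IsAllowedClient(exported.Localhost)  // false unless explicitly allowed

	custom := types.NewParams(exported.Tendermint, exported.Localhost)
	return custom.Validate() // nil: every entry is non-blank
}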
+func (cup *ClientUpdateProposal) ProposalType() string { return ProposalTypeClientUpdate } + +// ValidateBasic runs basic stateless validity checks +func (cup *ClientUpdateProposal) ValidateBasic() error { + err := govtypes.ValidateAbstract(cup) + if err != nil { + return err + } + + if cup.SubjectClientId == cup.SubstituteClientId { + return sdkerrors.Wrap(ErrInvalidSubstitute, "subject and substitute client identifiers are equal") + } + if _, _, err := ParseClientIdentifier(cup.SubjectClientId); err != nil { + return err + } + if _, _, err := ParseClientIdentifier(cup.SubstituteClientId); err != nil { + return err + } + + if cup.InitialHeight.IsZero() { + return sdkerrors.Wrap(ErrInvalidHeight, "initial height cannot be zero height") + } + + return nil +} diff --git a/core/02-client/types/proposal_test.go b/core/02-client/types/proposal_test.go new file mode 100644 index 0000000000..597e5cf8f8 --- /dev/null +++ b/core/02-client/types/proposal_test.go @@ -0,0 +1,86 @@ +package types_test + +import ( + "github.com/cosmos/cosmos-sdk/codec" + codectypes "github.com/cosmos/cosmos-sdk/codec/types" + govtypes "github.com/cosmos/cosmos-sdk/x/gov/types" + "github.com/cosmos/cosmos-sdk/x/ibc/core/02-client/types" + "github.com/cosmos/cosmos-sdk/x/ibc/core/exported" + ibctesting "github.com/cosmos/cosmos-sdk/x/ibc/testing" +) + +func (suite *TypesTestSuite) TestValidateBasic() { + subject, _ := suite.coordinator.SetupClients(suite.chainA, suite.chainB, exported.Tendermint) + subjectClientState := suite.chainA.GetClientState(subject) + substitute, _ := suite.coordinator.SetupClients(suite.chainA, suite.chainB, exported.Tendermint) + initialHeight := types.NewHeight(subjectClientState.GetLatestHeight().GetRevisionNumber(), subjectClientState.GetLatestHeight().GetRevisionHeight()+1) + + testCases := []struct { + name string + proposal govtypes.Content + expPass bool + }{ + { + "success", + types.NewClientUpdateProposal(ibctesting.Title, ibctesting.Description, subject, substitute, initialHeight), + true, + }, + { + "fails validate abstract - empty title", + types.NewClientUpdateProposal("", ibctesting.Description, subject, substitute, initialHeight), + false, + }, + { + "subject and substitute use the same identifier", + types.NewClientUpdateProposal(ibctesting.Title, ibctesting.Description, subject, subject, initialHeight), + false, + }, + { + "invalid subject clientID", + types.NewClientUpdateProposal(ibctesting.Title, ibctesting.Description, ibctesting.InvalidID, substitute, initialHeight), + false, + }, + { + "invalid substitute clientID", + types.NewClientUpdateProposal(ibctesting.Title, ibctesting.Description, subject, ibctesting.InvalidID, initialHeight), + false, + }, + { + "initial height is zero", + types.NewClientUpdateProposal(ibctesting.Title, ibctesting.Description, subject, substitute, types.ZeroHeight()), + false, + }, + } + + for _, tc := range testCases { + + err := tc.proposal.ValidateBasic() + + if tc.expPass { + suite.Require().NoError(err, tc.name) + } else { + suite.Require().Error(err, tc.name) + } + } +} + +// tests a client update proposal can be marshaled and unmarshaled +func (suite *TypesTestSuite) TestMarshalClientUpdateProposalProposal() { + // create proposal + proposal := types.NewClientUpdateProposal("update IBC client", "description", "subject", "substitute", types.NewHeight(1, 0)) + + // create codec + ir := codectypes.NewInterfaceRegistry() + types.RegisterInterfaces(ir) + govtypes.RegisterInterfaces(ir) + cdc := codec.NewProtoCodec(ir) + + // marshal message + 
bz, err := cdc.MarshalJSON(proposal) + suite.Require().NoError(err) + + // unmarshal proposal + newProposal := &types.ClientUpdateProposal{} + err = cdc.UnmarshalJSON(bz, newProposal) + suite.Require().NoError(err) +} diff --git a/core/02-client/types/query.go b/core/02-client/types/query.go new file mode 100644 index 0000000000..c46bbfcfe7 --- /dev/null +++ b/core/02-client/types/query.go @@ -0,0 +1,65 @@ +package types + +import ( + codectypes "github.com/cosmos/cosmos-sdk/codec/types" + "github.com/cosmos/cosmos-sdk/x/ibc/core/exported" +) + +var ( + _ codectypes.UnpackInterfacesMessage = QueryClientStateResponse{} + _ codectypes.UnpackInterfacesMessage = QueryClientStatesResponse{} + _ codectypes.UnpackInterfacesMessage = QueryConsensusStateResponse{} + _ codectypes.UnpackInterfacesMessage = QueryConsensusStatesResponse{} +) + +// UnpackInterfaces implements UnpackInterfacesMessage.UnpackInterfaces +func (qcsr QueryClientStatesResponse) UnpackInterfaces(unpacker codectypes.AnyUnpacker) error { + for _, cs := range qcsr.ClientStates { + if err := cs.UnpackInterfaces(unpacker); err != nil { + return err + } + } + return nil +} + +// NewQueryClientStateResponse creates a new QueryClientStateResponse instance. +func NewQueryClientStateResponse( + clientStateAny *codectypes.Any, proof []byte, height Height, +) *QueryClientStateResponse { + return &QueryClientStateResponse{ + ClientState: clientStateAny, + Proof: proof, + ProofHeight: height, + } +} + +// UnpackInterfaces implements UnpackInterfacesMessage.UnpackInterfaces +func (qcsr QueryClientStateResponse) UnpackInterfaces(unpacker codectypes.AnyUnpacker) error { + return unpacker.UnpackAny(qcsr.ClientState, new(exported.ClientState)) +} + +// UnpackInterfaces implements UnpackInterfacesMessage.UnpackInterfaces +func (qcsr QueryConsensusStatesResponse) UnpackInterfaces(unpacker codectypes.AnyUnpacker) error { + for _, cs := range qcsr.ConsensusStates { + if err := cs.UnpackInterfaces(unpacker); err != nil { + return err + } + } + return nil +} + +// NewQueryConsensusStateResponse creates a new QueryConsensusStateResponse instance. +func NewQueryConsensusStateResponse( + consensusStateAny *codectypes.Any, proof []byte, height Height, +) *QueryConsensusStateResponse { + return &QueryConsensusStateResponse{ + ConsensusState: consensusStateAny, + Proof: proof, + ProofHeight: height, + } +} + +// UnpackInterfaces implements UnpackInterfacesMessage.UnpackInterfaces +func (qcsr QueryConsensusStateResponse) UnpackInterfaces(unpacker codectypes.AnyUnpacker) error { + return unpacker.UnpackAny(qcsr.ConsensusState, new(exported.ConsensusState)) +} diff --git a/core/02-client/types/query.pb.go b/core/02-client/types/query.pb.go new file mode 100644 index 0000000000..bf74f2eba3 --- /dev/null +++ b/core/02-client/types/query.pb.go @@ -0,0 +1,2685 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: ibcgo/core/client/v1/query.proto + +package types + +import ( + context "context" + fmt "fmt" + types "github.com/cosmos/cosmos-sdk/codec/types" + query "github.com/cosmos/cosmos-sdk/types/query" + _ "github.com/gogo/protobuf/gogoproto" + grpc1 "github.com/gogo/protobuf/grpc" + proto "github.com/gogo/protobuf/proto" + _ "google.golang.org/genproto/googleapis/api/annotations" + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" + io "io" + math "math" + math_bits "math/bits" +) + +// Reference imports to suppress errors if they are not otherwise used.
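// Editorial sketch, not part of this patch: packing a client state into the
// response wrapper declared in query.go above and recovering the concrete
// interface again. The InterfaceRegistry created here doubles as the
// codectypes.AnyUnpacker that UnpackInterfaces expects; proof and height are
// carried through unchanged.
func queryResponseRoundTrip(clientState exported.ClientState, proof []byte, height types.Height) (exported.ClientState, error) {
	anyClient, err := types.PackClientState(clientState)
	if err != nil {
		return nil, err
	}
	resp := types.NewQueryClientStateResponse(anyClient, proof, height)

	ir := codectypes.NewInterfaceRegistry()
	types.RegisterInterfaces(ir)
	if err := resp.UnpackInterfaces(ir); err != nil {
		return nil, err
	}
	return types.UnpackClientState(resp.ClientState)
}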
+var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +// QueryClientStateRequest is the request type for the Query/ClientState RPC +// method +type QueryClientStateRequest struct { + // client state unique identifier + ClientId string `protobuf:"bytes,1,opt,name=client_id,json=clientId,proto3" json:"client_id,omitempty"` +} + +func (m *QueryClientStateRequest) Reset() { *m = QueryClientStateRequest{} } +func (m *QueryClientStateRequest) String() string { return proto.CompactTextString(m) } +func (*QueryClientStateRequest) ProtoMessage() {} +func (*QueryClientStateRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_833c7bc6da1addd1, []int{0} +} +func (m *QueryClientStateRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *QueryClientStateRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_QueryClientStateRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *QueryClientStateRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_QueryClientStateRequest.Merge(m, src) +} +func (m *QueryClientStateRequest) XXX_Size() int { + return m.Size() +} +func (m *QueryClientStateRequest) XXX_DiscardUnknown() { + xxx_messageInfo_QueryClientStateRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_QueryClientStateRequest proto.InternalMessageInfo + +func (m *QueryClientStateRequest) GetClientId() string { + if m != nil { + return m.ClientId + } + return "" +} + +// QueryClientStateResponse is the response type for the Query/ClientState RPC +// method. Besides the client state, it includes a proof and the height from +// which the proof was retrieved. 
+type QueryClientStateResponse struct { + // client state associated with the request identifier + ClientState *types.Any `protobuf:"bytes,1,opt,name=client_state,json=clientState,proto3" json:"client_state,omitempty"` + // merkle proof of existence + Proof []byte `protobuf:"bytes,2,opt,name=proof,proto3" json:"proof,omitempty"` + // height at which the proof was retrieved + ProofHeight Height `protobuf:"bytes,3,opt,name=proof_height,json=proofHeight,proto3" json:"proof_height"` +} + +func (m *QueryClientStateResponse) Reset() { *m = QueryClientStateResponse{} } +func (m *QueryClientStateResponse) String() string { return proto.CompactTextString(m) } +func (*QueryClientStateResponse) ProtoMessage() {} +func (*QueryClientStateResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_833c7bc6da1addd1, []int{1} +} +func (m *QueryClientStateResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *QueryClientStateResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_QueryClientStateResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *QueryClientStateResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_QueryClientStateResponse.Merge(m, src) +} +func (m *QueryClientStateResponse) XXX_Size() int { + return m.Size() +} +func (m *QueryClientStateResponse) XXX_DiscardUnknown() { + xxx_messageInfo_QueryClientStateResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_QueryClientStateResponse proto.InternalMessageInfo + +func (m *QueryClientStateResponse) GetClientState() *types.Any { + if m != nil { + return m.ClientState + } + return nil +} + +func (m *QueryClientStateResponse) GetProof() []byte { + if m != nil { + return m.Proof + } + return nil +} + +func (m *QueryClientStateResponse) GetProofHeight() Height { + if m != nil { + return m.ProofHeight + } + return Height{} +} + +// QueryClientStatesRequest is the request type for the Query/ClientStates RPC +// method +type QueryClientStatesRequest struct { + // pagination request + Pagination *query.PageRequest `protobuf:"bytes,1,opt,name=pagination,proto3" json:"pagination,omitempty"` +} + +func (m *QueryClientStatesRequest) Reset() { *m = QueryClientStatesRequest{} } +func (m *QueryClientStatesRequest) String() string { return proto.CompactTextString(m) } +func (*QueryClientStatesRequest) ProtoMessage() {} +func (*QueryClientStatesRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_833c7bc6da1addd1, []int{2} +} +func (m *QueryClientStatesRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *QueryClientStatesRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_QueryClientStatesRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *QueryClientStatesRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_QueryClientStatesRequest.Merge(m, src) +} +func (m *QueryClientStatesRequest) XXX_Size() int { + return m.Size() +} +func (m *QueryClientStatesRequest) XXX_DiscardUnknown() { + xxx_messageInfo_QueryClientStatesRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_QueryClientStatesRequest proto.InternalMessageInfo + +func (m *QueryClientStatesRequest) GetPagination() *query.PageRequest { + if m != nil { + return 
m.Pagination + } + return nil +} + +// QueryClientStatesResponse is the response type for the Query/ClientStates RPC +// method. +type QueryClientStatesResponse struct { + // list of stored ClientStates of the chain. + ClientStates IdentifiedClientStates `protobuf:"bytes,1,rep,name=client_states,json=clientStates,proto3,castrepeated=IdentifiedClientStates" json:"client_states"` + // pagination response + Pagination *query.PageResponse `protobuf:"bytes,2,opt,name=pagination,proto3" json:"pagination,omitempty"` +} + +func (m *QueryClientStatesResponse) Reset() { *m = QueryClientStatesResponse{} } +func (m *QueryClientStatesResponse) String() string { return proto.CompactTextString(m) } +func (*QueryClientStatesResponse) ProtoMessage() {} +func (*QueryClientStatesResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_833c7bc6da1addd1, []int{3} +} +func (m *QueryClientStatesResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *QueryClientStatesResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_QueryClientStatesResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *QueryClientStatesResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_QueryClientStatesResponse.Merge(m, src) +} +func (m *QueryClientStatesResponse) XXX_Size() int { + return m.Size() +} +func (m *QueryClientStatesResponse) XXX_DiscardUnknown() { + xxx_messageInfo_QueryClientStatesResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_QueryClientStatesResponse proto.InternalMessageInfo + +func (m *QueryClientStatesResponse) GetClientStates() IdentifiedClientStates { + if m != nil { + return m.ClientStates + } + return nil +} + +func (m *QueryClientStatesResponse) GetPagination() *query.PageResponse { + if m != nil { + return m.Pagination + } + return nil +} + +// QueryConsensusStateRequest is the request type for the Query/ConsensusState +// RPC method. Besides the consensus state, it includes a proof and the height +// from which the proof was retrieved. 
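// Editorial sketch, not part of this patch: the two ways of addressing a
// consensus state with the request type declared directly below, either an
// explicit revision number/height pair or the latest stored height via the
// LatestHeight flag. The client identifier is a placeholder value.
var (
	exampleConsensusStateAtHeight = types.QueryConsensusStateRequest{
		ClientId:       "07-tendermint-0",
		RevisionNumber: 1,
		RevisionHeight: 100,
	}
	exampleConsensusStateLatest = types.QueryConsensusStateRequest{
		ClientId:     "07-tendermint-0",
		LatestHeight: true,
	}
)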
+type QueryConsensusStateRequest struct { + // client identifier + ClientId string `protobuf:"bytes,1,opt,name=client_id,json=clientId,proto3" json:"client_id,omitempty"` + // consensus state revision number + RevisionNumber uint64 `protobuf:"varint,2,opt,name=revision_number,json=revisionNumber,proto3" json:"revision_number,omitempty"` + // consensus state revision height + RevisionHeight uint64 `protobuf:"varint,3,opt,name=revision_height,json=revisionHeight,proto3" json:"revision_height,omitempty"` + // latest_height overrrides the height field and queries the latest stored + // ConsensusState + LatestHeight bool `protobuf:"varint,4,opt,name=latest_height,json=latestHeight,proto3" json:"latest_height,omitempty"` +} + +func (m *QueryConsensusStateRequest) Reset() { *m = QueryConsensusStateRequest{} } +func (m *QueryConsensusStateRequest) String() string { return proto.CompactTextString(m) } +func (*QueryConsensusStateRequest) ProtoMessage() {} +func (*QueryConsensusStateRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_833c7bc6da1addd1, []int{4} +} +func (m *QueryConsensusStateRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *QueryConsensusStateRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_QueryConsensusStateRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *QueryConsensusStateRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_QueryConsensusStateRequest.Merge(m, src) +} +func (m *QueryConsensusStateRequest) XXX_Size() int { + return m.Size() +} +func (m *QueryConsensusStateRequest) XXX_DiscardUnknown() { + xxx_messageInfo_QueryConsensusStateRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_QueryConsensusStateRequest proto.InternalMessageInfo + +func (m *QueryConsensusStateRequest) GetClientId() string { + if m != nil { + return m.ClientId + } + return "" +} + +func (m *QueryConsensusStateRequest) GetRevisionNumber() uint64 { + if m != nil { + return m.RevisionNumber + } + return 0 +} + +func (m *QueryConsensusStateRequest) GetRevisionHeight() uint64 { + if m != nil { + return m.RevisionHeight + } + return 0 +} + +func (m *QueryConsensusStateRequest) GetLatestHeight() bool { + if m != nil { + return m.LatestHeight + } + return false +} + +// QueryConsensusStateResponse is the response type for the Query/ConsensusState +// RPC method +type QueryConsensusStateResponse struct { + // consensus state associated with the client identifier at the given height + ConsensusState *types.Any `protobuf:"bytes,1,opt,name=consensus_state,json=consensusState,proto3" json:"consensus_state,omitempty"` + // merkle proof of existence + Proof []byte `protobuf:"bytes,2,opt,name=proof,proto3" json:"proof,omitempty"` + // height at which the proof was retrieved + ProofHeight Height `protobuf:"bytes,3,opt,name=proof_height,json=proofHeight,proto3" json:"proof_height"` +} + +func (m *QueryConsensusStateResponse) Reset() { *m = QueryConsensusStateResponse{} } +func (m *QueryConsensusStateResponse) String() string { return proto.CompactTextString(m) } +func (*QueryConsensusStateResponse) ProtoMessage() {} +func (*QueryConsensusStateResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_833c7bc6da1addd1, []int{5} +} +func (m *QueryConsensusStateResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *QueryConsensusStateResponse) 
XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_QueryConsensusStateResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *QueryConsensusStateResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_QueryConsensusStateResponse.Merge(m, src) +} +func (m *QueryConsensusStateResponse) XXX_Size() int { + return m.Size() +} +func (m *QueryConsensusStateResponse) XXX_DiscardUnknown() { + xxx_messageInfo_QueryConsensusStateResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_QueryConsensusStateResponse proto.InternalMessageInfo + +func (m *QueryConsensusStateResponse) GetConsensusState() *types.Any { + if m != nil { + return m.ConsensusState + } + return nil +} + +func (m *QueryConsensusStateResponse) GetProof() []byte { + if m != nil { + return m.Proof + } + return nil +} + +func (m *QueryConsensusStateResponse) GetProofHeight() Height { + if m != nil { + return m.ProofHeight + } + return Height{} +} + +// QueryConsensusStatesRequest is the request type for the Query/ConsensusStates +// RPC method. +type QueryConsensusStatesRequest struct { + // client identifier + ClientId string `protobuf:"bytes,1,opt,name=client_id,json=clientId,proto3" json:"client_id,omitempty"` + // pagination request + Pagination *query.PageRequest `protobuf:"bytes,2,opt,name=pagination,proto3" json:"pagination,omitempty"` +} + +func (m *QueryConsensusStatesRequest) Reset() { *m = QueryConsensusStatesRequest{} } +func (m *QueryConsensusStatesRequest) String() string { return proto.CompactTextString(m) } +func (*QueryConsensusStatesRequest) ProtoMessage() {} +func (*QueryConsensusStatesRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_833c7bc6da1addd1, []int{6} +} +func (m *QueryConsensusStatesRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *QueryConsensusStatesRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_QueryConsensusStatesRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *QueryConsensusStatesRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_QueryConsensusStatesRequest.Merge(m, src) +} +func (m *QueryConsensusStatesRequest) XXX_Size() int { + return m.Size() +} +func (m *QueryConsensusStatesRequest) XXX_DiscardUnknown() { + xxx_messageInfo_QueryConsensusStatesRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_QueryConsensusStatesRequest proto.InternalMessageInfo + +func (m *QueryConsensusStatesRequest) GetClientId() string { + if m != nil { + return m.ClientId + } + return "" +} + +func (m *QueryConsensusStatesRequest) GetPagination() *query.PageRequest { + if m != nil { + return m.Pagination + } + return nil +} + +// QueryConsensusStatesResponse is the response type for the +// Query/ConsensusStates RPC method +type QueryConsensusStatesResponse struct { + // consensus states associated with the identifier + ConsensusStates []ConsensusStateWithHeight `protobuf:"bytes,1,rep,name=consensus_states,json=consensusStates,proto3" json:"consensus_states"` + // pagination response + Pagination *query.PageResponse `protobuf:"bytes,2,opt,name=pagination,proto3" json:"pagination,omitempty"` +} + +func (m *QueryConsensusStatesResponse) Reset() { *m = QueryConsensusStatesResponse{} } +func (m 
*QueryConsensusStatesResponse) String() string { return proto.CompactTextString(m) } +func (*QueryConsensusStatesResponse) ProtoMessage() {} +func (*QueryConsensusStatesResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_833c7bc6da1addd1, []int{7} +} +func (m *QueryConsensusStatesResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *QueryConsensusStatesResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_QueryConsensusStatesResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *QueryConsensusStatesResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_QueryConsensusStatesResponse.Merge(m, src) +} +func (m *QueryConsensusStatesResponse) XXX_Size() int { + return m.Size() +} +func (m *QueryConsensusStatesResponse) XXX_DiscardUnknown() { + xxx_messageInfo_QueryConsensusStatesResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_QueryConsensusStatesResponse proto.InternalMessageInfo + +func (m *QueryConsensusStatesResponse) GetConsensusStates() []ConsensusStateWithHeight { + if m != nil { + return m.ConsensusStates + } + return nil +} + +func (m *QueryConsensusStatesResponse) GetPagination() *query.PageResponse { + if m != nil { + return m.Pagination + } + return nil +} + +// QueryClientParamsRequest is the request type for the Query/ClientParams RPC +// method. +type QueryClientParamsRequest struct { +} + +func (m *QueryClientParamsRequest) Reset() { *m = QueryClientParamsRequest{} } +func (m *QueryClientParamsRequest) String() string { return proto.CompactTextString(m) } +func (*QueryClientParamsRequest) ProtoMessage() {} +func (*QueryClientParamsRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_833c7bc6da1addd1, []int{8} +} +func (m *QueryClientParamsRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *QueryClientParamsRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_QueryClientParamsRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *QueryClientParamsRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_QueryClientParamsRequest.Merge(m, src) +} +func (m *QueryClientParamsRequest) XXX_Size() int { + return m.Size() +} +func (m *QueryClientParamsRequest) XXX_DiscardUnknown() { + xxx_messageInfo_QueryClientParamsRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_QueryClientParamsRequest proto.InternalMessageInfo + +// QueryClientParamsResponse is the response type for the Query/ClientParams RPC +// method. +type QueryClientParamsResponse struct { + // params defines the parameters of the module. 
+ Params *Params `protobuf:"bytes,1,opt,name=params,proto3" json:"params,omitempty"` +} + +func (m *QueryClientParamsResponse) Reset() { *m = QueryClientParamsResponse{} } +func (m *QueryClientParamsResponse) String() string { return proto.CompactTextString(m) } +func (*QueryClientParamsResponse) ProtoMessage() {} +func (*QueryClientParamsResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_833c7bc6da1addd1, []int{9} +} +func (m *QueryClientParamsResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *QueryClientParamsResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_QueryClientParamsResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *QueryClientParamsResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_QueryClientParamsResponse.Merge(m, src) +} +func (m *QueryClientParamsResponse) XXX_Size() int { + return m.Size() +} +func (m *QueryClientParamsResponse) XXX_DiscardUnknown() { + xxx_messageInfo_QueryClientParamsResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_QueryClientParamsResponse proto.InternalMessageInfo + +func (m *QueryClientParamsResponse) GetParams() *Params { + if m != nil { + return m.Params + } + return nil +} + +func init() { + proto.RegisterType((*QueryClientStateRequest)(nil), "ibcgo.core.client.v1.QueryClientStateRequest") + proto.RegisterType((*QueryClientStateResponse)(nil), "ibcgo.core.client.v1.QueryClientStateResponse") + proto.RegisterType((*QueryClientStatesRequest)(nil), "ibcgo.core.client.v1.QueryClientStatesRequest") + proto.RegisterType((*QueryClientStatesResponse)(nil), "ibcgo.core.client.v1.QueryClientStatesResponse") + proto.RegisterType((*QueryConsensusStateRequest)(nil), "ibcgo.core.client.v1.QueryConsensusStateRequest") + proto.RegisterType((*QueryConsensusStateResponse)(nil), "ibcgo.core.client.v1.QueryConsensusStateResponse") + proto.RegisterType((*QueryConsensusStatesRequest)(nil), "ibcgo.core.client.v1.QueryConsensusStatesRequest") + proto.RegisterType((*QueryConsensusStatesResponse)(nil), "ibcgo.core.client.v1.QueryConsensusStatesResponse") + proto.RegisterType((*QueryClientParamsRequest)(nil), "ibcgo.core.client.v1.QueryClientParamsRequest") + proto.RegisterType((*QueryClientParamsResponse)(nil), "ibcgo.core.client.v1.QueryClientParamsResponse") +} + +func init() { proto.RegisterFile("ibcgo/core/client/v1/query.proto", fileDescriptor_833c7bc6da1addd1) } + +var fileDescriptor_833c7bc6da1addd1 = []byte{ + // 817 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xb4, 0x56, 0x4b, 0x4f, 0x1b, 0x47, + 0x1c, 0xf7, 0xf0, 0x12, 0x8c, 0x0d, 0xae, 0x46, 0x6e, 0x31, 0x0b, 0x35, 0xc6, 0x48, 0xc5, 0x6d, + 0xe5, 0x19, 0xec, 0x3e, 0x2f, 0x3d, 0x94, 0xaa, 0x50, 0x2e, 0x15, 0x6c, 0x0f, 0x95, 0x7a, 0xb1, + 0x76, 0xd7, 0xe3, 0xf5, 0x4a, 0xf6, 0xce, 0xe2, 0x59, 0x5b, 0x42, 0x88, 0x0b, 0x1f, 0xa0, 0x8a, + 0x94, 0x5b, 0xae, 0xb9, 0xe5, 0x84, 0x72, 0xcb, 0x37, 0xe0, 0x88, 0x14, 0x29, 0xca, 0x29, 0x89, + 0x70, 0x3e, 0x43, 0xce, 0xd1, 0xce, 0xcc, 0x9a, 0x5d, 0xb3, 0x84, 0x25, 0x4a, 0x6e, 0xe3, 0xff, + 0xf3, 0xf7, 0xfb, 0xbf, 0xbc, 0xb0, 0xec, 0x98, 0x96, 0xcd, 0x88, 0xc5, 0xfa, 0x94, 0x58, 0x5d, + 0x87, 0xba, 0x3e, 0x19, 0xd6, 0xc9, 0xd1, 0x80, 0xf6, 0x8f, 0xb1, 0xd7, 0x67, 0x3e, 0x43, 0x05, + 0x61, 0x81, 0x03, 0x0b, 0x2c, 0x2d, 0xf0, 0xb0, 0xae, 0x7d, 0x67, 0x31, 0xde, 0x63, 0x9c, 0x98, 
+ 0x06, 0xa7, 0xd2, 0x9c, 0x0c, 0xeb, 0x26, 0xf5, 0x8d, 0x3a, 0xf1, 0x0c, 0xdb, 0x71, 0x0d, 0xdf, + 0x61, 0xae, 0x8c, 0xa0, 0x6d, 0x24, 0xe6, 0x50, 0xb1, 0xa4, 0xc9, 0x8a, 0xcd, 0x98, 0xdd, 0xa5, + 0x44, 0xfc, 0x32, 0x07, 0x6d, 0x62, 0xb8, 0x2a, 0xbf, 0xb6, 0xa6, 0x54, 0x86, 0xe7, 0x10, 0xc3, + 0x75, 0x99, 0x2f, 0x42, 0x73, 0xa5, 0x2d, 0xd8, 0xcc, 0x66, 0xe2, 0x49, 0x82, 0x97, 0x94, 0x56, + 0x7e, 0x86, 0xcb, 0x87, 0x01, 0xa6, 0x3f, 0x44, 0x8e, 0x7f, 0x7c, 0xc3, 0xa7, 0x3a, 0x3d, 0x1a, + 0x50, 0xee, 0xa3, 0x55, 0xb8, 0x20, 0x33, 0x37, 0x9d, 0x56, 0x11, 0x94, 0x41, 0x75, 0x41, 0x9f, + 0x97, 0x82, 0xfd, 0x56, 0xe5, 0x1c, 0xc0, 0xe2, 0x4d, 0x47, 0xee, 0x31, 0x97, 0x53, 0xf4, 0x0b, + 0xcc, 0x29, 0x4f, 0x1e, 0xc8, 0x85, 0x73, 0xb6, 0x51, 0xc0, 0x12, 0x1f, 0x0e, 0xa1, 0xe3, 0xdf, + 0xdd, 0x63, 0x3d, 0x6b, 0x5d, 0x07, 0x40, 0x05, 0x38, 0xeb, 0xf5, 0x19, 0x6b, 0x17, 0xa7, 0xca, + 0xa0, 0x9a, 0xd3, 0xe5, 0x0f, 0xf4, 0x27, 0xcc, 0x89, 0x47, 0xb3, 0x43, 0x1d, 0xbb, 0xe3, 0x17, + 0xa7, 0x45, 0xb8, 0x35, 0x9c, 0x54, 0x6e, 0xfc, 0x97, 0xb0, 0xd9, 0x99, 0xb9, 0x78, 0xb5, 0x9e, + 0xd1, 0xb3, 0xc2, 0x4f, 0x8a, 0x2a, 0xe6, 0x4d, 0xc4, 0x3c, 0xe4, 0xba, 0x0b, 0xe1, 0x75, 0x33, + 0x14, 0xde, 0x6f, 0xb0, 0xec, 0x1c, 0x0e, 0x3a, 0x87, 0x65, 0xa3, 0x55, 0xe7, 0xf0, 0x81, 0x61, + 0x87, 0x75, 0xd2, 0x23, 0x9e, 0x95, 0x17, 0x00, 0xae, 0x24, 0x24, 0x51, 0x75, 0xf1, 0xe0, 0x62, + 0xb4, 0x2e, 0xbc, 0x08, 0xca, 0xd3, 0xd5, 0x6c, 0xe3, 0xfb, 0x64, 0x26, 0xfb, 0x2d, 0xea, 0xfa, + 0x4e, 0xdb, 0xa1, 0xad, 0x48, 0xb0, 0x9d, 0x52, 0x40, 0xec, 0xc9, 0xeb, 0xf5, 0xaf, 0x12, 0xd5, + 0x5c, 0xcf, 0x45, 0xea, 0xc9, 0xd1, 0x5e, 0x8c, 0xd7, 0x94, 0xe0, 0xb5, 0x75, 0x27, 0x2f, 0x09, + 0x37, 0x46, 0xec, 0x1c, 0x40, 0x4d, 0x12, 0x0b, 0x54, 0x2e, 0x1f, 0xf0, 0xd4, 0xb3, 0x82, 0xb6, + 0x60, 0xbe, 0x4f, 0x87, 0x0e, 0x77, 0x98, 0xdb, 0x74, 0x07, 0x3d, 0x93, 0xf6, 0x05, 0x92, 0x19, + 0x7d, 0x29, 0x14, 0xff, 0x2d, 0xa4, 0x31, 0xc3, 0x48, 0xaf, 0x23, 0x86, 0xb2, 0x95, 0x68, 0x13, + 0x2e, 0x76, 0x03, 0x7e, 0x7e, 0x68, 0x36, 0x53, 0x06, 0xd5, 0x79, 0x3d, 0x27, 0x85, 0xaa, 0xdf, + 0xcf, 0x00, 0x5c, 0x4d, 0x84, 0xac, 0xba, 0xf1, 0x1b, 0xcc, 0x5b, 0xa1, 0x26, 0xc5, 0xa0, 0x2e, + 0x59, 0xb1, 0x30, 0x9f, 0x77, 0x56, 0xcf, 0x92, 0xb1, 0xf3, 0x54, 0xf5, 0xde, 0x4d, 0x68, 0xfa, + 0xc7, 0x0c, 0xf3, 0x05, 0x80, 0x6b, 0xc9, 0x20, 0x54, 0x05, 0x9b, 0xf0, 0x8b, 0x89, 0x0a, 0x86, + 0x23, 0x8d, 0x93, 0x09, 0xc7, 0x03, 0xfd, 0xeb, 0xf8, 0x9d, 0x58, 0x09, 0xf2, 0xf1, 0x12, 0x7f, + 0xc2, 0xf1, 0xd5, 0x62, 0xbb, 0x7f, 0x60, 0xf4, 0x8d, 0x5e, 0x58, 0xcb, 0xca, 0x61, 0x6c, 0x65, + 0x43, 0x9d, 0xa2, 0xf8, 0x23, 0x9c, 0xf3, 0x84, 0x44, 0xcd, 0xc6, 0x2d, 0x9d, 0x54, 0x5e, 0xca, + 0xb6, 0xf1, 0x6e, 0x0e, 0xce, 0x8a, 0x98, 0xe8, 0x31, 0x80, 0xd9, 0xc8, 0x7e, 0xa2, 0x5a, 0xb2, + 0xff, 0x2d, 0x37, 0x58, 0xc3, 0x69, 0xcd, 0x25, 0xdc, 0xca, 0x4f, 0x67, 0xcf, 0xdf, 0x3e, 0x9c, + 0x22, 0xa8, 0x46, 0x1c, 0xd3, 0x4a, 0xfe, 0x1f, 0x51, 0x8d, 0x22, 0x27, 0xe3, 0x01, 0x3a, 0x45, + 0x8f, 0x00, 0xcc, 0x45, 0xaf, 0x08, 0x4a, 0x99, 0x37, 0xac, 0xa1, 0x46, 0x52, 0xdb, 0x2b, 0xa0, + 0xdf, 0x0a, 0xa0, 0x9b, 0x68, 0xe3, 0x4e, 0xa0, 0x68, 0x04, 0xe0, 0x52, 0x7c, 0x70, 0xd0, 0xf6, + 0x87, 0xd2, 0x25, 0x1d, 0x28, 0xad, 0x7e, 0x0f, 0x0f, 0x05, 0xb1, 0x2b, 0x20, 0xb6, 0x51, 0x2b, + 0x11, 0xe2, 0xc4, 0xdc, 0x47, 0xcb, 0x49, 0xc2, 0x6b, 0x45, 0x4e, 0x26, 0xee, 0xde, 0x29, 0x91, + 0x67, 0x21, 0xa2, 0x90, 0x82, 0x53, 0xf4, 0x14, 0xc0, 0xfc, 0xc4, 0x9e, 0xa1, 0xf4, 0xa0, 0xc7, + 0x8d, 0x68, 0xdc, 0xc7, 0x45, 0x11, 0xfd, 0x55, 0x10, 0x6d, 0xa0, 0xed, 0xfb, 0x12, 0x45, 0xff, + 0x8f, 0xe7, 0x46, 0x2e, 
0x40, 0x8a, 0xb9, 0x89, 0xed, 0x5e, 0x8a, 0xb9, 0x89, 0xef, 0x63, 0xe5, + 0x6b, 0x81, 0x75, 0x19, 0x7d, 0x29, 0xb1, 0x8e, 0x61, 0xca, 0xc5, 0xdb, 0xd9, 0xbb, 0xb8, 0x2a, + 0x81, 0xcb, 0xab, 0x12, 0x78, 0x73, 0x55, 0x02, 0x0f, 0x46, 0xa5, 0xcc, 0xe5, 0xa8, 0x94, 0x79, + 0x39, 0x2a, 0x65, 0xfe, 0xab, 0xd9, 0x8e, 0xdf, 0x19, 0x98, 0xd8, 0x62, 0x3d, 0xa2, 0xbe, 0xc8, + 0x1c, 0xd3, 0xaa, 0x85, 0x5f, 0x5b, 0xdb, 0x8d, 0x9a, 0x0a, 0xe6, 0x1f, 0x7b, 0x94, 0x9b, 0x73, + 0xe2, 0xf6, 0xff, 0xf0, 0x3e, 0x00, 0x00, 0xff, 0xff, 0x3e, 0x73, 0xec, 0x7c, 0xf6, 0x09, 0x00, + 0x00, +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConn + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion4 + +// QueryClient is the client API for Query service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. +type QueryClient interface { + // ClientState queries an IBC light client. + ClientState(ctx context.Context, in *QueryClientStateRequest, opts ...grpc.CallOption) (*QueryClientStateResponse, error) + // ClientStates queries all the IBC light clients of a chain. + ClientStates(ctx context.Context, in *QueryClientStatesRequest, opts ...grpc.CallOption) (*QueryClientStatesResponse, error) + // ConsensusState queries a consensus state associated with a client state at + // a given height. + ConsensusState(ctx context.Context, in *QueryConsensusStateRequest, opts ...grpc.CallOption) (*QueryConsensusStateResponse, error) + // ConsensusStates queries all the consensus state associated with a given + // client. + ConsensusStates(ctx context.Context, in *QueryConsensusStatesRequest, opts ...grpc.CallOption) (*QueryConsensusStatesResponse, error) + // ClientParams queries all parameters of the ibc client. + ClientParams(ctx context.Context, in *QueryClientParamsRequest, opts ...grpc.CallOption) (*QueryClientParamsResponse, error) +} + +type queryClient struct { + cc grpc1.ClientConn +} + +func NewQueryClient(cc grpc1.ClientConn) QueryClient { + return &queryClient{cc} +} + +func (c *queryClient) ClientState(ctx context.Context, in *QueryClientStateRequest, opts ...grpc.CallOption) (*QueryClientStateResponse, error) { + out := new(QueryClientStateResponse) + err := c.cc.Invoke(ctx, "/ibcgo.core.client.v1.Query/ClientState", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *queryClient) ClientStates(ctx context.Context, in *QueryClientStatesRequest, opts ...grpc.CallOption) (*QueryClientStatesResponse, error) { + out := new(QueryClientStatesResponse) + err := c.cc.Invoke(ctx, "/ibcgo.core.client.v1.Query/ClientStates", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *queryClient) ConsensusState(ctx context.Context, in *QueryConsensusStateRequest, opts ...grpc.CallOption) (*QueryConsensusStateResponse, error) { + out := new(QueryConsensusStateResponse) + err := c.cc.Invoke(ctx, "/ibcgo.core.client.v1.Query/ConsensusState", in, out, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +func (c *queryClient) ConsensusStates(ctx context.Context, in *QueryConsensusStatesRequest, opts ...grpc.CallOption) (*QueryConsensusStatesResponse, error) { + out := new(QueryConsensusStatesResponse) + err := c.cc.Invoke(ctx, "/ibcgo.core.client.v1.Query/ConsensusStates", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *queryClient) ClientParams(ctx context.Context, in *QueryClientParamsRequest, opts ...grpc.CallOption) (*QueryClientParamsResponse, error) { + out := new(QueryClientParamsResponse) + err := c.cc.Invoke(ctx, "/ibcgo.core.client.v1.Query/ClientParams", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// QueryServer is the server API for Query service. +type QueryServer interface { + // ClientState queries an IBC light client. + ClientState(context.Context, *QueryClientStateRequest) (*QueryClientStateResponse, error) + // ClientStates queries all the IBC light clients of a chain. + ClientStates(context.Context, *QueryClientStatesRequest) (*QueryClientStatesResponse, error) + // ConsensusState queries a consensus state associated with a client state at + // a given height. + ConsensusState(context.Context, *QueryConsensusStateRequest) (*QueryConsensusStateResponse, error) + // ConsensusStates queries all the consensus state associated with a given + // client. + ConsensusStates(context.Context, *QueryConsensusStatesRequest) (*QueryConsensusStatesResponse, error) + // ClientParams queries all parameters of the ibc client. + ClientParams(context.Context, *QueryClientParamsRequest) (*QueryClientParamsResponse, error) +} + +// UnimplementedQueryServer can be embedded to have forward compatible implementations. 
+type UnimplementedQueryServer struct { +} + +func (*UnimplementedQueryServer) ClientState(ctx context.Context, req *QueryClientStateRequest) (*QueryClientStateResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method ClientState not implemented") +} +func (*UnimplementedQueryServer) ClientStates(ctx context.Context, req *QueryClientStatesRequest) (*QueryClientStatesResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method ClientStates not implemented") +} +func (*UnimplementedQueryServer) ConsensusState(ctx context.Context, req *QueryConsensusStateRequest) (*QueryConsensusStateResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method ConsensusState not implemented") +} +func (*UnimplementedQueryServer) ConsensusStates(ctx context.Context, req *QueryConsensusStatesRequest) (*QueryConsensusStatesResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method ConsensusStates not implemented") +} +func (*UnimplementedQueryServer) ClientParams(ctx context.Context, req *QueryClientParamsRequest) (*QueryClientParamsResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method ClientParams not implemented") +} + +func RegisterQueryServer(s grpc1.Server, srv QueryServer) { + s.RegisterService(&_Query_serviceDesc, srv) +} + +func _Query_ClientState_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(QueryClientStateRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(QueryServer).ClientState(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/ibcgo.core.client.v1.Query/ClientState", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(QueryServer).ClientState(ctx, req.(*QueryClientStateRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Query_ClientStates_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(QueryClientStatesRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(QueryServer).ClientStates(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/ibcgo.core.client.v1.Query/ClientStates", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(QueryServer).ClientStates(ctx, req.(*QueryClientStatesRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Query_ConsensusState_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(QueryConsensusStateRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(QueryServer).ConsensusState(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/ibcgo.core.client.v1.Query/ConsensusState", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(QueryServer).ConsensusState(ctx, req.(*QueryConsensusStateRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Query_ConsensusStates_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(QueryConsensusStatesRequest) + if err := dec(in); err != nil { + return nil, 
err + } + if interceptor == nil { + return srv.(QueryServer).ConsensusStates(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/ibcgo.core.client.v1.Query/ConsensusStates", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(QueryServer).ConsensusStates(ctx, req.(*QueryConsensusStatesRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Query_ClientParams_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(QueryClientParamsRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(QueryServer).ClientParams(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/ibcgo.core.client.v1.Query/ClientParams", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(QueryServer).ClientParams(ctx, req.(*QueryClientParamsRequest)) + } + return interceptor(ctx, in, info, handler) +} + +var _Query_serviceDesc = grpc.ServiceDesc{ + ServiceName: "ibcgo.core.client.v1.Query", + HandlerType: (*QueryServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "ClientState", + Handler: _Query_ClientState_Handler, + }, + { + MethodName: "ClientStates", + Handler: _Query_ClientStates_Handler, + }, + { + MethodName: "ConsensusState", + Handler: _Query_ConsensusState_Handler, + }, + { + MethodName: "ConsensusStates", + Handler: _Query_ConsensusStates_Handler, + }, + { + MethodName: "ClientParams", + Handler: _Query_ClientParams_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "ibcgo/core/client/v1/query.proto", +} + +func (m *QueryClientStateRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *QueryClientStateRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *QueryClientStateRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.ClientId) > 0 { + i -= len(m.ClientId) + copy(dAtA[i:], m.ClientId) + i = encodeVarintQuery(dAtA, i, uint64(len(m.ClientId))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *QueryClientStateResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *QueryClientStateResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *QueryClientStateResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.ProofHeight.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintQuery(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + if len(m.Proof) > 0 { + i -= len(m.Proof) + copy(dAtA[i:], m.Proof) + i = encodeVarintQuery(dAtA, i, uint64(len(m.Proof))) + i-- + dAtA[i] = 0x12 + } + if m.ClientState != nil { + { + size, err := m.ClientState.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintQuery(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m 
*QueryClientStatesRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *QueryClientStatesRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *QueryClientStatesRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Pagination != nil { + { + size, err := m.Pagination.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintQuery(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *QueryClientStatesResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *QueryClientStatesResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *QueryClientStatesResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Pagination != nil { + { + size, err := m.Pagination.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintQuery(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if len(m.ClientStates) > 0 { + for iNdEx := len(m.ClientStates) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.ClientStates[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintQuery(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *QueryConsensusStateRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *QueryConsensusStateRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *QueryConsensusStateRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.LatestHeight { + i-- + if m.LatestHeight { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x20 + } + if m.RevisionHeight != 0 { + i = encodeVarintQuery(dAtA, i, uint64(m.RevisionHeight)) + i-- + dAtA[i] = 0x18 + } + if m.RevisionNumber != 0 { + i = encodeVarintQuery(dAtA, i, uint64(m.RevisionNumber)) + i-- + dAtA[i] = 0x10 + } + if len(m.ClientId) > 0 { + i -= len(m.ClientId) + copy(dAtA[i:], m.ClientId) + i = encodeVarintQuery(dAtA, i, uint64(len(m.ClientId))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *QueryConsensusStateResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *QueryConsensusStateResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *QueryConsensusStateResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.ProofHeight.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintQuery(dAtA, i, uint64(size)) + } + 
i-- + dAtA[i] = 0x1a + if len(m.Proof) > 0 { + i -= len(m.Proof) + copy(dAtA[i:], m.Proof) + i = encodeVarintQuery(dAtA, i, uint64(len(m.Proof))) + i-- + dAtA[i] = 0x12 + } + if m.ConsensusState != nil { + { + size, err := m.ConsensusState.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintQuery(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *QueryConsensusStatesRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *QueryConsensusStatesRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *QueryConsensusStatesRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Pagination != nil { + { + size, err := m.Pagination.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintQuery(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if len(m.ClientId) > 0 { + i -= len(m.ClientId) + copy(dAtA[i:], m.ClientId) + i = encodeVarintQuery(dAtA, i, uint64(len(m.ClientId))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *QueryConsensusStatesResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *QueryConsensusStatesResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *QueryConsensusStatesResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Pagination != nil { + { + size, err := m.Pagination.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintQuery(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if len(m.ConsensusStates) > 0 { + for iNdEx := len(m.ConsensusStates) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.ConsensusStates[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintQuery(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *QueryClientParamsRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *QueryClientParamsRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *QueryClientParamsRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + return len(dAtA) - i, nil +} + +func (m *QueryClientParamsResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *QueryClientParamsResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *QueryClientParamsResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Params != nil { + { + size, err := 
m.Params.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintQuery(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func encodeVarintQuery(dAtA []byte, offset int, v uint64) int { + offset -= sovQuery(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *QueryClientStateRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.ClientId) + if l > 0 { + n += 1 + l + sovQuery(uint64(l)) + } + return n +} + +func (m *QueryClientStateResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.ClientState != nil { + l = m.ClientState.Size() + n += 1 + l + sovQuery(uint64(l)) + } + l = len(m.Proof) + if l > 0 { + n += 1 + l + sovQuery(uint64(l)) + } + l = m.ProofHeight.Size() + n += 1 + l + sovQuery(uint64(l)) + return n +} + +func (m *QueryClientStatesRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Pagination != nil { + l = m.Pagination.Size() + n += 1 + l + sovQuery(uint64(l)) + } + return n +} + +func (m *QueryClientStatesResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.ClientStates) > 0 { + for _, e := range m.ClientStates { + l = e.Size() + n += 1 + l + sovQuery(uint64(l)) + } + } + if m.Pagination != nil { + l = m.Pagination.Size() + n += 1 + l + sovQuery(uint64(l)) + } + return n +} + +func (m *QueryConsensusStateRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.ClientId) + if l > 0 { + n += 1 + l + sovQuery(uint64(l)) + } + if m.RevisionNumber != 0 { + n += 1 + sovQuery(uint64(m.RevisionNumber)) + } + if m.RevisionHeight != 0 { + n += 1 + sovQuery(uint64(m.RevisionHeight)) + } + if m.LatestHeight { + n += 2 + } + return n +} + +func (m *QueryConsensusStateResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.ConsensusState != nil { + l = m.ConsensusState.Size() + n += 1 + l + sovQuery(uint64(l)) + } + l = len(m.Proof) + if l > 0 { + n += 1 + l + sovQuery(uint64(l)) + } + l = m.ProofHeight.Size() + n += 1 + l + sovQuery(uint64(l)) + return n +} + +func (m *QueryConsensusStatesRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.ClientId) + if l > 0 { + n += 1 + l + sovQuery(uint64(l)) + } + if m.Pagination != nil { + l = m.Pagination.Size() + n += 1 + l + sovQuery(uint64(l)) + } + return n +} + +func (m *QueryConsensusStatesResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.ConsensusStates) > 0 { + for _, e := range m.ConsensusStates { + l = e.Size() + n += 1 + l + sovQuery(uint64(l)) + } + } + if m.Pagination != nil { + l = m.Pagination.Size() + n += 1 + l + sovQuery(uint64(l)) + } + return n +} + +func (m *QueryClientParamsRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + return n +} + +func (m *QueryClientParamsResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Params != nil { + l = m.Params.Size() + n += 1 + l + sovQuery(uint64(l)) + } + return n +} + +func sovQuery(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozQuery(x uint64) (n int) { + return sovQuery(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *QueryClientStateRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire 
uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: QueryClientStateRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: QueryClientStateRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ClientId", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ClientId = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipQuery(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthQuery + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *QueryClientStateResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: QueryClientStateResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: QueryClientStateResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ClientState", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ClientState == nil { + m.ClientState = &types.Any{} + } + if err := m.ClientState.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Proof", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex 
> l { + return io.ErrUnexpectedEOF + } + m.Proof = append(m.Proof[:0], dAtA[iNdEx:postIndex]...) + if m.Proof == nil { + m.Proof = []byte{} + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ProofHeight", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ProofHeight.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipQuery(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthQuery + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *QueryClientStatesRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: QueryClientStatesRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: QueryClientStatesRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Pagination", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Pagination == nil { + m.Pagination = &query.PageRequest{} + } + if err := m.Pagination.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipQuery(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthQuery + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *QueryClientStatesResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: QueryClientStatesResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + 
return fmt.Errorf("proto: QueryClientStatesResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ClientStates", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ClientStates = append(m.ClientStates, IdentifiedClientState{}) + if err := m.ClientStates[len(m.ClientStates)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Pagination", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Pagination == nil { + m.Pagination = &query.PageResponse{} + } + if err := m.Pagination.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipQuery(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthQuery + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *QueryConsensusStateRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: QueryConsensusStateRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: QueryConsensusStateRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ClientId", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ClientId = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field RevisionNumber", wireType) + } + m.RevisionNumber = 0 + for shift := uint(0); ; shift += 7 { + 
if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.RevisionNumber |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field RevisionHeight", wireType) + } + m.RevisionHeight = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.RevisionHeight |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field LatestHeight", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.LatestHeight = bool(v != 0) + default: + iNdEx = preIndex + skippy, err := skipQuery(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthQuery + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *QueryConsensusStateResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: QueryConsensusStateResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: QueryConsensusStateResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ConsensusState", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ConsensusState == nil { + m.ConsensusState = &types.Any{} + } + if err := m.ConsensusState.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Proof", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Proof = append(m.Proof[:0], dAtA[iNdEx:postIndex]...) 
+ if m.Proof == nil { + m.Proof = []byte{} + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ProofHeight", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ProofHeight.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipQuery(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthQuery + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *QueryConsensusStatesRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: QueryConsensusStatesRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: QueryConsensusStatesRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ClientId", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ClientId = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Pagination", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Pagination == nil { + m.Pagination = &query.PageRequest{} + } + if err := m.Pagination.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipQuery(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthQuery + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return 
nil +} +func (m *QueryConsensusStatesResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: QueryConsensusStatesResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: QueryConsensusStatesResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ConsensusStates", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ConsensusStates = append(m.ConsensusStates, ConsensusStateWithHeight{}) + if err := m.ConsensusStates[len(m.ConsensusStates)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Pagination", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Pagination == nil { + m.Pagination = &query.PageResponse{} + } + if err := m.Pagination.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipQuery(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthQuery + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *QueryClientParamsRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: QueryClientParamsRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: QueryClientParamsRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipQuery(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthQuery + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + 
iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *QueryClientParamsResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: QueryClientParamsResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: QueryClientParamsResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Params", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Params == nil { + m.Params = &Params{} + } + if err := m.Params.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipQuery(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthQuery + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipQuery(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowQuery + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowQuery + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowQuery + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthQuery + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupQuery + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthQuery + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthQuery = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowQuery = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupQuery = fmt.Errorf("proto: unexpected end of group") +) diff --git a/core/02-client/types/query.pb.gw.go b/core/02-client/types/query.pb.gw.go new file mode 100644 
index 0000000000..ceef5c325f --- /dev/null +++ b/core/02-client/types/query.pb.gw.go @@ -0,0 +1,602 @@ +// Code generated by protoc-gen-grpc-gateway. DO NOT EDIT. +// source: ibcgo/core/client/v1/query.proto + +/* +Package types is a reverse proxy. + +It translates gRPC into RESTful JSON APIs. +*/ +package types + +import ( + "context" + "io" + "net/http" + + "github.com/golang/protobuf/descriptor" + "github.com/golang/protobuf/proto" + "github.com/grpc-ecosystem/grpc-gateway/runtime" + "github.com/grpc-ecosystem/grpc-gateway/utilities" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/status" +) + +// Suppress "imported and not used" errors +var _ codes.Code +var _ io.Reader +var _ status.Status +var _ = runtime.String +var _ = utilities.NewDoubleArray +var _ = descriptor.ForMessage + +func request_Query_ClientState_0(ctx context.Context, marshaler runtime.Marshaler, client QueryClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq QueryClientStateRequest + var metadata runtime.ServerMetadata + + var ( + val string + ok bool + err error + _ = err + ) + + val, ok = pathParams["client_id"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "client_id") + } + + protoReq.ClientId, err = runtime.String(val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "client_id", err) + } + + msg, err := client.ClientState(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err + +} + +func local_request_Query_ClientState_0(ctx context.Context, marshaler runtime.Marshaler, server QueryServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq QueryClientStateRequest + var metadata runtime.ServerMetadata + + var ( + val string + ok bool + err error + _ = err + ) + + val, ok = pathParams["client_id"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "client_id") + } + + protoReq.ClientId, err = runtime.String(val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "client_id", err) + } + + msg, err := server.ClientState(ctx, &protoReq) + return msg, metadata, err + +} + +var ( + filter_Query_ClientStates_0 = &utilities.DoubleArray{Encoding: map[string]int{}, Base: []int(nil), Check: []int(nil)} +) + +func request_Query_ClientStates_0(ctx context.Context, marshaler runtime.Marshaler, client QueryClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq QueryClientStatesRequest + var metadata runtime.ServerMetadata + + if err := req.ParseForm(); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_Query_ClientStates_0); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + msg, err := client.ClientStates(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err + +} + +func local_request_Query_ClientStates_0(ctx context.Context, marshaler runtime.Marshaler, server QueryServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, 
error) { + var protoReq QueryClientStatesRequest + var metadata runtime.ServerMetadata + + if err := req.ParseForm(); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_Query_ClientStates_0); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + msg, err := server.ClientStates(ctx, &protoReq) + return msg, metadata, err + +} + +var ( + filter_Query_ConsensusState_0 = &utilities.DoubleArray{Encoding: map[string]int{"client_id": 0, "revision_number": 1, "revision_height": 2}, Base: []int{1, 1, 2, 3, 0, 0, 0}, Check: []int{0, 1, 1, 1, 2, 3, 4}} +) + +func request_Query_ConsensusState_0(ctx context.Context, marshaler runtime.Marshaler, client QueryClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq QueryConsensusStateRequest + var metadata runtime.ServerMetadata + + var ( + val string + ok bool + err error + _ = err + ) + + val, ok = pathParams["client_id"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "client_id") + } + + protoReq.ClientId, err = runtime.String(val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "client_id", err) + } + + val, ok = pathParams["revision_number"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "revision_number") + } + + protoReq.RevisionNumber, err = runtime.Uint64(val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "revision_number", err) + } + + val, ok = pathParams["revision_height"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "revision_height") + } + + protoReq.RevisionHeight, err = runtime.Uint64(val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "revision_height", err) + } + + if err := req.ParseForm(); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_Query_ConsensusState_0); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + msg, err := client.ConsensusState(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err + +} + +func local_request_Query_ConsensusState_0(ctx context.Context, marshaler runtime.Marshaler, server QueryServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq QueryConsensusStateRequest + var metadata runtime.ServerMetadata + + var ( + val string + ok bool + err error + _ = err + ) + + val, ok = pathParams["client_id"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "client_id") + } + + protoReq.ClientId, err = runtime.String(val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "client_id", err) + } + + val, ok = pathParams["revision_number"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "revision_number") + } + + protoReq.RevisionNumber, err = runtime.Uint64(val) + + if err != nil { + return nil, metadata, 
status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "revision_number", err) + } + + val, ok = pathParams["revision_height"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "revision_height") + } + + protoReq.RevisionHeight, err = runtime.Uint64(val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "revision_height", err) + } + + if err := req.ParseForm(); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_Query_ConsensusState_0); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + msg, err := server.ConsensusState(ctx, &protoReq) + return msg, metadata, err + +} + +var ( + filter_Query_ConsensusStates_0 = &utilities.DoubleArray{Encoding: map[string]int{"client_id": 0}, Base: []int{1, 1, 0}, Check: []int{0, 1, 2}} +) + +func request_Query_ConsensusStates_0(ctx context.Context, marshaler runtime.Marshaler, client QueryClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq QueryConsensusStatesRequest + var metadata runtime.ServerMetadata + + var ( + val string + ok bool + err error + _ = err + ) + + val, ok = pathParams["client_id"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "client_id") + } + + protoReq.ClientId, err = runtime.String(val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "client_id", err) + } + + if err := req.ParseForm(); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_Query_ConsensusStates_0); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + msg, err := client.ConsensusStates(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err + +} + +func local_request_Query_ConsensusStates_0(ctx context.Context, marshaler runtime.Marshaler, server QueryServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq QueryConsensusStatesRequest + var metadata runtime.ServerMetadata + + var ( + val string + ok bool + err error + _ = err + ) + + val, ok = pathParams["client_id"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "client_id") + } + + protoReq.ClientId, err = runtime.String(val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "client_id", err) + } + + if err := req.ParseForm(); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_Query_ConsensusStates_0); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + msg, err := server.ConsensusStates(ctx, &protoReq) + return msg, metadata, err + +} + +func request_Query_ClientParams_0(ctx context.Context, marshaler runtime.Marshaler, client QueryClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq QueryClientParamsRequest + var metadata 
runtime.ServerMetadata + + msg, err := client.ClientParams(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err + +} + +func local_request_Query_ClientParams_0(ctx context.Context, marshaler runtime.Marshaler, server QueryServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq QueryClientParamsRequest + var metadata runtime.ServerMetadata + + msg, err := server.ClientParams(ctx, &protoReq) + return msg, metadata, err + +} + +// RegisterQueryHandlerServer registers the http handlers for service Query to "mux". +// UnaryRPC :call QueryServer directly. +// StreamingRPC :currently unsupported pending https://github.com/grpc/grpc-go/issues/906. +// Note that using this registration option will cause many gRPC library features (such as grpc.SendHeader, etc) to stop working. Consider using RegisterQueryHandlerFromEndpoint instead. +func RegisterQueryHandlerServer(ctx context.Context, mux *runtime.ServeMux, server QueryServer) error { + + mux.Handle("GET", pattern_Query_ClientState_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := local_request_Query_ClientState_0(rctx, inboundMarshaler, server, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Query_ClientState_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + + mux.Handle("GET", pattern_Query_ClientStates_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := local_request_Query_ClientStates_0(rctx, inboundMarshaler, server, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Query_ClientStates_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + + mux.Handle("GET", pattern_Query_ConsensusState_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := local_request_Query_ConsensusState_0(rctx, inboundMarshaler, server, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Query_ConsensusState_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
+ + }) + + mux.Handle("GET", pattern_Query_ConsensusStates_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := local_request_Query_ConsensusStates_0(rctx, inboundMarshaler, server, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Query_ConsensusStates_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + + mux.Handle("GET", pattern_Query_ClientParams_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := local_request_Query_ClientParams_0(rctx, inboundMarshaler, server, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Query_ClientParams_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + + return nil +} + +// RegisterQueryHandlerFromEndpoint is same as RegisterQueryHandler but +// automatically dials to "endpoint" and closes the connection when "ctx" gets done. +func RegisterQueryHandlerFromEndpoint(ctx context.Context, mux *runtime.ServeMux, endpoint string, opts []grpc.DialOption) (err error) { + conn, err := grpc.Dial(endpoint, opts...) + if err != nil { + return err + } + defer func() { + if err != nil { + if cerr := conn.Close(); cerr != nil { + grpclog.Infof("Failed to close conn to %s: %v", endpoint, cerr) + } + return + } + go func() { + <-ctx.Done() + if cerr := conn.Close(); cerr != nil { + grpclog.Infof("Failed to close conn to %s: %v", endpoint, cerr) + } + }() + }() + + return RegisterQueryHandler(ctx, mux, conn) +} + +// RegisterQueryHandler registers the http handlers for service Query to "mux". +// The handlers forward requests to the grpc endpoint over "conn". +func RegisterQueryHandler(ctx context.Context, mux *runtime.ServeMux, conn *grpc.ClientConn) error { + return RegisterQueryHandlerClient(ctx, mux, NewQueryClient(conn)) +} + +// RegisterQueryHandlerClient registers the http handlers for service Query +// to "mux". The handlers forward requests to the grpc endpoint over the given implementation of "QueryClient". +// Note: the gRPC framework executes interceptors within the gRPC handler. If the passed in "QueryClient" +// doesn't go through the normal gRPC flow (creating a gRPC client etc.) then it will be up to the passed in +// "QueryClient" to call the correct interceptors. 
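+//
+// A hand-written usage sketch (illustrative only, not part of the generated output;
+// "grpcAddr" and "restAddr" are placeholders):
+//
+//	mux := runtime.NewServeMux()
+//	conn, err := grpc.Dial(grpcAddr, grpc.WithInsecure())
+//	if err != nil { ... }
+//	if err := RegisterQueryHandlerClient(context.Background(), mux, NewQueryClient(conn)); err != nil { ... }
+//	// mux can then be served over HTTP, e.g. http.ListenAndServe(restAddr, mux).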
+func RegisterQueryHandlerClient(ctx context.Context, mux *runtime.ServeMux, client QueryClient) error { + + mux.Handle("GET", pattern_Query_ClientState_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_Query_ClientState_0(rctx, inboundMarshaler, client, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Query_ClientState_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + + mux.Handle("GET", pattern_Query_ClientStates_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_Query_ClientStates_0(rctx, inboundMarshaler, client, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Query_ClientStates_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + + mux.Handle("GET", pattern_Query_ConsensusState_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_Query_ConsensusState_0(rctx, inboundMarshaler, client, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Query_ConsensusState_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + + mux.Handle("GET", pattern_Query_ConsensusStates_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_Query_ConsensusStates_0(rctx, inboundMarshaler, client, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Query_ConsensusStates_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
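+		// Hand-written note (not generated): this handler serves the REST route
+		// GET /ibc/core/client/v1/consensus_states/{client_id}, defined by pattern_Query_ConsensusStates_0 below.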
+ + }) + + mux.Handle("GET", pattern_Query_ClientParams_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_Query_ClientParams_0(rctx, inboundMarshaler, client, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Query_ClientParams_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + + return nil +} + +var ( + pattern_Query_ClientState_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3, 2, 4, 1, 0, 4, 1, 5, 5}, []string{"ibc", "core", "client", "v1", "client_states", "client_id"}, "", runtime.AssumeColonVerbOpt(true))) + + pattern_Query_ClientStates_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3, 2, 4}, []string{"ibc", "core", "client", "v1", "client_states"}, "", runtime.AssumeColonVerbOpt(true))) + + pattern_Query_ConsensusState_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3, 2, 4, 1, 0, 4, 1, 5, 5, 2, 6, 1, 0, 4, 1, 5, 7, 2, 8, 1, 0, 4, 1, 5, 9}, []string{"ibc", "core", "client", "v1", "consensus_states", "client_id", "revision", "revision_number", "height", "revision_height"}, "", runtime.AssumeColonVerbOpt(true))) + + pattern_Query_ConsensusStates_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3, 2, 4, 1, 0, 4, 1, 5, 5}, []string{"ibc", "core", "client", "v1", "consensus_states", "client_id"}, "", runtime.AssumeColonVerbOpt(true))) + + pattern_Query_ClientParams_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3}, []string{"ibc", "client", "v1", "params"}, "", runtime.AssumeColonVerbOpt(true))) +) + +var ( + forward_Query_ClientState_0 = runtime.ForwardResponseMessage + + forward_Query_ClientStates_0 = runtime.ForwardResponseMessage + + forward_Query_ConsensusState_0 = runtime.ForwardResponseMessage + + forward_Query_ConsensusStates_0 = runtime.ForwardResponseMessage + + forward_Query_ClientParams_0 = runtime.ForwardResponseMessage +) diff --git a/core/02-client/types/tx.pb.go b/core/02-client/types/tx.pb.go new file mode 100644 index 0000000000..1adac38765 --- /dev/null +++ b/core/02-client/types/tx.pb.go @@ -0,0 +1,2074 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: ibcgo/core/client/v1/tx.proto + +package types + +import ( + context "context" + fmt "fmt" + types "github.com/cosmos/cosmos-sdk/codec/types" + _ "github.com/gogo/protobuf/gogoproto" + grpc1 "github.com/gogo/protobuf/grpc" + proto "github.com/gogo/protobuf/proto" + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" + io "io" + math "math" + math_bits "math/bits" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
+const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +// MsgCreateClient defines a message to create an IBC client +type MsgCreateClient struct { + // light client state + ClientState *types.Any `protobuf:"bytes,1,opt,name=client_state,json=clientState,proto3" json:"client_state,omitempty" yaml:"client_state"` + // consensus state associated with the client that corresponds to a given + // height. + ConsensusState *types.Any `protobuf:"bytes,2,opt,name=consensus_state,json=consensusState,proto3" json:"consensus_state,omitempty" yaml:"consensus_state"` + // signer address + Signer string `protobuf:"bytes,3,opt,name=signer,proto3" json:"signer,omitempty"` +} + +func (m *MsgCreateClient) Reset() { *m = MsgCreateClient{} } +func (m *MsgCreateClient) String() string { return proto.CompactTextString(m) } +func (*MsgCreateClient) ProtoMessage() {} +func (*MsgCreateClient) Descriptor() ([]byte, []int) { + return fileDescriptor_3848774a44f81317, []int{0} +} +func (m *MsgCreateClient) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *MsgCreateClient) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_MsgCreateClient.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *MsgCreateClient) XXX_Merge(src proto.Message) { + xxx_messageInfo_MsgCreateClient.Merge(m, src) +} +func (m *MsgCreateClient) XXX_Size() int { + return m.Size() +} +func (m *MsgCreateClient) XXX_DiscardUnknown() { + xxx_messageInfo_MsgCreateClient.DiscardUnknown(m) +} + +var xxx_messageInfo_MsgCreateClient proto.InternalMessageInfo + +// MsgCreateClientResponse defines the Msg/CreateClient response type. +type MsgCreateClientResponse struct { +} + +func (m *MsgCreateClientResponse) Reset() { *m = MsgCreateClientResponse{} } +func (m *MsgCreateClientResponse) String() string { return proto.CompactTextString(m) } +func (*MsgCreateClientResponse) ProtoMessage() {} +func (*MsgCreateClientResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_3848774a44f81317, []int{1} +} +func (m *MsgCreateClientResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *MsgCreateClientResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_MsgCreateClientResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *MsgCreateClientResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_MsgCreateClientResponse.Merge(m, src) +} +func (m *MsgCreateClientResponse) XXX_Size() int { + return m.Size() +} +func (m *MsgCreateClientResponse) XXX_DiscardUnknown() { + xxx_messageInfo_MsgCreateClientResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_MsgCreateClientResponse proto.InternalMessageInfo + +// MsgUpdateClient defines an sdk.Msg to update a IBC client state using +// the given header. 
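+//
+// A hand-written construction sketch (illustrative, not generated; "tmHeader" is a placeholder
+// for a concrete header type implementing proto.Message, and "signerAddr" for a bech32 address string):
+//
+//	anyHeader, err := types.NewAnyWithValue(tmHeader)
+//	if err != nil { ... }
+//	msg := &MsgUpdateClient{ClientId: "07-tendermint-0", Header: anyHeader, Signer: signerAddr}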
+type MsgUpdateClient struct { + // client unique identifier + ClientId string `protobuf:"bytes,1,opt,name=client_id,json=clientId,proto3" json:"client_id,omitempty" yaml:"client_id"` + // header to update the light client + Header *types.Any `protobuf:"bytes,2,opt,name=header,proto3" json:"header,omitempty"` + // signer address + Signer string `protobuf:"bytes,3,opt,name=signer,proto3" json:"signer,omitempty"` +} + +func (m *MsgUpdateClient) Reset() { *m = MsgUpdateClient{} } +func (m *MsgUpdateClient) String() string { return proto.CompactTextString(m) } +func (*MsgUpdateClient) ProtoMessage() {} +func (*MsgUpdateClient) Descriptor() ([]byte, []int) { + return fileDescriptor_3848774a44f81317, []int{2} +} +func (m *MsgUpdateClient) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *MsgUpdateClient) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_MsgUpdateClient.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *MsgUpdateClient) XXX_Merge(src proto.Message) { + xxx_messageInfo_MsgUpdateClient.Merge(m, src) +} +func (m *MsgUpdateClient) XXX_Size() int { + return m.Size() +} +func (m *MsgUpdateClient) XXX_DiscardUnknown() { + xxx_messageInfo_MsgUpdateClient.DiscardUnknown(m) +} + +var xxx_messageInfo_MsgUpdateClient proto.InternalMessageInfo + +// MsgUpdateClientResponse defines the Msg/UpdateClient response type. +type MsgUpdateClientResponse struct { +} + +func (m *MsgUpdateClientResponse) Reset() { *m = MsgUpdateClientResponse{} } +func (m *MsgUpdateClientResponse) String() string { return proto.CompactTextString(m) } +func (*MsgUpdateClientResponse) ProtoMessage() {} +func (*MsgUpdateClientResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_3848774a44f81317, []int{3} +} +func (m *MsgUpdateClientResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *MsgUpdateClientResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_MsgUpdateClientResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *MsgUpdateClientResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_MsgUpdateClientResponse.Merge(m, src) +} +func (m *MsgUpdateClientResponse) XXX_Size() int { + return m.Size() +} +func (m *MsgUpdateClientResponse) XXX_DiscardUnknown() { + xxx_messageInfo_MsgUpdateClientResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_MsgUpdateClientResponse proto.InternalMessageInfo + +// MsgUpgradeClient defines an sdk.Msg to upgrade an IBC client to a new client +// state +type MsgUpgradeClient struct { + // client unique identifier + ClientId string `protobuf:"bytes,1,opt,name=client_id,json=clientId,proto3" json:"client_id,omitempty" yaml:"client_id"` + // upgraded client state + ClientState *types.Any `protobuf:"bytes,2,opt,name=client_state,json=clientState,proto3" json:"client_state,omitempty" yaml:"client_state"` + // upgraded consensus state, only contains enough information to serve as a + // basis of trust in update logic + ConsensusState *types.Any `protobuf:"bytes,3,opt,name=consensus_state,json=consensusState,proto3" json:"consensus_state,omitempty" yaml:"consensus_state"` + // proof that old chain committed to new client + ProofUpgradeClient []byte 
`protobuf:"bytes,4,opt,name=proof_upgrade_client,json=proofUpgradeClient,proto3" json:"proof_upgrade_client,omitempty" yaml:"proof_upgrade_client"` + // proof that old chain committed to new consensus state + ProofUpgradeConsensusState []byte `protobuf:"bytes,5,opt,name=proof_upgrade_consensus_state,json=proofUpgradeConsensusState,proto3" json:"proof_upgrade_consensus_state,omitempty" yaml:"proof_upgrade_consensus_state"` + // signer address + Signer string `protobuf:"bytes,6,opt,name=signer,proto3" json:"signer,omitempty"` +} + +func (m *MsgUpgradeClient) Reset() { *m = MsgUpgradeClient{} } +func (m *MsgUpgradeClient) String() string { return proto.CompactTextString(m) } +func (*MsgUpgradeClient) ProtoMessage() {} +func (*MsgUpgradeClient) Descriptor() ([]byte, []int) { + return fileDescriptor_3848774a44f81317, []int{4} +} +func (m *MsgUpgradeClient) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *MsgUpgradeClient) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_MsgUpgradeClient.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *MsgUpgradeClient) XXX_Merge(src proto.Message) { + xxx_messageInfo_MsgUpgradeClient.Merge(m, src) +} +func (m *MsgUpgradeClient) XXX_Size() int { + return m.Size() +} +func (m *MsgUpgradeClient) XXX_DiscardUnknown() { + xxx_messageInfo_MsgUpgradeClient.DiscardUnknown(m) +} + +var xxx_messageInfo_MsgUpgradeClient proto.InternalMessageInfo + +// MsgUpgradeClientResponse defines the Msg/UpgradeClient response type. +type MsgUpgradeClientResponse struct { +} + +func (m *MsgUpgradeClientResponse) Reset() { *m = MsgUpgradeClientResponse{} } +func (m *MsgUpgradeClientResponse) String() string { return proto.CompactTextString(m) } +func (*MsgUpgradeClientResponse) ProtoMessage() {} +func (*MsgUpgradeClientResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_3848774a44f81317, []int{5} +} +func (m *MsgUpgradeClientResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *MsgUpgradeClientResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_MsgUpgradeClientResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *MsgUpgradeClientResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_MsgUpgradeClientResponse.Merge(m, src) +} +func (m *MsgUpgradeClientResponse) XXX_Size() int { + return m.Size() +} +func (m *MsgUpgradeClientResponse) XXX_DiscardUnknown() { + xxx_messageInfo_MsgUpgradeClientResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_MsgUpgradeClientResponse proto.InternalMessageInfo + +// MsgSubmitMisbehaviour defines an sdk.Msg type that submits Evidence for +// light client misbehaviour. 
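+//
+// (Hand-written note, not generated: the Misbehaviour field is packed into an Any in the same
+// way as the header in the MsgUpdateClient sketch above, e.g. via types.NewAnyWithValue.)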
+type MsgSubmitMisbehaviour struct { + // client unique identifier + ClientId string `protobuf:"bytes,1,opt,name=client_id,json=clientId,proto3" json:"client_id,omitempty" yaml:"client_id"` + // misbehaviour used for freezing the light client + Misbehaviour *types.Any `protobuf:"bytes,2,opt,name=misbehaviour,proto3" json:"misbehaviour,omitempty"` + // signer address + Signer string `protobuf:"bytes,3,opt,name=signer,proto3" json:"signer,omitempty"` +} + +func (m *MsgSubmitMisbehaviour) Reset() { *m = MsgSubmitMisbehaviour{} } +func (m *MsgSubmitMisbehaviour) String() string { return proto.CompactTextString(m) } +func (*MsgSubmitMisbehaviour) ProtoMessage() {} +func (*MsgSubmitMisbehaviour) Descriptor() ([]byte, []int) { + return fileDescriptor_3848774a44f81317, []int{6} +} +func (m *MsgSubmitMisbehaviour) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *MsgSubmitMisbehaviour) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_MsgSubmitMisbehaviour.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *MsgSubmitMisbehaviour) XXX_Merge(src proto.Message) { + xxx_messageInfo_MsgSubmitMisbehaviour.Merge(m, src) +} +func (m *MsgSubmitMisbehaviour) XXX_Size() int { + return m.Size() +} +func (m *MsgSubmitMisbehaviour) XXX_DiscardUnknown() { + xxx_messageInfo_MsgSubmitMisbehaviour.DiscardUnknown(m) +} + +var xxx_messageInfo_MsgSubmitMisbehaviour proto.InternalMessageInfo + +// MsgSubmitMisbehaviourResponse defines the Msg/SubmitMisbehaviour response +// type. +type MsgSubmitMisbehaviourResponse struct { +} + +func (m *MsgSubmitMisbehaviourResponse) Reset() { *m = MsgSubmitMisbehaviourResponse{} } +func (m *MsgSubmitMisbehaviourResponse) String() string { return proto.CompactTextString(m) } +func (*MsgSubmitMisbehaviourResponse) ProtoMessage() {} +func (*MsgSubmitMisbehaviourResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_3848774a44f81317, []int{7} +} +func (m *MsgSubmitMisbehaviourResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *MsgSubmitMisbehaviourResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_MsgSubmitMisbehaviourResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *MsgSubmitMisbehaviourResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_MsgSubmitMisbehaviourResponse.Merge(m, src) +} +func (m *MsgSubmitMisbehaviourResponse) XXX_Size() int { + return m.Size() +} +func (m *MsgSubmitMisbehaviourResponse) XXX_DiscardUnknown() { + xxx_messageInfo_MsgSubmitMisbehaviourResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_MsgSubmitMisbehaviourResponse proto.InternalMessageInfo + +func init() { + proto.RegisterType((*MsgCreateClient)(nil), "ibcgo.core.client.v1.MsgCreateClient") + proto.RegisterType((*MsgCreateClientResponse)(nil), "ibcgo.core.client.v1.MsgCreateClientResponse") + proto.RegisterType((*MsgUpdateClient)(nil), "ibcgo.core.client.v1.MsgUpdateClient") + proto.RegisterType((*MsgUpdateClientResponse)(nil), "ibcgo.core.client.v1.MsgUpdateClientResponse") + proto.RegisterType((*MsgUpgradeClient)(nil), "ibcgo.core.client.v1.MsgUpgradeClient") + proto.RegisterType((*MsgUpgradeClientResponse)(nil), "ibcgo.core.client.v1.MsgUpgradeClientResponse") + 
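+	// Hand-written note (not generated): these registrations make each message resolvable by its
+	// full protobuf name, e.g. "ibcgo.core.client.v1.MsgCreateClient", in the gogoproto type registry.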
proto.RegisterType((*MsgSubmitMisbehaviour)(nil), "ibcgo.core.client.v1.MsgSubmitMisbehaviour") + proto.RegisterType((*MsgSubmitMisbehaviourResponse)(nil), "ibcgo.core.client.v1.MsgSubmitMisbehaviourResponse") +} + +func init() { proto.RegisterFile("ibcgo/core/client/v1/tx.proto", fileDescriptor_3848774a44f81317) } + +var fileDescriptor_3848774a44f81317 = []byte{ + // 600 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xb4, 0x55, 0x3d, 0x6f, 0xd3, 0x40, + 0x18, 0x8e, 0x1b, 0x88, 0xda, 0x6b, 0xa0, 0x95, 0x09, 0x6d, 0x6a, 0x14, 0x3b, 0x58, 0x80, 0x22, + 0x41, 0xce, 0x24, 0x5d, 0x50, 0x37, 0xd2, 0x01, 0x31, 0x44, 0x02, 0x57, 0x0c, 0xb0, 0x44, 0xfe, + 0xb8, 0x5e, 0x4e, 0x24, 0xbe, 0xc8, 0x67, 0x47, 0xe4, 0x1f, 0x30, 0x32, 0xf0, 0x03, 0x3a, 0xf1, + 0x03, 0xf8, 0x15, 0x8c, 0x1d, 0x18, 0x98, 0xa2, 0x2a, 0x59, 0x98, 0xf3, 0x0b, 0x50, 0xee, 0x9c, + 0x10, 0x1b, 0xc7, 0x0a, 0x5f, 0x9b, 0xdf, 0x7b, 0x9f, 0x7b, 0x9e, 0xf7, 0xf1, 0xfb, 0xde, 0x1d, + 0xa8, 0x10, 0xdb, 0xc1, 0xd4, 0x70, 0xa8, 0x8f, 0x0c, 0xa7, 0x47, 0x90, 0x17, 0x18, 0xc3, 0x86, + 0x11, 0xbc, 0x83, 0x03, 0x9f, 0x06, 0x54, 0x2e, 0xf1, 0x34, 0x9c, 0xa7, 0xa1, 0x48, 0xc3, 0x61, + 0x43, 0x29, 0x61, 0x8a, 0x29, 0x07, 0x18, 0xf3, 0x2f, 0x81, 0x55, 0x8e, 0x30, 0xa5, 0xb8, 0x87, + 0x0c, 0x1e, 0xd9, 0xe1, 0xb9, 0x61, 0x79, 0xa3, 0x28, 0x75, 0x37, 0x55, 0x25, 0x22, 0xe4, 0x10, + 0xfd, 0x4a, 0x02, 0x7b, 0x6d, 0x86, 0x4f, 0x7d, 0x64, 0x05, 0xe8, 0x94, 0x67, 0xe4, 0x17, 0xa0, + 0x28, 0x30, 0x1d, 0x16, 0x58, 0x01, 0x2a, 0x4b, 0x55, 0xa9, 0xb6, 0xdb, 0x2c, 0x41, 0x21, 0x04, + 0x17, 0x42, 0xf0, 0xa9, 0x37, 0x6a, 0x1d, 0xce, 0xc6, 0xda, 0xad, 0x91, 0xd5, 0xef, 0x9d, 0xe8, + 0xab, 0x7b, 0x74, 0x73, 0x57, 0x84, 0x67, 0xf3, 0x48, 0x7e, 0x0d, 0xf6, 0x1c, 0xea, 0x31, 0xe4, + 0xb1, 0x90, 0x45, 0xa4, 0x5b, 0x19, 0xa4, 0xca, 0x6c, 0xac, 0x1d, 0x44, 0xa4, 0xf1, 0x6d, 0xba, + 0x79, 0x73, 0xb9, 0x22, 0xa8, 0x0f, 0x40, 0x81, 0x11, 0xec, 0x21, 0xbf, 0x9c, 0xaf, 0x4a, 0xb5, + 0x1d, 0x33, 0x8a, 0x4e, 0xb6, 0xdf, 0x5f, 0x68, 0xb9, 0xef, 0x17, 0x5a, 0x4e, 0x3f, 0x02, 0x87, + 0x09, 0x87, 0x26, 0x62, 0x83, 0x39, 0x8b, 0xfe, 0x51, 0xb8, 0x7f, 0x35, 0x70, 0x7f, 0xba, 0x6f, + 0x80, 0x9d, 0xc8, 0x09, 0x71, 0xb9, 0xf5, 0x9d, 0x56, 0x69, 0x36, 0xd6, 0xf6, 0x63, 0x26, 0x89, + 0xab, 0x9b, 0xdb, 0xe2, 0xfb, 0xb9, 0x2b, 0x3f, 0x02, 0x85, 0x2e, 0xb2, 0x5c, 0xe4, 0x67, 0xb9, + 0x32, 0x23, 0xcc, 0xc6, 0x15, 0xaf, 0x56, 0xb5, 0xac, 0xf8, 0x6b, 0x1e, 0xec, 0xf3, 0x1c, 0xf6, + 0x2d, 0xf7, 0x2f, 0x4a, 0x4e, 0xf6, 0x78, 0xeb, 0x7f, 0xf4, 0x38, 0xff, 0x8f, 0x7a, 0xfc, 0x12, + 0x94, 0x06, 0x3e, 0xa5, 0xe7, 0x9d, 0x50, 0xd8, 0xee, 0x08, 0xdd, 0xf2, 0xb5, 0xaa, 0x54, 0x2b, + 0xb6, 0xb4, 0xd9, 0x58, 0xbb, 0x23, 0x98, 0xd2, 0x50, 0xba, 0x29, 0xf3, 0xe5, 0xf8, 0x2f, 0x7b, + 0x0b, 0x2a, 0x09, 0x70, 0xa2, 0xf6, 0xeb, 0x9c, 0xbb, 0x36, 0x1b, 0x6b, 0xf7, 0x52, 0xb9, 0x93, + 0x35, 0x2b, 0x31, 0x91, 0x75, 0x33, 0x5a, 0x58, 0xd3, 0x71, 0x05, 0x94, 0x93, 0x5d, 0x5d, 0xb6, + 0xfc, 0x93, 0x04, 0x6e, 0xb7, 0x19, 0x3e, 0x0b, 0xed, 0x3e, 0x09, 0xda, 0x84, 0xd9, 0xa8, 0x6b, + 0x0d, 0x09, 0x0d, 0xfd, 0x3f, 0xe9, 0xfb, 0x13, 0x50, 0xec, 0xaf, 0x50, 0x64, 0x0e, 0x6c, 0x0c, + 0xb9, 0xc1, 0xd8, 0x6a, 0xa0, 0x92, 0x5a, 0xe7, 0xc2, 0x49, 0xf3, 0x73, 0x1e, 0xe4, 0xdb, 0x0c, + 0xcb, 0x2e, 0x28, 0xc6, 0x2e, 0x9c, 0xfb, 0x30, 0xed, 0xbe, 0x83, 0x89, 0x53, 0xab, 0xd4, 0x37, + 0x82, 0x2d, 0xd4, 0xe6, 0x2a, 0xb1, 0x83, 0xbd, 0x5e, 0x65, 0x15, 0x96, 0xa1, 0x92, 0x76, 0x20, + 0x65, 0x0c, 0x6e, 0xc4, 0x27, 0xeb, 0x41, 0xc6, 0xfe, 0x15, 0x9c, 0x02, 0x37, 0xc3, 0x2d, 0x85, + 
0x86, 0x40, 0x4e, 0x19, 0x81, 0x87, 0x6b, 0x59, 0x7e, 0x05, 0x2b, 0xc7, 0xbf, 0x01, 0x5e, 0xe8, + 0xb6, 0x9e, 0x7d, 0x99, 0xa8, 0xd2, 0xe5, 0x44, 0x95, 0xae, 0x26, 0xaa, 0xf4, 0x61, 0xaa, 0xe6, + 0x2e, 0xa7, 0x6a, 0xee, 0xdb, 0x54, 0xcd, 0xbd, 0xa9, 0x63, 0x12, 0x74, 0x43, 0x1b, 0x3a, 0xb4, + 0x6f, 0x38, 0x94, 0xf5, 0x29, 0x33, 0x88, 0xed, 0xd4, 0x17, 0x2f, 0xce, 0xe3, 0x66, 0x3d, 0x7a, + 0x74, 0x82, 0xd1, 0x00, 0x31, 0xbb, 0xc0, 0x87, 0xeb, 0xf8, 0x47, 0x00, 0x00, 0x00, 0xff, 0xff, + 0xb4, 0xb9, 0x0f, 0xae, 0xfc, 0x06, 0x00, 0x00, +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConn + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion4 + +// MsgClient is the client API for Msg service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. +type MsgClient interface { + // CreateClient defines a rpc handler method for MsgCreateClient. + CreateClient(ctx context.Context, in *MsgCreateClient, opts ...grpc.CallOption) (*MsgCreateClientResponse, error) + // UpdateClient defines a rpc handler method for MsgUpdateClient. + UpdateClient(ctx context.Context, in *MsgUpdateClient, opts ...grpc.CallOption) (*MsgUpdateClientResponse, error) + // UpgradeClient defines a rpc handler method for MsgUpgradeClient. + UpgradeClient(ctx context.Context, in *MsgUpgradeClient, opts ...grpc.CallOption) (*MsgUpgradeClientResponse, error) + // SubmitMisbehaviour defines a rpc handler method for MsgSubmitMisbehaviour. + SubmitMisbehaviour(ctx context.Context, in *MsgSubmitMisbehaviour, opts ...grpc.CallOption) (*MsgSubmitMisbehaviourResponse, error) +} + +type msgClient struct { + cc grpc1.ClientConn +} + +func NewMsgClient(cc grpc1.ClientConn) MsgClient { + return &msgClient{cc} +} + +func (c *msgClient) CreateClient(ctx context.Context, in *MsgCreateClient, opts ...grpc.CallOption) (*MsgCreateClientResponse, error) { + out := new(MsgCreateClientResponse) + err := c.cc.Invoke(ctx, "/ibcgo.core.client.v1.Msg/CreateClient", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *msgClient) UpdateClient(ctx context.Context, in *MsgUpdateClient, opts ...grpc.CallOption) (*MsgUpdateClientResponse, error) { + out := new(MsgUpdateClientResponse) + err := c.cc.Invoke(ctx, "/ibcgo.core.client.v1.Msg/UpdateClient", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *msgClient) UpgradeClient(ctx context.Context, in *MsgUpgradeClient, opts ...grpc.CallOption) (*MsgUpgradeClientResponse, error) { + out := new(MsgUpgradeClientResponse) + err := c.cc.Invoke(ctx, "/ibcgo.core.client.v1.Msg/UpgradeClient", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *msgClient) SubmitMisbehaviour(ctx context.Context, in *MsgSubmitMisbehaviour, opts ...grpc.CallOption) (*MsgSubmitMisbehaviourResponse, error) { + out := new(MsgSubmitMisbehaviourResponse) + err := c.cc.Invoke(ctx, "/ibcgo.core.client.v1.Msg/SubmitMisbehaviour", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// MsgServer is the server API for Msg service. +type MsgServer interface { + // CreateClient defines a rpc handler method for MsgCreateClient. 
+ CreateClient(context.Context, *MsgCreateClient) (*MsgCreateClientResponse, error) + // UpdateClient defines a rpc handler method for MsgUpdateClient. + UpdateClient(context.Context, *MsgUpdateClient) (*MsgUpdateClientResponse, error) + // UpgradeClient defines a rpc handler method for MsgUpgradeClient. + UpgradeClient(context.Context, *MsgUpgradeClient) (*MsgUpgradeClientResponse, error) + // SubmitMisbehaviour defines a rpc handler method for MsgSubmitMisbehaviour. + SubmitMisbehaviour(context.Context, *MsgSubmitMisbehaviour) (*MsgSubmitMisbehaviourResponse, error) +} + +// UnimplementedMsgServer can be embedded to have forward compatible implementations. +type UnimplementedMsgServer struct { +} + +func (*UnimplementedMsgServer) CreateClient(ctx context.Context, req *MsgCreateClient) (*MsgCreateClientResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method CreateClient not implemented") +} +func (*UnimplementedMsgServer) UpdateClient(ctx context.Context, req *MsgUpdateClient) (*MsgUpdateClientResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method UpdateClient not implemented") +} +func (*UnimplementedMsgServer) UpgradeClient(ctx context.Context, req *MsgUpgradeClient) (*MsgUpgradeClientResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method UpgradeClient not implemented") +} +func (*UnimplementedMsgServer) SubmitMisbehaviour(ctx context.Context, req *MsgSubmitMisbehaviour) (*MsgSubmitMisbehaviourResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method SubmitMisbehaviour not implemented") +} + +func RegisterMsgServer(s grpc1.Server, srv MsgServer) { + s.RegisterService(&_Msg_serviceDesc, srv) +} + +func _Msg_CreateClient_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(MsgCreateClient) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(MsgServer).CreateClient(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/ibcgo.core.client.v1.Msg/CreateClient", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(MsgServer).CreateClient(ctx, req.(*MsgCreateClient)) + } + return interceptor(ctx, in, info, handler) +} + +func _Msg_UpdateClient_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(MsgUpdateClient) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(MsgServer).UpdateClient(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/ibcgo.core.client.v1.Msg/UpdateClient", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(MsgServer).UpdateClient(ctx, req.(*MsgUpdateClient)) + } + return interceptor(ctx, in, info, handler) +} + +func _Msg_UpgradeClient_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(MsgUpgradeClient) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(MsgServer).UpgradeClient(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/ibcgo.core.client.v1.Msg/UpgradeClient", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(MsgServer).UpgradeClient(ctx, 
req.(*MsgUpgradeClient)) + } + return interceptor(ctx, in, info, handler) +} + +func _Msg_SubmitMisbehaviour_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(MsgSubmitMisbehaviour) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(MsgServer).SubmitMisbehaviour(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/ibcgo.core.client.v1.Msg/SubmitMisbehaviour", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(MsgServer).SubmitMisbehaviour(ctx, req.(*MsgSubmitMisbehaviour)) + } + return interceptor(ctx, in, info, handler) +} + +var _Msg_serviceDesc = grpc.ServiceDesc{ + ServiceName: "ibcgo.core.client.v1.Msg", + HandlerType: (*MsgServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "CreateClient", + Handler: _Msg_CreateClient_Handler, + }, + { + MethodName: "UpdateClient", + Handler: _Msg_UpdateClient_Handler, + }, + { + MethodName: "UpgradeClient", + Handler: _Msg_UpgradeClient_Handler, + }, + { + MethodName: "SubmitMisbehaviour", + Handler: _Msg_SubmitMisbehaviour_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "ibcgo/core/client/v1/tx.proto", +} + +func (m *MsgCreateClient) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *MsgCreateClient) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *MsgCreateClient) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Signer) > 0 { + i -= len(m.Signer) + copy(dAtA[i:], m.Signer) + i = encodeVarintTx(dAtA, i, uint64(len(m.Signer))) + i-- + dAtA[i] = 0x1a + } + if m.ConsensusState != nil { + { + size, err := m.ConsensusState.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTx(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if m.ClientState != nil { + { + size, err := m.ClientState.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTx(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *MsgCreateClientResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *MsgCreateClientResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *MsgCreateClientResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + return len(dAtA) - i, nil +} + +func (m *MsgUpdateClient) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *MsgUpdateClient) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *MsgUpdateClient) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Signer) > 0 { + i -= len(m.Signer) + copy(dAtA[i:], m.Signer) + i = encodeVarintTx(dAtA, i, 
uint64(len(m.Signer))) + i-- + dAtA[i] = 0x1a + } + if m.Header != nil { + { + size, err := m.Header.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTx(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if len(m.ClientId) > 0 { + i -= len(m.ClientId) + copy(dAtA[i:], m.ClientId) + i = encodeVarintTx(dAtA, i, uint64(len(m.ClientId))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *MsgUpdateClientResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *MsgUpdateClientResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *MsgUpdateClientResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + return len(dAtA) - i, nil +} + +func (m *MsgUpgradeClient) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *MsgUpgradeClient) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *MsgUpgradeClient) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Signer) > 0 { + i -= len(m.Signer) + copy(dAtA[i:], m.Signer) + i = encodeVarintTx(dAtA, i, uint64(len(m.Signer))) + i-- + dAtA[i] = 0x32 + } + if len(m.ProofUpgradeConsensusState) > 0 { + i -= len(m.ProofUpgradeConsensusState) + copy(dAtA[i:], m.ProofUpgradeConsensusState) + i = encodeVarintTx(dAtA, i, uint64(len(m.ProofUpgradeConsensusState))) + i-- + dAtA[i] = 0x2a + } + if len(m.ProofUpgradeClient) > 0 { + i -= len(m.ProofUpgradeClient) + copy(dAtA[i:], m.ProofUpgradeClient) + i = encodeVarintTx(dAtA, i, uint64(len(m.ProofUpgradeClient))) + i-- + dAtA[i] = 0x22 + } + if m.ConsensusState != nil { + { + size, err := m.ConsensusState.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTx(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + if m.ClientState != nil { + { + size, err := m.ClientState.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTx(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if len(m.ClientId) > 0 { + i -= len(m.ClientId) + copy(dAtA[i:], m.ClientId) + i = encodeVarintTx(dAtA, i, uint64(len(m.ClientId))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *MsgUpgradeClientResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *MsgUpgradeClientResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *MsgUpgradeClientResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + return len(dAtA) - i, nil +} + +func (m *MsgSubmitMisbehaviour) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *MsgSubmitMisbehaviour) MarshalTo(dAtA []byte) (int, error) { + size := 
m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *MsgSubmitMisbehaviour) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Signer) > 0 { + i -= len(m.Signer) + copy(dAtA[i:], m.Signer) + i = encodeVarintTx(dAtA, i, uint64(len(m.Signer))) + i-- + dAtA[i] = 0x1a + } + if m.Misbehaviour != nil { + { + size, err := m.Misbehaviour.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTx(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if len(m.ClientId) > 0 { + i -= len(m.ClientId) + copy(dAtA[i:], m.ClientId) + i = encodeVarintTx(dAtA, i, uint64(len(m.ClientId))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *MsgSubmitMisbehaviourResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *MsgSubmitMisbehaviourResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *MsgSubmitMisbehaviourResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + return len(dAtA) - i, nil +} + +func encodeVarintTx(dAtA []byte, offset int, v uint64) int { + offset -= sovTx(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *MsgCreateClient) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.ClientState != nil { + l = m.ClientState.Size() + n += 1 + l + sovTx(uint64(l)) + } + if m.ConsensusState != nil { + l = m.ConsensusState.Size() + n += 1 + l + sovTx(uint64(l)) + } + l = len(m.Signer) + if l > 0 { + n += 1 + l + sovTx(uint64(l)) + } + return n +} + +func (m *MsgCreateClientResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + return n +} + +func (m *MsgUpdateClient) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.ClientId) + if l > 0 { + n += 1 + l + sovTx(uint64(l)) + } + if m.Header != nil { + l = m.Header.Size() + n += 1 + l + sovTx(uint64(l)) + } + l = len(m.Signer) + if l > 0 { + n += 1 + l + sovTx(uint64(l)) + } + return n +} + +func (m *MsgUpdateClientResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + return n +} + +func (m *MsgUpgradeClient) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.ClientId) + if l > 0 { + n += 1 + l + sovTx(uint64(l)) + } + if m.ClientState != nil { + l = m.ClientState.Size() + n += 1 + l + sovTx(uint64(l)) + } + if m.ConsensusState != nil { + l = m.ConsensusState.Size() + n += 1 + l + sovTx(uint64(l)) + } + l = len(m.ProofUpgradeClient) + if l > 0 { + n += 1 + l + sovTx(uint64(l)) + } + l = len(m.ProofUpgradeConsensusState) + if l > 0 { + n += 1 + l + sovTx(uint64(l)) + } + l = len(m.Signer) + if l > 0 { + n += 1 + l + sovTx(uint64(l)) + } + return n +} + +func (m *MsgUpgradeClientResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + return n +} + +func (m *MsgSubmitMisbehaviour) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.ClientId) + if l > 0 { + n += 1 + l + sovTx(uint64(l)) + } + if m.Misbehaviour != nil { + l = m.Misbehaviour.Size() + n += 1 + l + sovTx(uint64(l)) + } + l = len(m.Signer) + if l > 0 { + n += 1 + l + sovTx(uint64(l)) + } + return n +} + 
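+// A hand-written round-trip sketch (illustrative, not generated): the Marshal and Unmarshal
+// methods in this file can be used directly, e.g. for a *MsgCreateClient "msg":
+//
+//	bz, err := msg.Marshal()
+//	if err != nil { ... }
+//	var decoded MsgCreateClient
+//	if err := decoded.Unmarshal(bz); err != nil { ... }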
+func (m *MsgSubmitMisbehaviourResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + return n +} + +func sovTx(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozTx(x uint64) (n int) { + return sovTx(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *MsgCreateClient) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTx + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MsgCreateClient: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MsgCreateClient: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ClientState", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTx + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTx + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTx + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ClientState == nil { + m.ClientState = &types.Any{} + } + if err := m.ClientState.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ConsensusState", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTx + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTx + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTx + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ConsensusState == nil { + m.ConsensusState = &types.Any{} + } + if err := m.ConsensusState.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Signer", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTx + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTx + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTx + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Signer = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTx(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTx + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *MsgCreateClientResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < 
l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTx + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MsgCreateClientResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MsgCreateClientResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipTx(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTx + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *MsgUpdateClient) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTx + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MsgUpdateClient: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MsgUpdateClient: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ClientId", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTx + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTx + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTx + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ClientId = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTx + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTx + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTx + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Header == nil { + m.Header = &types.Any{} + } + if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Signer", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTx + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTx + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTx + } + if 
postIndex > l { + return io.ErrUnexpectedEOF + } + m.Signer = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTx(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTx + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *MsgUpdateClientResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTx + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MsgUpdateClientResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MsgUpdateClientResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipTx(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTx + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *MsgUpgradeClient) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTx + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MsgUpgradeClient: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MsgUpgradeClient: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ClientId", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTx + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTx + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTx + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ClientId = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ClientState", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTx + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTx + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTx + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ClientState == nil { + m.ClientState = &types.Any{} + } + if err := m.ClientState.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx 
= postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ConsensusState", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTx + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTx + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTx + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ConsensusState == nil { + m.ConsensusState = &types.Any{} + } + if err := m.ConsensusState.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ProofUpgradeClient", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTx + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthTx + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthTx + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ProofUpgradeClient = append(m.ProofUpgradeClient[:0], dAtA[iNdEx:postIndex]...) + if m.ProofUpgradeClient == nil { + m.ProofUpgradeClient = []byte{} + } + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ProofUpgradeConsensusState", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTx + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthTx + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthTx + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ProofUpgradeConsensusState = append(m.ProofUpgradeConsensusState[:0], dAtA[iNdEx:postIndex]...) 
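+			// Hand-written note (not generated): appending into the zero-length slice copies the proof
+			// bytes out of dAtA, so the field does not alias the caller's input buffer.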
+ if m.ProofUpgradeConsensusState == nil { + m.ProofUpgradeConsensusState = []byte{} + } + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Signer", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTx + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTx + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTx + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Signer = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTx(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTx + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *MsgUpgradeClientResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTx + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MsgUpgradeClientResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MsgUpgradeClientResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipTx(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTx + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *MsgSubmitMisbehaviour) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTx + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MsgSubmitMisbehaviour: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MsgSubmitMisbehaviour: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ClientId", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTx + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTx + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTx + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ClientId = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { 
+ return fmt.Errorf("proto: wrong wireType = %d for field Misbehaviour", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTx + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTx + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTx + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Misbehaviour == nil { + m.Misbehaviour = &types.Any{} + } + if err := m.Misbehaviour.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Signer", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTx + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTx + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTx + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Signer = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTx(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTx + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *MsgSubmitMisbehaviourResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTx + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MsgSubmitMisbehaviourResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MsgSubmitMisbehaviourResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipTx(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTx + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipTx(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowTx + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowTx + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowTx + } + if iNdEx >= l { + return 
0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthTx + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupTx + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthTx + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthTx = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowTx = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupTx = fmt.Errorf("proto: unexpected end of group") +) diff --git a/core/03-connection/client/cli/cli.go b/core/03-connection/client/cli/cli.go new file mode 100644 index 0000000000..01bb6f9b11 --- /dev/null +++ b/core/03-connection/client/cli/cli.go @@ -0,0 +1,46 @@ +package cli + +import ( + "github.com/spf13/cobra" + + "github.com/cosmos/cosmos-sdk/client" + "github.com/cosmos/cosmos-sdk/x/ibc/core/03-connection/types" +) + +// GetQueryCmd returns the query commands for IBC connections +func GetQueryCmd() *cobra.Command { + queryCmd := &cobra.Command{ + Use: types.SubModuleName, + Short: "IBC connection query subcommands", + DisableFlagParsing: true, + SuggestionsMinimumDistance: 2, + } + + queryCmd.AddCommand( + GetCmdQueryConnections(), + GetCmdQueryConnection(), + GetCmdQueryClientConnections(), + ) + + return queryCmd +} + +// NewTxCmd returns a CLI command handler for all x/ibc connection transaction commands. +func NewTxCmd() *cobra.Command { + txCmd := &cobra.Command{ + Use: types.SubModuleName, + Short: "IBC connection transaction subcommands", + DisableFlagParsing: true, + SuggestionsMinimumDistance: 2, + RunE: client.ValidateCmd, + } + + txCmd.AddCommand( + NewConnectionOpenInitCmd(), + NewConnectionOpenTryCmd(), + NewConnectionOpenAckCmd(), + NewConnectionOpenConfirmCmd(), + ) + + return txCmd +} diff --git a/core/03-connection/client/cli/query.go b/core/03-connection/client/cli/query.go new file mode 100644 index 0000000000..21c4bd8f57 --- /dev/null +++ b/core/03-connection/client/cli/query.go @@ -0,0 +1,118 @@ +package cli + +import ( + "fmt" + + "github.com/spf13/cobra" + + "github.com/cosmos/cosmos-sdk/client" + "github.com/cosmos/cosmos-sdk/client/flags" + "github.com/cosmos/cosmos-sdk/version" + "github.com/cosmos/cosmos-sdk/x/ibc/core/03-connection/client/utils" + "github.com/cosmos/cosmos-sdk/x/ibc/core/03-connection/types" + host "github.com/cosmos/cosmos-sdk/x/ibc/core/24-host" +) + +// GetCmdQueryConnections defines the command to query all the connection ends +// that this chain maintains.
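The generated Unmarshal and skipTx functions above all repeat the same base-128 varint loop: accumulate seven payload bits per byte until a byte without the continuation bit (0x80) is seen, and treat a decoded key as field number << 3 | wire type. A minimal standalone sketch of that rule (illustrative names only, not part of the generated code):

package main

import (
	"errors"
	"fmt"
)

var (
	errOverflow      = errors.New("integer overflow")
	errUnexpectedEOF = errors.New("unexpected EOF")
)

// decodeVarint reads one protobuf varint starting at index i and returns the
// value together with the index of the next unread byte.
func decodeVarint(dAtA []byte, i int) (uint64, int, error) {
	var v uint64
	for shift := uint(0); ; shift += 7 {
		if shift >= 64 {
			return 0, i, errOverflow
		}
		if i >= len(dAtA) {
			return 0, i, errUnexpectedEOF
		}
		b := dAtA[i]
		i++
		v |= uint64(b&0x7F) << shift
		if b < 0x80 {
			break
		}
	}
	return v, i, nil
}

func main() {
	v, next, err := decodeVarint([]byte{0xAC, 0x02}, 0)
	fmt.Println(v, next, err) // 300 2 <nil>

	// A key byte of 0x0A is field 1 with wire type 2 (length-delimited),
	// e.g. the ClientId string fields decoded above.
	key, _, _ := decodeVarint([]byte{0x0A}, 0)
	fmt.Printf("field=%d wiretype=%d\n", key>>3, key&0x7)
}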
+func GetCmdQueryConnections() *cobra.Command { + cmd := &cobra.Command{ + Use: "connections", + Short: "Query all connections", + Long: "Query all connection ends from a chain", + Example: fmt.Sprintf("%s query %s %s connections", version.AppName, host.ModuleName, types.SubModuleName), + Args: cobra.NoArgs, + RunE: func(cmd *cobra.Command, _ []string) error { + clientCtx, err := client.GetClientQueryContext(cmd) + if err != nil { + return err + } + queryClient := types.NewQueryClient(clientCtx) + + pageReq, err := client.ReadPageRequest(cmd.Flags()) + if err != nil { + return err + } + + req := &types.QueryConnectionsRequest{ + Pagination: pageReq, + } + + res, err := queryClient.Connections(cmd.Context(), req) + if err != nil { + return err + } + + return clientCtx.PrintProto(res) + }, + } + + flags.AddQueryFlagsToCmd(cmd) + flags.AddPaginationFlagsToCmd(cmd, "connection ends") + + return cmd +} + +// GetCmdQueryConnection defines the command to query a connection end +func GetCmdQueryConnection() *cobra.Command { + cmd := &cobra.Command{ + Use: "end [connection-id]", + Short: "Query stored connection end", + Long: "Query stored connection end", + Example: fmt.Sprintf("%s query %s %s end [connection-id]", version.AppName, host.ModuleName, types.SubModuleName), + Args: cobra.ExactArgs(1), + RunE: func(cmd *cobra.Command, args []string) error { + clientCtx, err := client.GetClientQueryContext(cmd) + if err != nil { + return err + } + connectionID := args[0] + prove, _ := cmd.Flags().GetBool(flags.FlagProve) + + connRes, err := utils.QueryConnection(clientCtx, connectionID, prove) + if err != nil { + return err + } + + clientCtx = clientCtx.WithHeight(int64(connRes.ProofHeight.RevisionHeight)) + return clientCtx.PrintProto(connRes) + }, + } + + cmd.Flags().Bool(flags.FlagProve, true, "show proofs for the query results") + flags.AddQueryFlagsToCmd(cmd) + + return cmd +} + +// GetCmdQueryClientConnections defines the command to query the connection paths registered for a client +func GetCmdQueryClientConnections() *cobra.Command { + cmd := &cobra.Command{ + Use: "path [client-id]", + Short: "Query stored client connection paths", + Long: "Query stored client connection paths", + Example: fmt.Sprintf("%s query %s %s path [client-id]", version.AppName, host.ModuleName, types.SubModuleName), + Args: cobra.ExactArgs(1), + RunE: func(cmd *cobra.Command, args []string) error { + clientCtx, err := client.GetClientQueryContext(cmd) + if err != nil { + return err + } + clientID := args[0] + prove, _ := cmd.Flags().GetBool(flags.FlagProve) + + connPathsRes, err := utils.QueryClientConnections(clientCtx, clientID, prove) + if err != nil { + return err + } + + clientCtx = clientCtx.WithHeight(int64(connPathsRes.ProofHeight.RevisionHeight)) + return clientCtx.PrintProto(connPathsRes) + }, + } + + cmd.Flags().Bool(flags.FlagProve, true, "show proofs for the query results") + flags.AddQueryFlagsToCmd(cmd) + + return cmd +} diff --git a/core/03-connection/client/cli/tx.go b/core/03-connection/client/cli/tx.go new file mode 100644 index 0000000000..68b1a62082 --- /dev/null +++ b/core/03-connection/client/cli/tx.go @@ -0,0 +1,348 @@ +package cli + +import ( + "fmt" + "io/ioutil" + "strings" + + "github.com/pkg/errors" + "github.com/spf13/cobra" + + "github.com/cosmos/cosmos-sdk/client" + "github.com/cosmos/cosmos-sdk/client/flags" + "github.com/cosmos/cosmos-sdk/client/tx" + "github.com/cosmos/cosmos-sdk/codec" + "github.com/cosmos/cosmos-sdk/types/msgservice" + "github.com/cosmos/cosmos-sdk/version" + clienttypes
"github.com/cosmos/cosmos-sdk/x/ibc/core/02-client/types" + "github.com/cosmos/cosmos-sdk/x/ibc/core/03-connection/client/utils" + "github.com/cosmos/cosmos-sdk/x/ibc/core/03-connection/types" + host "github.com/cosmos/cosmos-sdk/x/ibc/core/24-host" +) + +const ( + flagVersionIdentifier = "version-identifier" + flagVersionFeatures = "version-features" + flagDelayPeriod = "delay-period" +) + +// NewConnectionOpenInitCmd defines the command to initialize a connection on +// chain A with a given counterparty chain B +func NewConnectionOpenInitCmd() *cobra.Command { + cmd := &cobra.Command{ + Use: "open-init [client-id] [counterparty-client-id] [path/to/counterparty_prefix.json]", + Short: "Initialize connection on chain A", + Long: `Initialize a connection on chain A with a given counterparty chain B. + - 'version-identifier' flag can be a single pre-selected version identifier to be used in the handshake. + - 'version-features' flag can be a list of features separated by commas to accompany the version identifier.`, + Example: fmt.Sprintf( + "%s tx %s %s open-init [client-id] [counterparty-client-id] [path/to/counterparty_prefix.json] --version-identifier=\"1.0\" --version-features=\"ORDER_UNORDERED\" --delay-period=500", + version.AppName, host.ModuleName, types.SubModuleName, + ), + Args: cobra.ExactArgs(3), + RunE: func(cmd *cobra.Command, args []string) error { + clientCtx, err := client.GetClientTxContext(cmd) + if err != nil { + return err + } + clientID := args[0] + counterpartyClientID := args[1] + + counterpartyPrefix, err := utils.ParsePrefix(clientCtx.LegacyAmino, args[2]) + if err != nil { + return err + } + + var version *types.Version + versionIdentifier, _ := cmd.Flags().GetString(flagVersionIdentifier) + + if versionIdentifier != "" { + var features []string + + versionFeatures, _ := cmd.Flags().GetString(flagVersionFeatures) + if versionFeatures != "" { + features = strings.Split(versionFeatures, ",") + } + + version = types.NewVersion(versionIdentifier, features) + } + + delayPeriod, err := cmd.Flags().GetUint64(flagDelayPeriod) + if err != nil { + return err + } + + msg := types.NewMsgConnectionOpenInit( + clientID, counterpartyClientID, + counterpartyPrefix, version, delayPeriod, clientCtx.GetFromAddress(), + ) + + svcMsgClientConn := &msgservice.ServiceMsgClientConn{} + msgClient := types.NewMsgClient(svcMsgClientConn) + _, err = msgClient.ConnectionOpenInit(cmd.Context(), msg) + if err != nil { + return err + } + + return tx.GenerateOrBroadcastTxCLI(clientCtx, cmd.Flags(), svcMsgClientConn.GetMsgs()...) + }, + } + + // NOTE: we should use empty default values since the user may not want to select a version + // at this step in the handshake. + cmd.Flags().String(flagVersionIdentifier, "", "version identifier to be used in the connection handshake version negotiation") + cmd.Flags().String(flagVersionFeatures, "", "version features list separated by commas without spaces. 
The features must function with the version identifier.") + cmd.Flags().Uint64(flagDelayPeriod, 0, "delay period that must pass before packet verification can pass against a consensus state") + flags.AddTxFlagsToCmd(cmd) + + return cmd +} + +// NewConnectionOpenTryCmd defines the command to relay a connection open try on +// chain B +func NewConnectionOpenTryCmd() *cobra.Command { + cmd := &cobra.Command{ + Use: strings.TrimSpace(`open-try [connection-id] [client-id] +[counterparty-connection-id] [counterparty-client-id] [path/to/counterparty_prefix.json] [path/to/client_state.json] +[path/to/counterparty_version1.json,path/to/counterparty_version2.json...] [consensus-height] [proof-height] [path/to/proof_init.json] [path/to/proof_client.json] [path/to/proof_consensus.json]`), + Short: "relay a connection open try", + Long: "Relay a connection open try on chain B for a handshake initiated on chain A. Provide counterparty versions separated by commas", + Example: fmt.Sprintf( + `%s tx %s %s open-try [connection-id] [client-id] \ +[counterparty-connection-id] [counterparty-client-id] [path/to/counterparty_prefix.json] [path/to/client_state.json]\ +[counterparty-versions] [consensus-height] [proof-height] [path/to/proof_init.json] [path/to/proof_client.json] [path/to/proof_consensus.json]`, + version.AppName, host.ModuleName, types.SubModuleName, + ), + Args: cobra.ExactArgs(12), + RunE: func(cmd *cobra.Command, args []string) error { + clientCtx, err := client.GetClientTxContext(cmd) + if err != nil { + return err + } + connectionID := args[0] + clientID := args[1] + counterpartyConnectionID := args[2] + counterpartyClientID := args[3] + + counterpartyPrefix, err := utils.ParsePrefix(clientCtx.LegacyAmino, args[4]) + if err != nil { + return err + } + + counterpartyClient, err := utils.ParseClientState(clientCtx.LegacyAmino, args[5]) + if err != nil { + return err + } + + cdc := codec.NewProtoCodec(clientCtx.InterfaceRegistry) + + versionsStr := strings.Split(args[6], ",") + counterpartyVersions := make([]*types.Version, len(versionsStr)) + + for i, ver := range versionsStr { + + // attempt to unmarshal version + version := &types.Version{} + if err := cdc.UnmarshalJSON([]byte(ver), version); err != nil { + + // check for file path if JSON input is not provided + contents, err := ioutil.ReadFile(ver) + if err != nil { + return errors.Wrap(err, "neither JSON input nor path to .json file for version were provided") + } + + if err := cdc.UnmarshalJSON(contents, version); err != nil { + return errors.Wrap(err, "error unmarshalling version file") + } + } + + counterpartyVersions[i] = version + } + + consensusHeight, err := clienttypes.ParseHeight(args[7]) + if err != nil { + return err + } + proofHeight, err := clienttypes.ParseHeight(args[8]) + if err != nil { + return err + } + + proofInit, err := utils.ParseProof(clientCtx.LegacyAmino, args[9]) + if err != nil { + return err + } + + proofClient, err := utils.ParseProof(clientCtx.LegacyAmino, args[10]) + if err != nil { + return err + } + + proofConsensus, err := utils.ParseProof(clientCtx.LegacyAmino, args[11]) + if err != nil { + return err + } + + delayPeriod, err := cmd.Flags().GetUint64(flagDelayPeriod) + if err != nil { + return err + } + + msg := types.NewMsgConnectionOpenTry( + connectionID, clientID, counterpartyConnectionID, counterpartyClientID, + counterpartyClient, counterpartyPrefix, counterpartyVersions, delayPeriod, + proofInit, proofClient, proofConsensus, proofHeight, + consensusHeight, clientCtx.GetFromAddress(), + ) + + svcMsgClientConn :=
&msgservice.ServiceMsgClientConn{} + msgClient := types.NewMsgClient(svcMsgClientConn) + _, err = msgClient.ConnectionOpenTry(cmd.Context(), msg) + if err != nil { + return err + } + + return tx.GenerateOrBroadcastTxCLI(clientCtx, cmd.Flags(), svcMsgClientConn.GetMsgs()...) + }, + } + + cmd.Flags().Uint64(flagDelayPeriod, 0, "delay period that must pass before packet verification can pass against a consensus state") + flags.AddTxFlagsToCmd(cmd) + + return cmd +} + +// NewConnectionOpenAckCmd defines the command to relay the acceptance of a +// connection open attempt from chain B to chain A +func NewConnectionOpenAckCmd() *cobra.Command { + cmd := &cobra.Command{ + Use: `open-ack [connection-id] [counterparty-connection-id] [path/to/client_state.json] [consensus-height] [proof-height] + [path/to/proof_try.json] [path/to/proof_client.json] [path/to/proof_consensus.json] [version]`, + Short: "relay the acceptance of a connection open attempt", + Long: "Relay the acceptance of a connection open attempt from chain B to chain A", + Example: fmt.Sprintf( + `%s tx %s %s open-ack [connection-id] [counterparty-connection-id] [path/to/client_state.json] [consensus-height] [proof-height] + [path/to/proof_try.json] [path/to/proof_client.json] [path/to/proof_consensus.json] [version]`, + version.AppName, host.ModuleName, types.SubModuleName, + ), + Args: cobra.ExactArgs(9), + RunE: func(cmd *cobra.Command, args []string) error { + clientCtx, err := client.GetClientTxContext(cmd) + if err != nil { + return err + } + connectionID := args[0] + counterpartyConnectionID := args[1] + + counterpartyClient, err := utils.ParseClientState(clientCtx.LegacyAmino, args[2]) + if err != nil { + return err + } + + consensusHeight, err := clienttypes.ParseHeight(args[3]) + if err != nil { + return err + } + proofHeight, err := clienttypes.ParseHeight(args[4]) + if err != nil { + return err + } + + proofTry, err := utils.ParseProof(clientCtx.LegacyAmino, args[5]) + if err != nil { + return err + } + + proofClient, err := utils.ParseProof(clientCtx.LegacyAmino, args[6]) + if err != nil { + return err + } + + proofConsensus, err := utils.ParseProof(clientCtx.LegacyAmino, args[7]) + if err != nil { + return err + } + + cdc := codec.NewProtoCodec(clientCtx.InterfaceRegistry) + + // attempt to unmarshal version + version := &types.Version{} + if err := cdc.UnmarshalJSON([]byte(args[8]), version); err != nil { + + // check for file path if JSON input is not provided + contents, err := ioutil.ReadFile(args[8]) + if err != nil { + return errors.Wrap(err, "neither JSON input nor path to .json file for version were provided") + } + + if err := cdc.UnmarshalJSON(contents, version); err != nil { + return errors.Wrap(err, "error unmarshalling version file") + } + } + + msg := types.NewMsgConnectionOpenAck( + connectionID, counterpartyConnectionID, counterpartyClient, proofTry, proofClient, proofConsensus, proofHeight, + consensusHeight, version, clientCtx.GetFromAddress(), + ) + + svcMsgClientConn := &msgservice.ServiceMsgClientConn{} + msgClient := types.NewMsgClient(svcMsgClientConn) + _, err = msgClient.ConnectionOpenAck(cmd.Context(), msg) + if err != nil { + return err + } + + return tx.GenerateOrBroadcastTxCLI(clientCtx, cmd.Flags(), svcMsgClientConn.GetMsgs()...) 
+ }, + } + + flags.AddTxFlagsToCmd(cmd) + + return cmd +} + +// NewConnectionOpenConfirmCmd defines the command to confirm to chain B that the +// connection on chain A is open +func NewConnectionOpenConfirmCmd() *cobra.Command { + cmd := &cobra.Command{ + Use: "open-confirm [connection-id] [proof-height] [path/to/proof_ack.json]", + Short: "confirm to chain B that connection is open on chain A", + Long: "Confirm to chain B that connection is open on chain A", + Example: fmt.Sprintf( + "%s tx %s %s open-confirm [connection-id] [proof-height] [path/to/proof_ack.json]", + version.AppName, host.ModuleName, types.SubModuleName, + ), + Args: cobra.ExactArgs(3), + RunE: func(cmd *cobra.Command, args []string) error { + clientCtx, err := client.GetClientTxContext(cmd) + if err != nil { + return err + } + connectionID := args[0] + proofHeight, err := clienttypes.ParseHeight(args[1]) + if err != nil { + return err + } + + proofAck, err := utils.ParseProof(clientCtx.LegacyAmino, args[2]) + if err != nil { + return err + } + + msg := types.NewMsgConnectionOpenConfirm( + connectionID, proofAck, proofHeight, clientCtx.GetFromAddress(), + ) + + svcMsgClientConn := &msgservice.ServiceMsgClientConn{} + msgClient := types.NewMsgClient(svcMsgClientConn) + _, err = msgClient.ConnectionOpenConfirm(cmd.Context(), msg) + if err != nil { + return err + } + + return tx.GenerateOrBroadcastTxCLI(clientCtx, cmd.Flags(), svcMsgClientConn.GetMsgs()...) + }, + } + + flags.AddTxFlagsToCmd(cmd) + + return cmd +} diff --git a/core/03-connection/client/utils/utils.go b/core/03-connection/client/utils/utils.go new file mode 100644 index 0000000000..e1eb1ce00c --- /dev/null +++ b/core/03-connection/client/utils/utils.go @@ -0,0 +1,219 @@ +package utils + +import ( + "context" + "fmt" + "io/ioutil" + + "github.com/pkg/errors" + + "github.com/cosmos/cosmos-sdk/client" + "github.com/cosmos/cosmos-sdk/codec" + sdkerrors "github.com/cosmos/cosmos-sdk/types/errors" + clientutils "github.com/cosmos/cosmos-sdk/x/ibc/core/02-client/client/utils" + clienttypes "github.com/cosmos/cosmos-sdk/x/ibc/core/02-client/types" + "github.com/cosmos/cosmos-sdk/x/ibc/core/03-connection/types" + commitmenttypes "github.com/cosmos/cosmos-sdk/x/ibc/core/23-commitment/types" + host "github.com/cosmos/cosmos-sdk/x/ibc/core/24-host" + ibcclient "github.com/cosmos/cosmos-sdk/x/ibc/core/client" + "github.com/cosmos/cosmos-sdk/x/ibc/core/exported" +) + +// QueryConnection returns a connection end. +// If prove is true, it performs an ABCI store query in order to retrieve the merkle proof. Otherwise, +// it uses the gRPC query client.
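As a rough illustration of what the prove flag buys a caller, a hypothetical wrapper around QueryConnection might look like the sketch below (the client.Context wiring is omitted and assumed to come from the application; the response fields follow the QueryConnectionResponse already used by the CLI above):

package main

import (
	"fmt"

	"github.com/cosmos/cosmos-sdk/client"
	connutils "github.com/cosmos/cosmos-sdk/x/ibc/core/03-connection/client/utils"
)

// printConnection queries one connection end, optionally with a merkle proof.
func printConnection(clientCtx client.Context, connectionID string, prove bool) error {
	// prove=true goes through the ABCI store query and returns the proof and
	// the height it was generated at; prove=false uses the gRPC query client.
	res, err := connutils.QueryConnection(clientCtx, connectionID, prove)
	if err != nil {
		return err
	}
	fmt.Printf("state=%s proof-height=%d proof-bytes=%d\n",
		res.Connection.State, res.ProofHeight.RevisionHeight, len(res.Proof))
	return nil
}

func main() {} // illustration only; a real caller would construct a client.Context first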
+func QueryConnection( + clientCtx client.Context, connectionID string, prove bool, +) (*types.QueryConnectionResponse, error) { + if prove { + return queryConnectionABCI(clientCtx, connectionID) + } + + queryClient := types.NewQueryClient(clientCtx) + req := &types.QueryConnectionRequest{ + ConnectionId: connectionID, + } + + return queryClient.Connection(context.Background(), req) +} + +func queryConnectionABCI(clientCtx client.Context, connectionID string) (*types.QueryConnectionResponse, error) { + key := host.ConnectionKey(connectionID) + + value, proofBz, proofHeight, err := ibcclient.QueryTendermintProof(clientCtx, key) + if err != nil { + return nil, err + } + + // check if connection exists + if len(value) == 0 { + return nil, sdkerrors.Wrap(types.ErrConnectionNotFound, connectionID) + } + + cdc := codec.NewProtoCodec(clientCtx.InterfaceRegistry) + + var connection types.ConnectionEnd + if err := cdc.UnmarshalBinaryBare(value, &connection); err != nil { + return nil, err + } + + return types.NewQueryConnectionResponse(connection, proofBz, proofHeight), nil +} + +// QueryClientConnections queries the connection paths registered for a particular client. +// If prove is true, it performs an ABCI store query in order to retrieve the merkle proof. Otherwise, +// it uses the gRPC query client. +func QueryClientConnections( + clientCtx client.Context, clientID string, prove bool, +) (*types.QueryClientConnectionsResponse, error) { + if prove { + return queryClientConnectionsABCI(clientCtx, clientID) + } + + queryClient := types.NewQueryClient(clientCtx) + req := &types.QueryClientConnectionsRequest{ + ClientId: clientID, + } + + return queryClient.ClientConnections(context.Background(), req) +} + +func queryClientConnectionsABCI(clientCtx client.Context, clientID string) (*types.QueryClientConnectionsResponse, error) { + key := host.ClientConnectionsKey(clientID) + + value, proofBz, proofHeight, err := ibcclient.QueryTendermintProof(clientCtx, key) + if err != nil { + return nil, err + } + + // check if connection paths exist + if len(value) == 0 { + return nil, sdkerrors.Wrap(types.ErrClientConnectionPathsNotFound, clientID) + } + + var paths []string + if err := clientCtx.LegacyAmino.UnmarshalBinaryBare(value, &paths); err != nil { + return nil, err + } + + return types.NewQueryClientConnectionsResponse(paths, proofBz, proofHeight), nil +} + +// QueryConnectionClientState returns the ClientState of a connection end. If +// prove is true, it performs an ABCI store query in order to retrieve the +// merkle proof. Otherwise, it uses the gRPC query client. 
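The ABCI helpers above prove membership of raw ICS-24 store keys rather than going through the gRPC service. A small sketch of the key layout they query (identifiers are placeholders; the printed paths are expected to be the standard ICS-24 ones, "connections/<id>" and "clients/<id>/connections"):

package main

import (
	"fmt"

	host "github.com/cosmos/cosmos-sdk/x/ibc/core/24-host"
)

func main() {
	// Key proven by queryConnectionABCI.
	fmt.Println(string(host.ConnectionKey("connection-0")))
	// Key proven by queryClientConnectionsABCI.
	fmt.Println(string(host.ClientConnectionsKey("07-tendermint-0")))
}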
+func QueryConnectionClientState( + clientCtx client.Context, connectionID string, prove bool, +) (*types.QueryConnectionClientStateResponse, error) { + + queryClient := types.NewQueryClient(clientCtx) + req := &types.QueryConnectionClientStateRequest{ + ConnectionId: connectionID, + } + + res, err := queryClient.ConnectionClientState(context.Background(), req) + if err != nil { + return nil, err + } + + if prove { + clientStateRes, err := clientutils.QueryClientStateABCI(clientCtx, res.IdentifiedClientState.ClientId) + if err != nil { + return nil, err + } + + // use client state returned from ABCI query in case query height differs + identifiedClientState := clienttypes.IdentifiedClientState{ + ClientId: res.IdentifiedClientState.ClientId, + ClientState: clientStateRes.ClientState, + } + + res = types.NewQueryConnectionClientStateResponse(identifiedClientState, clientStateRes.Proof, clientStateRes.ProofHeight) + } + + return res, nil +} + +// QueryConnectionConsensusState returns the ConsensusState of a connection end. If +// prove is true, it performs an ABCI store query in order to retrieve the +// merkle proof. Otherwise, it uses the gRPC query client. +func QueryConnectionConsensusState( + clientCtx client.Context, connectionID string, height clienttypes.Height, prove bool, +) (*types.QueryConnectionConsensusStateResponse, error) { + + queryClient := types.NewQueryClient(clientCtx) + req := &types.QueryConnectionConsensusStateRequest{ + ConnectionId: connectionID, + RevisionNumber: height.RevisionNumber, + RevisionHeight: height.RevisionHeight, + } + + res, err := queryClient.ConnectionConsensusState(context.Background(), req) + if err != nil { + return nil, err + } + + if prove { + consensusStateRes, err := clientutils.QueryConsensusStateABCI(clientCtx, res.ClientId, height) + if err != nil { + return nil, err + } + + res = types.NewQueryConnectionConsensusStateResponse(res.ClientId, consensusStateRes.ConsensusState, height, consensusStateRes.Proof, consensusStateRes.ProofHeight) + } + + return res, nil +} + +// ParseClientState unmarshals a cmd input argument from a JSON string to a client state. +// If the input is not a JSON, it looks for a path to the JSON file +func ParseClientState(cdc *codec.LegacyAmino, arg string) (exported.ClientState, error) { + var clientState exported.ClientState + if err := cdc.UnmarshalJSON([]byte(arg), &clientState); err != nil { + // check for file path if JSON input is not provided + contents, err := ioutil.ReadFile(arg) + if err != nil { + return nil, errors.New("neither JSON input nor path to .json file were provided") + } + if err := cdc.UnmarshalJSON(contents, &clientState); err != nil { + return nil, errors.Wrap(err, "error unmarshalling client state") + } + } + return clientState, nil +} + +// ParsePrefix unmarshals a cmd input argument from a JSON string to a commitment +// Prefix. If the input is not a JSON, it looks for a path to the JSON file.
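ParseClientState above, and ParsePrefix and ParseProof below, all follow the same convention: try the argument as inline JSON first, and only then treat it as a path to a .json file. A stdlib-only sketch of that convention (the real helpers decode through the amino codec rather than encoding/json):

package main

import (
	"encoding/json"
	"fmt"
	"io/ioutil"
)

// unmarshalJSONOrFile decodes arg as JSON, falling back to reading arg as a
// file path when the inline decode fails.
func unmarshalJSONOrFile(arg string, v interface{}) error {
	if err := json.Unmarshal([]byte(arg), v); err == nil {
		return nil
	}
	contents, err := ioutil.ReadFile(arg)
	if err != nil {
		return fmt.Errorf("neither JSON input nor path to .json file were provided: %w", err)
	}
	return json.Unmarshal(contents, v)
}

func main() {
	var prefix struct {
		KeyPrefix []byte `json:"key_prefix"`
	}
	// Inline JSON works ("aWJj" is base64 for "ibc")...
	_ = unmarshalJSONOrFile(`{"key_prefix":"aWJj"}`, &prefix)
	fmt.Printf("%s\n", prefix.KeyPrefix)
	// ...and so would a path such as "counterparty_prefix.json" (hypothetical file).
}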
+func ParsePrefix(cdc *codec.LegacyAmino, arg string) (commitmenttypes.MerklePrefix, error) { + var prefix commitmenttypes.MerklePrefix + if err := cdc.UnmarshalJSON([]byte(arg), &prefix); err != nil { + // check for file path if JSON input is not provided + contents, err := ioutil.ReadFile(arg) + if err != nil { + return commitmenttypes.MerklePrefix{}, errors.New("neither JSON input nor path to .json file were provided") + } + if err := cdc.UnmarshalJSON(contents, &prefix); err != nil { + return commitmenttypes.MerklePrefix{}, errors.Wrap(err, "error unmarshalling commitment prefix") + } + } + return prefix, nil +} + +// ParseProof unmarshals a cmd input argument from a JSON string to a commitment +// Proof. If the input is not a JSON, it looks for a path to the JSON file. It +// then marshals the commitment proof into a proto encoded byte array. +func ParseProof(cdc *codec.LegacyAmino, arg string) ([]byte, error) { + var merkleProof commitmenttypes.MerkleProof + if err := cdc.UnmarshalJSON([]byte(arg), &merkleProof); err != nil { + // check for file path if JSON input is not provided + contents, err := ioutil.ReadFile(arg) + if err != nil { + return nil, errors.New("neither JSON input nor path to .json file were provided") + } + if err := cdc.UnmarshalJSON(contents, &merkleProof); err != nil { + return nil, fmt.Errorf("error unmarshalling commitment proof: %w", err) + } + } + + return cdc.MarshalBinaryBare(&merkleProof) +} diff --git a/core/03-connection/genesis.go b/core/03-connection/genesis.go new file mode 100644 index 0000000000..a1bb30f1fe --- /dev/null +++ b/core/03-connection/genesis.go @@ -0,0 +1,28 @@ +package connection + +import ( + sdk "github.com/cosmos/cosmos-sdk/types" + "github.com/cosmos/cosmos-sdk/x/ibc/core/03-connection/keeper" + "github.com/cosmos/cosmos-sdk/x/ibc/core/03-connection/types" +) + +// InitGenesis initializes the ibc connection submodule's state from a provided genesis +// state. +func InitGenesis(ctx sdk.Context, k keeper.Keeper, gs types.GenesisState) { + for _, connection := range gs.Connections { + conn := types.NewConnectionEnd(connection.State, connection.ClientId, connection.Counterparty, connection.Versions, connection.DelayPeriod) + k.SetConnection(ctx, connection.Id, conn) + } + for _, connPaths := range gs.ClientConnectionPaths { + k.SetClientConnectionPaths(ctx, connPaths.ClientId, connPaths.Paths) + } + k.SetNextConnectionSequence(ctx, gs.NextConnectionSequence) +} + +// ExportGenesis returns the ibc connection submodule's exported genesis. 
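For orientation, a sketch of the genesis shape that InitGenesis above consumes; the identifiers are placeholders, and the exact Go slice types (non-pointer elements) are assumed from the generated genesis.pb.go:

package main

import (
	"fmt"

	"github.com/cosmos/cosmos-sdk/x/ibc/core/03-connection/types"
	commitmenttypes "github.com/cosmos/cosmos-sdk/x/ibc/core/23-commitment/types"
)

func main() {
	counterparty := types.NewCounterparty(
		"07-tendermint-1", "connection-5", commitmenttypes.NewMerklePrefix([]byte("ibc")),
	)
	conn := types.NewConnectionEnd(
		types.OPEN, "07-tendermint-0", counterparty,
		types.ExportedVersionsToProto(types.GetCompatibleVersions()), 0,
	)

	gs := types.GenesisState{
		Connections:            []types.IdentifiedConnection{types.NewIdentifiedConnection("connection-0", conn)},
		ClientConnectionPaths:  []types.ConnectionPaths{{ClientId: "07-tendermint-0", Paths: []string{"connection-0"}}},
		NextConnectionSequence: 1,
	}
	fmt.Println(gs.Connections[0].Id, gs.NextConnectionSequence)
}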
+func ExportGenesis(ctx sdk.Context, k keeper.Keeper) types.GenesisState { + return types.GenesisState{ + Connections: k.GetAllConnections(ctx), + ClientConnectionPaths: k.GetAllClientConnectionPaths(ctx), + } +} diff --git a/core/03-connection/keeper/grpc_query.go b/core/03-connection/keeper/grpc_query.go new file mode 100644 index 0000000000..62b1c00a34 --- /dev/null +++ b/core/03-connection/keeper/grpc_query.go @@ -0,0 +1,179 @@ +package keeper + +import ( + "context" + + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" + + "github.com/cosmos/cosmos-sdk/store/prefix" + sdk "github.com/cosmos/cosmos-sdk/types" + sdkerrors "github.com/cosmos/cosmos-sdk/types/errors" + "github.com/cosmos/cosmos-sdk/types/query" + clienttypes "github.com/cosmos/cosmos-sdk/x/ibc/core/02-client/types" + "github.com/cosmos/cosmos-sdk/x/ibc/core/03-connection/types" + host "github.com/cosmos/cosmos-sdk/x/ibc/core/24-host" +) + +var _ types.QueryServer = Keeper{} + +// Connection implements the Query/Connection gRPC method +func (q Keeper) Connection(c context.Context, req *types.QueryConnectionRequest) (*types.QueryConnectionResponse, error) { + if req == nil { + return nil, status.Error(codes.InvalidArgument, "empty request") + } + + if err := host.ConnectionIdentifierValidator(req.ConnectionId); err != nil { + return nil, status.Error(codes.InvalidArgument, err.Error()) + } + + ctx := sdk.UnwrapSDKContext(c) + connection, found := q.GetConnection(ctx, req.ConnectionId) + if !found { + return nil, status.Error( + codes.NotFound, + sdkerrors.Wrap(types.ErrConnectionNotFound, req.ConnectionId).Error(), + ) + } + + return &types.QueryConnectionResponse{ + Connection: &connection, + ProofHeight: clienttypes.GetSelfHeight(ctx), + }, nil +} + +// Connections implements the Query/Connections gRPC method +func (q Keeper) Connections(c context.Context, req *types.QueryConnectionsRequest) (*types.QueryConnectionsResponse, error) { + if req == nil { + return nil, status.Error(codes.InvalidArgument, "empty request") + } + + ctx := sdk.UnwrapSDKContext(c) + + connections := []*types.IdentifiedConnection{} + store := prefix.NewStore(ctx.KVStore(q.storeKey), []byte(host.KeyConnectionPrefix)) + + pageRes, err := query.Paginate(store, req.Pagination, func(key, value []byte) error { + var result types.ConnectionEnd + if err := q.cdc.UnmarshalBinaryBare(value, &result); err != nil { + return err + } + + connectionID, err := host.ParseConnectionPath(string(key)) + if err != nil { + return err + } + + identifiedConnection := types.NewIdentifiedConnection(connectionID, result) + connections = append(connections, &identifiedConnection) + return nil + }) + + if err != nil { + return nil, err + } + + return &types.QueryConnectionsResponse{ + Connections: connections, + Pagination: pageRes, + Height: clienttypes.GetSelfHeight(ctx), + }, nil +} + +// ClientConnections implements the Query/ClientConnections gRPC method +func (q Keeper) ClientConnections(c context.Context, req *types.QueryClientConnectionsRequest) (*types.QueryClientConnectionsResponse, error) { + if req == nil { + return nil, status.Error(codes.InvalidArgument, "empty request") + } + + if err := host.ClientIdentifierValidator(req.ClientId); err != nil { + return nil, status.Error(codes.InvalidArgument, err.Error()) + } + + ctx := sdk.UnwrapSDKContext(c) + clientConnectionPaths, found := q.GetClientConnectionPaths(ctx, req.ClientId) + if !found { + return nil, status.Error( + codes.NotFound, + sdkerrors.Wrap(types.ErrClientConnectionPathsNotFound, 
req.ClientId).Error(), + ) + } + + return &types.QueryClientConnectionsResponse{ + ConnectionPaths: clientConnectionPaths, + ProofHeight: clienttypes.GetSelfHeight(ctx), + }, nil +} + +// ConnectionClientState implements the Query/ConnectionClientState gRPC method +func (q Keeper) ConnectionClientState(c context.Context, req *types.QueryConnectionClientStateRequest) (*types.QueryConnectionClientStateResponse, error) { + if req == nil { + return nil, status.Error(codes.InvalidArgument, "empty request") + } + + if err := host.ConnectionIdentifierValidator(req.ConnectionId); err != nil { + return nil, status.Error(codes.InvalidArgument, err.Error()) + } + + ctx := sdk.UnwrapSDKContext(c) + + connection, found := q.GetConnection(ctx, req.ConnectionId) + if !found { + return nil, status.Error( + codes.NotFound, + sdkerrors.Wrapf(types.ErrConnectionNotFound, "connection-id: %s", req.ConnectionId).Error(), + ) + } + + clientState, found := q.clientKeeper.GetClientState(ctx, connection.ClientId) + if !found { + return nil, status.Error( + codes.NotFound, + sdkerrors.Wrapf(clienttypes.ErrClientNotFound, "client-id: %s", connection.ClientId).Error(), + ) + } + + identifiedClientState := clienttypes.NewIdentifiedClientState(connection.ClientId, clientState) + + height := clienttypes.GetSelfHeight(ctx) + return types.NewQueryConnectionClientStateResponse(identifiedClientState, nil, height), nil + +} + +// ConnectionConsensusState implements the Query/ConnectionConsensusState gRPC method +func (q Keeper) ConnectionConsensusState(c context.Context, req *types.QueryConnectionConsensusStateRequest) (*types.QueryConnectionConsensusStateResponse, error) { + if req == nil { + return nil, status.Error(codes.InvalidArgument, "empty request") + } + + if err := host.ConnectionIdentifierValidator(req.ConnectionId); err != nil { + return nil, status.Error(codes.InvalidArgument, err.Error()) + } + + ctx := sdk.UnwrapSDKContext(c) + + connection, found := q.GetConnection(ctx, req.ConnectionId) + if !found { + return nil, status.Error( + codes.NotFound, + sdkerrors.Wrapf(types.ErrConnectionNotFound, "connection-id: %s", req.ConnectionId).Error(), + ) + } + + height := clienttypes.NewHeight(req.RevisionNumber, req.RevisionHeight) + consensusState, found := q.clientKeeper.GetClientConsensusState(ctx, connection.ClientId, height) + if !found { + return nil, status.Error( + codes.NotFound, + sdkerrors.Wrapf(clienttypes.ErrConsensusStateNotFound, "client-id: %s", connection.ClientId).Error(), + ) + } + + anyConsensusState, err := clienttypes.PackConsensusState(consensusState) + if err != nil { + return nil, status.Error(codes.Internal, err.Error()) + } + + proofHeight := clienttypes.GetSelfHeight(ctx) + return types.NewQueryConnectionConsensusStateResponse(connection.ClientId, anyConsensusState, height, nil, proofHeight), nil +} diff --git a/core/03-connection/keeper/grpc_query_test.go b/core/03-connection/keeper/grpc_query_test.go new file mode 100644 index 0000000000..14fdb425d9 --- /dev/null +++ b/core/03-connection/keeper/grpc_query_test.go @@ -0,0 +1,420 @@ +package keeper_test + +import ( + "fmt" + + sdk "github.com/cosmos/cosmos-sdk/types" + "github.com/cosmos/cosmos-sdk/types/query" + clienttypes "github.com/cosmos/cosmos-sdk/x/ibc/core/02-client/types" + "github.com/cosmos/cosmos-sdk/x/ibc/core/03-connection/types" + channeltypes "github.com/cosmos/cosmos-sdk/x/ibc/core/04-channel/types" + "github.com/cosmos/cosmos-sdk/x/ibc/core/exported" + ibctesting "github.com/cosmos/cosmos-sdk/x/ibc/testing" +) + +func 
(suite *KeeperTestSuite) TestQueryConnection() { + var ( + req *types.QueryConnectionRequest + expConnection types.ConnectionEnd + ) + + testCases := []struct { + msg string + malleate func() + expPass bool + }{ + { + "empty request", + func() { + req = nil + }, + false, + }, + {"invalid connectionID", + func() { + req = &types.QueryConnectionRequest{} + }, + false, + }, + {"connection not found", + func() { + req = &types.QueryConnectionRequest{ + ConnectionId: ibctesting.InvalidID, + } + }, + false, + }, + { + "success", + func() { + clientA, clientB := suite.coordinator.SetupClients(suite.chainA, suite.chainB, exported.Tendermint) + connA := suite.chainA.GetFirstTestConnection(clientA, clientB) + connB := suite.chainB.GetFirstTestConnection(clientB, clientA) + + counterparty := types.NewCounterparty(clientB, connB.ID, suite.chainB.GetPrefix()) + expConnection = types.NewConnectionEnd(types.INIT, clientA, counterparty, types.ExportedVersionsToProto(types.GetCompatibleVersions()), 500) + suite.chainA.App.IBCKeeper.ConnectionKeeper.SetConnection(suite.chainA.GetContext(), connA.ID, expConnection) + + req = &types.QueryConnectionRequest{ + ConnectionId: connA.ID, + } + }, + true, + }, + } + + for _, tc := range testCases { + suite.Run(fmt.Sprintf("Case %s", tc.msg), func() { + suite.SetupTest() // reset + + tc.malleate() + ctx := sdk.WrapSDKContext(suite.chainA.GetContext()) + + res, err := suite.chainA.QueryServer.Connection(ctx, req) + + if tc.expPass { + suite.Require().NoError(err) + suite.Require().NotNil(res) + suite.Require().Equal(&expConnection, res.Connection) + } else { + suite.Require().Error(err) + } + }) + } +} + +func (suite *KeeperTestSuite) TestQueryConnections() { + var ( + req *types.QueryConnectionsRequest + expConnections = []*types.IdentifiedConnection{} + ) + + testCases := []struct { + msg string + malleate func() + expPass bool + }{ + { + "empty request", + func() { + req = nil + }, + false, + }, + { + "empty pagination", + func() { + req = &types.QueryConnectionsRequest{} + }, + true, + }, + { + "success", + func() { + clientA, clientB, connA0, connB0 := suite.coordinator.SetupClientConnections(suite.chainA, suite.chainB, exported.Tendermint) + clientA1, clientB1, connA1, connB1 := suite.coordinator.SetupClientConnections(suite.chainA, suite.chainB, exported.Tendermint) + connA2, _, err := suite.coordinator.ConnOpenInit(suite.chainA, suite.chainB, clientA, clientB) + suite.Require().NoError(err) + + counterparty1 := types.NewCounterparty(clientB, connB0.ID, suite.chainB.GetPrefix()) + counterparty2 := types.NewCounterparty(clientB1, connB1.ID, suite.chainB.GetPrefix()) + // counterparty connection id is blank after open init + counterparty3 := types.NewCounterparty(clientB, "", suite.chainB.GetPrefix()) + + conn1 := types.NewConnectionEnd(types.OPEN, clientA, counterparty1, types.ExportedVersionsToProto(types.GetCompatibleVersions()), 0) + conn2 := types.NewConnectionEnd(types.OPEN, clientA1, counterparty2, types.ExportedVersionsToProto(types.GetCompatibleVersions()), 0) + conn3 := types.NewConnectionEnd(types.INIT, clientA, counterparty3, types.ExportedVersionsToProto(types.GetCompatibleVersions()), 0) + + iconn1 := types.NewIdentifiedConnection(connA0.ID, conn1) + iconn2 := types.NewIdentifiedConnection(connA1.ID, conn2) + iconn3 := types.NewIdentifiedConnection(connA2.ID, conn3) + + expConnections = []*types.IdentifiedConnection{&iconn1, &iconn2, &iconn3} + + req = &types.QueryConnectionsRequest{ + Pagination: &query.PageRequest{ + Limit: 3, + CountTotal: true, 
+ }, + } + }, + true, + }, + } + + for _, tc := range testCases { + suite.Run(fmt.Sprintf("Case %s", tc.msg), func() { + suite.SetupTest() // reset + + tc.malleate() + ctx := sdk.WrapSDKContext(suite.chainA.GetContext()) + + res, err := suite.chainA.QueryServer.Connections(ctx, req) + + if tc.expPass { + suite.Require().NoError(err) + suite.Require().NotNil(res) + suite.Require().Equal(expConnections, res.Connections) + } else { + suite.Require().Error(err) + } + }) + } +} + +func (suite *KeeperTestSuite) TestQueryClientConnections() { + var ( + req *types.QueryClientConnectionsRequest + expPaths []string + ) + + testCases := []struct { + msg string + malleate func() + expPass bool + }{ + { + "empty request", + func() { + req = nil + }, + false, + }, + {"invalid connectionID", + func() { + req = &types.QueryClientConnectionsRequest{} + }, + false, + }, + {"connection not found", + func() { + req = &types.QueryClientConnectionsRequest{ + ClientId: ibctesting.InvalidID, + } + }, + false, + }, + { + "success", + func() { + clientA, clientB, connA0, _ := suite.coordinator.SetupClientConnections(suite.chainA, suite.chainB, exported.Tendermint) + connA1, _ := suite.coordinator.CreateConnection(suite.chainA, suite.chainB, clientA, clientB) + expPaths = []string{connA0.ID, connA1.ID} + suite.chainA.App.IBCKeeper.ConnectionKeeper.SetClientConnectionPaths(suite.chainA.GetContext(), clientA, expPaths) + + req = &types.QueryClientConnectionsRequest{ + ClientId: clientA, + } + }, + true, + }, + } + + for _, tc := range testCases { + suite.Run(fmt.Sprintf("Case %s", tc.msg), func() { + suite.SetupTest() // reset + + tc.malleate() + ctx := sdk.WrapSDKContext(suite.chainA.GetContext()) + + res, err := suite.chainA.QueryServer.ClientConnections(ctx, req) + + if tc.expPass { + suite.Require().NoError(err) + suite.Require().NotNil(res) + suite.Require().Equal(expPaths, res.ConnectionPaths) + } else { + suite.Require().Error(err) + } + }) + } +} + +func (suite *KeeperTestSuite) TestQueryConnectionClientState() { + var ( + req *types.QueryConnectionClientStateRequest + expIdentifiedClientState clienttypes.IdentifiedClientState + ) + + testCases := []struct { + msg string + malleate func() + expPass bool + }{ + { + "empty request", + func() { + req = nil + }, + false, + }, + { + "invalid connection ID", + func() { + req = &types.QueryConnectionClientStateRequest{ + ConnectionId: "", + } + }, + false, + }, + { + "connection not found", + func() { + req = &types.QueryConnectionClientStateRequest{ + ConnectionId: "test-connection-id", + } + }, + false, + }, + { + "client state not found", + func() { + _, _, connA, _, _, _ := suite.coordinator.Setup(suite.chainA, suite.chainB, channeltypes.UNORDERED) + + // set connection to empty so clientID is empty + suite.chainA.App.IBCKeeper.ConnectionKeeper.SetConnection(suite.chainA.GetContext(), connA.ID, types.ConnectionEnd{}) + + req = &types.QueryConnectionClientStateRequest{ + ConnectionId: connA.ID, + } + }, false, + }, + { + "success", + func() { + clientA, _, connA, _ := suite.coordinator.SetupClientConnections(suite.chainA, suite.chainB, exported.Tendermint) + + expClientState := suite.chainA.GetClientState(clientA) + expIdentifiedClientState = clienttypes.NewIdentifiedClientState(clientA, expClientState) + + req = &types.QueryConnectionClientStateRequest{ + ConnectionId: connA.ID, + } + }, + true, + }, + } + + for _, tc := range testCases { + suite.Run(fmt.Sprintf("Case %s", tc.msg), func() { + suite.SetupTest() // reset + + tc.malleate() + ctx := 
sdk.WrapSDKContext(suite.chainA.GetContext()) + + res, err := suite.chainA.QueryServer.ConnectionClientState(ctx, req) + + if tc.expPass { + suite.Require().NoError(err) + suite.Require().NotNil(res) + suite.Require().Equal(&expIdentifiedClientState, res.IdentifiedClientState) + + // ensure UnpackInterfaces is defined + cachedValue := res.IdentifiedClientState.ClientState.GetCachedValue() + suite.Require().NotNil(cachedValue) + } else { + suite.Require().Error(err) + } + }) + } +} + +func (suite *KeeperTestSuite) TestQueryConnectionConsensusState() { + var ( + req *types.QueryConnectionConsensusStateRequest + expConsensusState exported.ConsensusState + expClientID string + ) + + testCases := []struct { + msg string + malleate func() + expPass bool + }{ + { + "empty request", + func() { + req = nil + }, + false, + }, + { + "invalid connection ID", + func() { + req = &types.QueryConnectionConsensusStateRequest{ + ConnectionId: "", + RevisionNumber: 0, + RevisionHeight: 1, + } + }, + false, + }, + { + "connection not found", + func() { + req = &types.QueryConnectionConsensusStateRequest{ + ConnectionId: "test-connection-id", + RevisionNumber: 0, + RevisionHeight: 1, + } + }, + false, + }, + { + "consensus state not found", + func() { + _, _, connA, _, _, _ := suite.coordinator.Setup(suite.chainA, suite.chainB, channeltypes.UNORDERED) + + req = &types.QueryConnectionConsensusStateRequest{ + ConnectionId: connA.ID, + RevisionNumber: 0, + RevisionHeight: uint64(suite.chainA.GetContext().BlockHeight()), // use current height + } + }, false, + }, + { + "success", + func() { + clientA, _, connA, _ := suite.coordinator.SetupClientConnections(suite.chainA, suite.chainB, exported.Tendermint) + + clientState := suite.chainA.GetClientState(clientA) + expConsensusState, _ = suite.chainA.GetConsensusState(clientA, clientState.GetLatestHeight()) + suite.Require().NotNil(expConsensusState) + expClientID = clientA + + req = &types.QueryConnectionConsensusStateRequest{ + ConnectionId: connA.ID, + RevisionNumber: clientState.GetLatestHeight().GetRevisionNumber(), + RevisionHeight: clientState.GetLatestHeight().GetRevisionHeight(), + } + }, + true, + }, + } + + for _, tc := range testCases { + suite.Run(fmt.Sprintf("Case %s", tc.msg), func() { + suite.SetupTest() // reset + + tc.malleate() + ctx := sdk.WrapSDKContext(suite.chainA.GetContext()) + + res, err := suite.chainA.QueryServer.ConnectionConsensusState(ctx, req) + + if tc.expPass { + suite.Require().NoError(err) + suite.Require().NotNil(res) + consensusState, err := clienttypes.UnpackConsensusState(res.ConsensusState) + suite.Require().NoError(err) + suite.Require().Equal(expConsensusState, consensusState) + suite.Require().Equal(expClientID, res.ClientId) + + // ensure UnpackInterfaces is defined + cachedValue := res.ConsensusState.GetCachedValue() + suite.Require().NotNil(cachedValue) + } else { + suite.Require().Error(err) + } + }) + } +} diff --git a/core/03-connection/keeper/handshake.go b/core/03-connection/keeper/handshake.go new file mode 100644 index 0000000000..b8f7466f15 --- /dev/null +++ b/core/03-connection/keeper/handshake.go @@ -0,0 +1,342 @@ +package keeper + +import ( + "bytes" + + "github.com/gogo/protobuf/proto" + + "github.com/cosmos/cosmos-sdk/telemetry" + sdk "github.com/cosmos/cosmos-sdk/types" + sdkerrors "github.com/cosmos/cosmos-sdk/types/errors" + clienttypes "github.com/cosmos/cosmos-sdk/x/ibc/core/02-client/types" + "github.com/cosmos/cosmos-sdk/x/ibc/core/03-connection/types" + commitmenttypes 
"github.com/cosmos/cosmos-sdk/x/ibc/core/23-commitment/types" + "github.com/cosmos/cosmos-sdk/x/ibc/core/exported" +) + +// ConnOpenInit initialises a connection attempt on chain A. The generated connection identifier +// is returned. +// +// NOTE: Msg validation verifies the supplied identifiers and ensures that the counterparty +// connection identifier is empty. +func (k Keeper) ConnOpenInit( + ctx sdk.Context, + clientID string, + counterparty types.Counterparty, // counterpartyPrefix, counterpartyClientIdentifier + version *types.Version, + delayPeriod uint64, +) (string, error) { + versions := types.GetCompatibleVersions() + if version != nil { + if !types.IsSupportedVersion(version) { + return "", sdkerrors.Wrap(types.ErrInvalidVersion, "version is not supported") + } + + versions = []exported.Version{version} + } + + // connection defines chain A's ConnectionEnd + connectionID := k.GenerateConnectionIdentifier(ctx) + connection := types.NewConnectionEnd(types.INIT, clientID, counterparty, types.ExportedVersionsToProto(versions), delayPeriod) + k.SetConnection(ctx, connectionID, connection) + + if err := k.addConnectionToClient(ctx, clientID, connectionID); err != nil { + return "", err + } + + k.Logger(ctx).Info("connection state updated", "connection-id", connectionID, "previous-state", "NONE", "new-state", "INIT") + + defer func() { + telemetry.IncrCounter(1, "ibc", "connection", "open-init") + }() + + return connectionID, nil +} + +// ConnOpenTry relays notice of a connection attempt on chain A to chain B (this +// code is executed on chain B). +// +// NOTE: +// - Here chain A acts as the counterparty +// - Identifiers are checked on msg validation +func (k Keeper) ConnOpenTry( + ctx sdk.Context, + previousConnectionID string, // previousIdentifier + counterparty types.Counterparty, // counterpartyConnectionIdentifier, counterpartyPrefix and counterpartyClientIdentifier + delayPeriod uint64, + clientID string, // clientID of chainA + clientState exported.ClientState, // clientState that chainA has for chainB + counterpartyVersions []exported.Version, // supported versions of chain A + proofInit []byte, // proof that chainA stored connectionEnd in state (on ConnOpenInit) + proofClient []byte, // proof that chainA stored a light client of chainB + proofConsensus []byte, // proof that chainA stored chainB's consensus state at consensus height + proofHeight exported.Height, // height at which relayer constructs proof of A storing connectionEnd in state + consensusHeight exported.Height, // latest height of chain B which chain A has stored in its chain B client +) (string, error) { + var ( + connectionID string + previousConnection types.ConnectionEnd + found bool + ) + + // empty connection identifier indicates continuing a previous connection handshake + if previousConnectionID != "" { + // ensure that the previous connection exists + previousConnection, found = k.GetConnection(ctx, previousConnectionID) + if !found { + return "", sdkerrors.Wrapf(types.ErrConnectionNotFound, "previous connection does not exist for supplied previous connectionID %s", previousConnectionID) + } + + // ensure that the existing connection's + // counterparty is chainA and connection is on INIT stage. + // Check that existing connection versions for initialized connection is equal to compatible + // versions for this chain. + // ensure that existing connection's delay period is the same as desired delay period. 
+ if !(previousConnection.Counterparty.ConnectionId == "" && + bytes.Equal(previousConnection.Counterparty.Prefix.Bytes(), counterparty.Prefix.Bytes()) && + previousConnection.ClientId == clientID && + previousConnection.Counterparty.ClientId == counterparty.ClientId && + previousConnection.DelayPeriod == delayPeriod) { + return "", sdkerrors.Wrap(types.ErrInvalidConnection, "connection fields mismatch previous connection fields") + } + + if !(previousConnection.State == types.INIT) { + return "", sdkerrors.Wrapf(types.ErrInvalidConnectionState, "previous connection state is in state %s, expected INIT", previousConnection.State) + } + + // continue with previous connection + connectionID = previousConnectionID + + } else { + // generate a new connection + connectionID = k.GenerateConnectionIdentifier(ctx) + } + + selfHeight := clienttypes.GetSelfHeight(ctx) + if consensusHeight.GTE(selfHeight) { + return "", sdkerrors.Wrapf( + sdkerrors.ErrInvalidHeight, + "consensus height is greater than or equal to the current block height (%s >= %s)", consensusHeight, selfHeight, + ) + } + + // validate client parameters of a chainB client stored on chainA + if err := k.clientKeeper.ValidateSelfClient(ctx, clientState); err != nil { + return "", err + } + + expectedConsensusState, found := k.clientKeeper.GetSelfConsensusState(ctx, consensusHeight) + if !found { + return "", sdkerrors.Wrap(clienttypes.ErrSelfConsensusStateNotFound, consensusHeight.String()) + } + + // expectedConnection defines Chain A's ConnectionEnd + // NOTE: chain A's counterparty is chain B (i.e where this code is executed) + // NOTE: chainA and chainB must have the same delay period + prefix := k.GetCommitmentPrefix() + expectedCounterparty := types.NewCounterparty(clientID, "", commitmenttypes.NewMerklePrefix(prefix.Bytes())) + expectedConnection := types.NewConnectionEnd(types.INIT, counterparty.ClientId, expectedCounterparty, types.ExportedVersionsToProto(counterpartyVersions), delayPeriod) + + supportedVersions := types.GetCompatibleVersions() + if len(previousConnection.Versions) != 0 { + supportedVersions = previousConnection.GetVersions() + } + + // chain B picks a version from Chain A's available versions that is compatible + // with Chain B's supported IBC versions. PickVersion will select the intersection + // of the supported versions and the counterparty versions. 
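A short sketch of the intersection rule described above: PickVersion succeeds only when the two sides share at least one version (the "99" identifier below is made up to force the failure case):

package main

import (
	"fmt"

	"github.com/cosmos/cosmos-sdk/x/ibc/core/03-connection/types"
	"github.com/cosmos/cosmos-sdk/x/ibc/core/exported"
)

func main() {
	supported := types.GetCompatibleVersions()

	// Identical sets: one of the shared versions is picked.
	picked, err := types.PickVersion(supported, types.GetCompatibleVersions())
	fmt.Println(picked, err)

	// Disjoint sets: no version can be picked and an error is returned,
	// which aborts ConnOpenTry.
	_, err = types.PickVersion(supported, []exported.Version{types.NewVersion("99", nil)})
	fmt.Println(err != nil) // expected: true
}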
+ version, err := types.PickVersion(supportedVersions, counterpartyVersions) + if err != nil { + return "", err + } + + // connection defines chain B's ConnectionEnd + connection := types.NewConnectionEnd(types.TRYOPEN, clientID, counterparty, []*types.Version{version}, delayPeriod) + + // Check that ChainA committed expectedConnectionEnd to its state + if err := k.VerifyConnectionState( + ctx, connection, proofHeight, proofInit, counterparty.ConnectionId, + expectedConnection, + ); err != nil { + return "", err + } + + // Check that ChainA stored the clientState provided in the msg + if err := k.VerifyClientState(ctx, connection, proofHeight, proofClient, clientState); err != nil { + return "", err + } + + // Check that ChainA stored the correct ConsensusState of chainB at the given consensusHeight + if err := k.VerifyClientConsensusState( + ctx, connection, proofHeight, consensusHeight, proofConsensus, expectedConsensusState, + ); err != nil { + return "", err + } + + // store connection in chainB state + if err := k.addConnectionToClient(ctx, clientID, connectionID); err != nil { + return "", sdkerrors.Wrapf(err, "failed to add connection with ID %s to client with ID %s", connectionID, clientID) + } + + k.SetConnection(ctx, connectionID, connection) + k.Logger(ctx).Info("connection state updated", "connection-id", connectionID, "previous-state", previousConnection.State.String(), "new-state", "TRYOPEN") + + defer func() { + telemetry.IncrCounter(1, "ibc", "connection", "open-try") + }() + + return connectionID, nil +} + +// ConnOpenAck relays acceptance of a connection open attempt from chain B back +// to chain A (this code is executed on chain A). +// +// NOTE: Identifiers are checked on msg validation. +func (k Keeper) ConnOpenAck( + ctx sdk.Context, + connectionID string, + clientState exported.ClientState, // client state for chainA on chainB + version *types.Version, // version that ChainB chose in ConnOpenTry + counterpartyConnectionID string, + proofTry []byte, // proof that connectionEnd was added to ChainB state in ConnOpenTry + proofClient []byte, // proof of client state on chainB for chainA + proofConsensus []byte, // proof that chainB has stored ConsensusState of chainA on its client + proofHeight exported.Height, // height that relayer constructed proofTry + consensusHeight exported.Height, // latest height of chainA that chainB has stored on its chainA client +) error { + // Check that chainB client hasn't stored an invalid height + selfHeight := clienttypes.GetSelfHeight(ctx) + if consensusHeight.GTE(selfHeight) { + return sdkerrors.Wrapf( + sdkerrors.ErrInvalidHeight, + "consensus height is greater than or equal to the current block height (%s >= %s)", consensusHeight, selfHeight, + ) + } + + // Retrieve connection + connection, found := k.GetConnection(ctx, connectionID) + if !found { + return sdkerrors.Wrap(types.ErrConnectionNotFound, connectionID) + } + + // Verify the provided version against the previously set connection state + switch { + // connection on ChainA must be in INIT or TRYOPEN + case connection.State != types.INIT && connection.State != types.TRYOPEN: + return sdkerrors.Wrapf( + types.ErrInvalidConnectionState, + "connection state is not INIT or TRYOPEN (got %s)", connection.State.String(), + ) + + // if the connection is INIT then the provided version must be supported + case connection.State == types.INIT && !types.IsSupportedVersion(version): + return sdkerrors.Wrapf( + types.ErrInvalidConnectionState, + "connection state is in INIT but the provided 
version is not supported %s", version, + ) + + // if the connection is in TRYOPEN then the version must be the only set version in the + // retrieved connection state. + case connection.State == types.TRYOPEN && (len(connection.Versions) != 1 || !proto.Equal(connection.Versions[0], version)): + return sdkerrors.Wrapf( + types.ErrInvalidConnectionState, + "connection state is in TRYOPEN but the provided version (%s) is not set in the previous connection versions %s", version, connection.Versions, + ) + } + + // validate client parameters of a chainA client stored on chainB + if err := k.clientKeeper.ValidateSelfClient(ctx, clientState); err != nil { + return err + } + + // Retrieve chainA's consensus state at consensusHeight + expectedConsensusState, found := k.clientKeeper.GetSelfConsensusState(ctx, consensusHeight) + if !found { + return clienttypes.ErrSelfConsensusStateNotFound + } + + prefix := k.GetCommitmentPrefix() + expectedCounterparty := types.NewCounterparty(connection.ClientId, connectionID, commitmenttypes.NewMerklePrefix(prefix.Bytes())) + expectedConnection := types.NewConnectionEnd(types.TRYOPEN, connection.Counterparty.ClientId, expectedCounterparty, []*types.Version{version}, connection.DelayPeriod) + + // Ensure that ChainB stored the expected connectionEnd in its state during ConnOpenTry + if err := k.VerifyConnectionState( + ctx, connection, proofHeight, proofTry, counterpartyConnectionID, + expectedConnection, + ); err != nil { + return err + } + + // Check that ChainB stored the clientState provided in the msg + if err := k.VerifyClientState(ctx, connection, proofHeight, proofClient, clientState); err != nil { + return err + } + + // Ensure that ChainB has stored the correct ConsensusState for chainA at the consensusHeight + if err := k.VerifyClientConsensusState( + ctx, connection, proofHeight, consensusHeight, proofConsensus, expectedConsensusState, + ); err != nil { + return err + } + + k.Logger(ctx).Info("connection state updated", "connection-id", connectionID, "previous-state", connection.State.String(), "new-state", "OPEN") + + defer func() { + telemetry.IncrCounter(1, "ibc", "connection", "open-ack") + }() + + // Update connection state to OPEN + connection.State = types.OPEN + connection.Versions = []*types.Version{version} + connection.Counterparty.ConnectionId = counterpartyConnectionID + k.SetConnection(ctx, connectionID, connection) + return nil +} + +// ConnOpenConfirm confirms opening of a connection on chain A to chain B, after +// which the connection is open on both chains (this code is executed on chain B). +// +// NOTE: Identifiers are checked on msg validation. 
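Before the ConnOpenConfirm implementation below, it may help to see how the four handshake functions compose end to end. The following relayer-perspective sketch is illustrative only and not part of the patch: the two in-process keepers, the proof byte slices and heights (normally obtained via ABCI queries against the counterparty chain), and the import paths inferred from this file layout are all assumptions.

package example

import (
	sdk "github.com/cosmos/cosmos-sdk/types"
	connectionkeeper "github.com/cosmos/cosmos-sdk/x/ibc/core/03-connection/keeper"
	"github.com/cosmos/cosmos-sdk/x/ibc/core/03-connection/types"
	commitmenttypes "github.com/cosmos/cosmos-sdk/x/ibc/core/23-commitment/types"
	"github.com/cosmos/cosmos-sdk/x/ibc/core/exported"
)

// handshake sketches the relayer-driven ordering of the four handshake steps.
// clientA is chain A's client of chain B; clientB is chain B's client of chain A.
// All proofs and heights are placeholders for values a relayer would query.
func handshake(
	ctxA, ctxB sdk.Context,
	kA, kB connectionkeeper.Keeper,
	clientA, clientB string,
	clientStateOfBOnA, clientStateOfAOnB exported.ClientState,
	proofInit, proofClientA, proofConsensusA []byte,
	proofTry, proofClientB, proofConsensusB, proofAck []byte,
	tryProofHeight, tryConsensusHeight, ackProofHeight, ackConsensusHeight, confirmProofHeight exported.Height,
) error {
	// 1. chain A: set its connection end to INIT. The counterparty connection
	// identifier is unknown at this point, so it is left empty.
	counterpartyB := types.NewCounterparty(clientB, "", commitmenttypes.NewMerklePrefix(kB.GetCommitmentPrefix().Bytes()))
	connAID, err := kA.ConnOpenInit(ctxA, clientA, counterpartyB, nil, 0)
	if err != nil {
		return err
	}

	// 2. chain B: verify chain A's INIT end and set its own end to TRYOPEN.
	counterpartyA := types.NewCounterparty(clientA, connAID, commitmenttypes.NewMerklePrefix(kA.GetCommitmentPrefix().Bytes()))
	connBID, err := kB.ConnOpenTry(
		ctxB, "", counterpartyA, 0, clientB, clientStateOfBOnA,
		types.GetCompatibleVersions(), proofInit, proofClientA, proofConsensusA,
		tryProofHeight, tryConsensusHeight,
	)
	if err != nil {
		return err
	}

	// 3. chain A: verify chain B's TRYOPEN end and move to OPEN.
	version := types.ExportedVersionsToProto(types.GetCompatibleVersions())[0]
	if err := kA.ConnOpenAck(
		ctxA, connAID, clientStateOfAOnB, version, connBID,
		proofTry, proofClientB, proofConsensusB, ackProofHeight, ackConsensusHeight,
	); err != nil {
		return err
	}

	// 4. chain B: verify chain A's OPEN end and move to OPEN as well.
	return kB.ConnOpenConfirm(ctxB, connBID, proofAck, confirmProofHeight)
}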
+func (k Keeper) ConnOpenConfirm( + ctx sdk.Context, + connectionID string, + proofAck []byte, // proof that connection opened on ChainA during ConnOpenAck + proofHeight exported.Height, // height that relayer constructed proofAck +) error { + // Retrieve connection + connection, found := k.GetConnection(ctx, connectionID) + if !found { + return sdkerrors.Wrap(types.ErrConnectionNotFound, connectionID) + } + + // Check that connection state on ChainB is on state: TRYOPEN + if connection.State != types.TRYOPEN { + return sdkerrors.Wrapf( + types.ErrInvalidConnectionState, + "connection state is not TRYOPEN (got %s)", connection.State.String(), + ) + } + + prefix := k.GetCommitmentPrefix() + expectedCounterparty := types.NewCounterparty(connection.ClientId, connectionID, commitmenttypes.NewMerklePrefix(prefix.Bytes())) + expectedConnection := types.NewConnectionEnd(types.OPEN, connection.Counterparty.ClientId, expectedCounterparty, connection.Versions, connection.DelayPeriod) + + // Check that connection on ChainA is open + if err := k.VerifyConnectionState( + ctx, connection, proofHeight, proofAck, connection.Counterparty.ConnectionId, + expectedConnection, + ); err != nil { + return err + } + + // Update ChainB's connection to Open + connection.State = types.OPEN + k.SetConnection(ctx, connectionID, connection) + k.Logger(ctx).Info("connection state updated", "connection-id", connectionID, "previous-state", "TRYOPEN", "new-state", "OPEN") + + defer func() { + telemetry.IncrCounter(1, "ibc", "connection", "open-confirm") + }() + + return nil +} diff --git a/core/03-connection/keeper/handshake_test.go b/core/03-connection/keeper/handshake_test.go new file mode 100644 index 0000000000..101c061a75 --- /dev/null +++ b/core/03-connection/keeper/handshake_test.go @@ -0,0 +1,701 @@ +package keeper_test + +import ( + "time" + + clienttypes "github.com/cosmos/cosmos-sdk/x/ibc/core/02-client/types" + "github.com/cosmos/cosmos-sdk/x/ibc/core/03-connection/types" + host "github.com/cosmos/cosmos-sdk/x/ibc/core/24-host" + "github.com/cosmos/cosmos-sdk/x/ibc/core/exported" + ibctmtypes "github.com/cosmos/cosmos-sdk/x/ibc/light-clients/07-tendermint/types" +) + +// TestConnOpenInit - chainA initializes (INIT state) a connection with +// chainB which is yet UNINITIALIZED +func (suite *KeeperTestSuite) TestConnOpenInit() { + var ( + clientA string + clientB string + version *types.Version + delayPeriod uint64 + emptyConnBID bool + ) + + testCases := []struct { + msg string + malleate func() + expPass bool + }{ + {"success", func() { + clientA, clientB = suite.coordinator.SetupClients(suite.chainA, suite.chainB, exported.Tendermint) + }, true}, + {"success with empty counterparty identifier", func() { + clientA, clientB = suite.coordinator.SetupClients(suite.chainA, suite.chainB, exported.Tendermint) + emptyConnBID = true + }, true}, + {"success with non empty version", func() { + clientA, clientB = suite.coordinator.SetupClients(suite.chainA, suite.chainB, exported.Tendermint) + version = types.ExportedVersionsToProto(types.GetCompatibleVersions())[0] + }, true}, + {"success with non zero delayPeriod", func() { + clientA, clientB = suite.coordinator.SetupClients(suite.chainA, suite.chainB, exported.Tendermint) + delayPeriod = uint64(time.Hour.Nanoseconds()) + }, true}, + + {"invalid version", func() { + clientA, clientB = suite.coordinator.SetupClients(suite.chainA, suite.chainB, exported.Tendermint) + version = &types.Version{} + }, false}, + {"couldn't add connection to client", func() { + clientA, 
clientB = suite.coordinator.SetupClients(suite.chainA, suite.chainB, exported.Tendermint) + // set clientA to invalid client identifier + clientA = "clientidentifier" + }, false}, + } + + for _, tc := range testCases { + tc := tc + suite.Run(tc.msg, func() { + suite.SetupTest() // reset + emptyConnBID = false // must be explicitly changed + version = nil // must be explicitly changed + + tc.malleate() + + connB := suite.chainB.GetFirstTestConnection(clientB, clientA) + if emptyConnBID { + connB.ID = "" + } + counterparty := types.NewCounterparty(clientB, connB.ID, suite.chainB.GetPrefix()) + + connectionID, err := suite.chainA.App.IBCKeeper.ConnectionKeeper.ConnOpenInit(suite.chainA.GetContext(), clientA, counterparty, version, delayPeriod) + + if tc.expPass { + suite.Require().NoError(err) + suite.Require().Equal(types.FormatConnectionIdentifier(0), connectionID) + } else { + suite.Require().Error(err) + suite.Require().Equal("", connectionID) + } + }) + } +} + +// TestConnOpenTry - chainB calls ConnOpenTry to verify the state of +// connection on chainA is INIT +func (suite *KeeperTestSuite) TestConnOpenTry() { + var ( + clientA string + clientB string + delayPeriod uint64 + previousConnectionID string + versions []exported.Version + consensusHeight exported.Height + counterpartyClient exported.ClientState + ) + + testCases := []struct { + msg string + malleate func() + expPass bool + }{ + {"success", func() { + clientA, clientB = suite.coordinator.SetupClients(suite.chainA, suite.chainB, exported.Tendermint) + _, _, err := suite.coordinator.ConnOpenInit(suite.chainA, suite.chainB, clientA, clientB) + suite.Require().NoError(err) + + // retrieve client state of chainA to pass as counterpartyClient + counterpartyClient = suite.chainA.GetClientState(clientA) + }, true}, + {"success with crossing hellos", func() { + clientA, clientB = suite.coordinator.SetupClients(suite.chainA, suite.chainB, exported.Tendermint) + _, connB, err := suite.coordinator.ConnOpenInitOnBothChains(suite.chainA, suite.chainB, clientA, clientB) + suite.Require().NoError(err) + + // retrieve client state of chainA to pass as counterpartyClient + counterpartyClient = suite.chainA.GetClientState(clientA) + + previousConnectionID = connB.ID + }, true}, + {"success with delay period", func() { + clientA, clientB = suite.coordinator.SetupClients(suite.chainA, suite.chainB, exported.Tendermint) + connA, _, err := suite.coordinator.ConnOpenInit(suite.chainA, suite.chainB, clientA, clientB) + suite.Require().NoError(err) + + delayPeriod = uint64(time.Hour.Nanoseconds()) + + // set delay period on counterparty to non-zero value + conn := suite.chainA.GetConnection(connA) + conn.DelayPeriod = delayPeriod + suite.chainA.App.IBCKeeper.ConnectionKeeper.SetConnection(suite.chainA.GetContext(), connA.ID, conn) + + // commit in order for proof to return correct value + suite.coordinator.CommitBlock(suite.chainA) + suite.coordinator.UpdateClient(suite.chainB, suite.chainA, clientB, exported.Tendermint) + + // retrieve client state of chainA to pass as counterpartyClient + counterpartyClient = suite.chainA.GetClientState(clientA) + }, true}, + {"invalid counterparty client", func() { + clientA, clientB = suite.coordinator.SetupClients(suite.chainA, suite.chainB, exported.Tendermint) + _, _, err := suite.coordinator.ConnOpenInit(suite.chainA, suite.chainB, clientA, clientB) + suite.Require().NoError(err) + + // retrieve client state of chainB to pass as counterpartyClient + counterpartyClient = suite.chainA.GetClientState(clientA) + + 
// Set an invalid client of chainA on chainB + tmClient, ok := counterpartyClient.(*ibctmtypes.ClientState) + suite.Require().True(ok) + tmClient.ChainId = "wrongchainid" + + suite.chainA.App.IBCKeeper.ClientKeeper.SetClientState(suite.chainA.GetContext(), clientA, tmClient) + }, false}, + {"consensus height >= latest height", func() { + clientA, clientB = suite.coordinator.SetupClients(suite.chainA, suite.chainB, exported.Tendermint) + _, _, err := suite.coordinator.ConnOpenInit(suite.chainA, suite.chainB, clientA, clientB) + suite.Require().NoError(err) + + // retrieve client state of chainA to pass as counterpartyClient + counterpartyClient = suite.chainA.GetClientState(clientA) + + consensusHeight = clienttypes.GetSelfHeight(suite.chainB.GetContext()) + }, false}, + {"self consensus state not found", func() { + clientA, clientB = suite.coordinator.SetupClients(suite.chainA, suite.chainB, exported.Tendermint) + _, _, err := suite.coordinator.ConnOpenInit(suite.chainA, suite.chainB, clientA, clientB) + suite.Require().NoError(err) + + // retrieve client state of chainA to pass as counterpartyClient + counterpartyClient = suite.chainA.GetClientState(clientA) + + consensusHeight = clienttypes.NewHeight(0, 1) + }, false}, + {"counterparty versions is empty", func() { + clientA, clientB = suite.coordinator.SetupClients(suite.chainA, suite.chainB, exported.Tendermint) + _, _, err := suite.coordinator.ConnOpenInit(suite.chainA, suite.chainB, clientA, clientB) + suite.Require().NoError(err) + + // retrieve client state of chainA to pass as counterpartyClient + counterpartyClient = suite.chainA.GetClientState(clientA) + + versions = nil + }, false}, + {"counterparty versions don't have a match", func() { + clientA, clientB = suite.coordinator.SetupClients(suite.chainA, suite.chainB, exported.Tendermint) + _, _, err := suite.coordinator.ConnOpenInit(suite.chainA, suite.chainB, clientA, clientB) + suite.Require().NoError(err) + + // retrieve client state of chainA to pass as counterpartyClient + counterpartyClient = suite.chainA.GetClientState(clientA) + + version := types.NewVersion("0.0", nil) + versions = []exported.Version{version} + }, false}, + {"connection state verification failed", func() { + clientA, clientB = suite.coordinator.SetupClients(suite.chainA, suite.chainB, exported.Tendermint) + // chainA connection not created + + // retrieve client state of chainA to pass as counterpartyClient + counterpartyClient = suite.chainA.GetClientState(clientA) + }, false}, + {"client state verification failed", func() { + clientA, clientB = suite.coordinator.SetupClients(suite.chainA, suite.chainB, exported.Tendermint) + _, _, err := suite.coordinator.ConnOpenInit(suite.chainA, suite.chainB, clientA, clientB) + suite.Require().NoError(err) + + // retrieve client state of chainA to pass as counterpartyClient + counterpartyClient = suite.chainA.GetClientState(clientA) + + // modify counterparty client without setting in store so it still passes validate but fails proof verification + tmClient, ok := counterpartyClient.(*ibctmtypes.ClientState) + suite.Require().True(ok) + tmClient.LatestHeight = tmClient.LatestHeight.Increment().(clienttypes.Height) + }, false}, + {"consensus state verification failed", func() { + clientA, clientB = suite.coordinator.SetupClients(suite.chainA, suite.chainB, exported.Tendermint) + + // retrieve client state of chainA to pass as counterpartyClient + counterpartyClient = suite.chainA.GetClientState(clientA) + + // give chainA wrong consensus state for chainB + 
consState, found := suite.chainA.App.IBCKeeper.ClientKeeper.GetLatestClientConsensusState(suite.chainA.GetContext(), clientA) + suite.Require().True(found) + + tmConsState, ok := consState.(*ibctmtypes.ConsensusState) + suite.Require().True(ok) + + tmConsState.Timestamp = time.Now() + suite.chainA.App.IBCKeeper.ClientKeeper.SetClientConsensusState(suite.chainA.GetContext(), clientA, counterpartyClient.GetLatestHeight(), tmConsState) + + _, _, err := suite.coordinator.ConnOpenInit(suite.chainA, suite.chainB, clientA, clientB) + suite.Require().NoError(err) + }, false}, + {"invalid previous connection is in TRYOPEN", func() { + clientA, clientB = suite.coordinator.SetupClients(suite.chainA, suite.chainB, exported.Tendermint) + + // open init chainA + connA, connB, err := suite.coordinator.ConnOpenInit(suite.chainA, suite.chainB, clientA, clientB) + suite.Require().NoError(err) + + // open try chainB + err = suite.coordinator.ConnOpenTry(suite.chainB, suite.chainA, connB, connA) + suite.Require().NoError(err) + + err = suite.coordinator.UpdateClient(suite.chainB, suite.chainA, clientB, exported.Tendermint) + suite.Require().NoError(err) + + // retrieve client state of chainA to pass as counterpartyClient + counterpartyClient = suite.chainA.GetClientState(clientA) + + previousConnectionID = connB.ID + }, false}, + {"invalid previous connection has invalid versions", func() { + clientA, clientB = suite.coordinator.SetupClients(suite.chainA, suite.chainB, exported.Tendermint) + + // open init chainA + connA, connB, err := suite.coordinator.ConnOpenInit(suite.chainA, suite.chainB, clientA, clientB) + suite.Require().NoError(err) + + // open try chainB + err = suite.coordinator.ConnOpenTry(suite.chainB, suite.chainA, connB, connA) + suite.Require().NoError(err) + + // modify connB to be in INIT with incorrect versions + connection, found := suite.chainB.App.IBCKeeper.ConnectionKeeper.GetConnection(suite.chainB.GetContext(), connB.ID) + suite.Require().True(found) + + connection.State = types.INIT + connection.Versions = []*types.Version{{}} + + suite.chainB.App.IBCKeeper.ConnectionKeeper.SetConnection(suite.chainB.GetContext(), connB.ID, connection) + + err = suite.coordinator.UpdateClient(suite.chainB, suite.chainA, clientB, exported.Tendermint) + suite.Require().NoError(err) + + // retrieve client state of chainA to pass as counterpartyClient + counterpartyClient = suite.chainA.GetClientState(clientA) + + previousConnectionID = connB.ID + }, false}, + } + + for _, tc := range testCases { + tc := tc + + suite.Run(tc.msg, func() { + suite.SetupTest() // reset + consensusHeight = clienttypes.ZeroHeight() // must be explicitly changed in malleate + versions = types.GetCompatibleVersions() // must be explicitly changed in malleate + previousConnectionID = "" + + tc.malleate() + + connA := suite.chainA.GetFirstTestConnection(clientA, clientB) + counterparty := types.NewCounterparty(clientA, connA.ID, suite.chainA.GetPrefix()) + + connectionKey := host.ConnectionKey(connA.ID) + proofInit, proofHeight := suite.chainA.QueryProof(connectionKey) + + if consensusHeight.IsZero() { + // retrieve consensus state height to provide proof for + consensusHeight = counterpartyClient.GetLatestHeight() + } + consensusKey := host.FullConsensusStateKey(clientA, consensusHeight) + proofConsensus, _ := suite.chainA.QueryProof(consensusKey) + + // retrieve proof of counterparty clientstate on chainA + clientKey := host.FullClientStateKey(clientA) + proofClient, _ := suite.chainA.QueryProof(clientKey) + + connectionID, err 
:= suite.chainB.App.IBCKeeper.ConnectionKeeper.ConnOpenTry( + suite.chainB.GetContext(), previousConnectionID, counterparty, delayPeriod, clientB, counterpartyClient, + versions, proofInit, proofClient, proofConsensus, + proofHeight, consensusHeight, + ) + + if tc.expPass { + suite.Require().NoError(err) + suite.Require().Equal(types.FormatConnectionIdentifier(0), connectionID) + } else { + suite.Require().Error(err) + suite.Require().Equal("", connectionID) + } + }) + } +} + +// TestConnOpenAck - Chain A (ID #1) calls TestConnOpenAck to acknowledge (ACK state) +// the initialization (TRYINIT) of the connection on Chain B (ID #2). +func (suite *KeeperTestSuite) TestConnOpenAck() { + var ( + clientA string + clientB string + consensusHeight exported.Height + version *types.Version + counterpartyClient exported.ClientState + ) + + testCases := []struct { + msg string + malleate func() + expPass bool + }{ + {"success", func() { + clientA, clientB = suite.coordinator.SetupClients(suite.chainA, suite.chainB, exported.Tendermint) + connA, connB, err := suite.coordinator.ConnOpenInit(suite.chainA, suite.chainB, clientA, clientB) + suite.Require().NoError(err) + + err = suite.coordinator.ConnOpenTry(suite.chainB, suite.chainA, connB, connA) + suite.Require().NoError(err) + + // retrieve client state of chainB to pass as counterpartyClient + counterpartyClient = suite.chainB.GetClientState(clientB) + }, true}, + {"success from tryopen", func() { + // chainA is in TRYOPEN, chainB is in TRYOPEN + clientA, clientB = suite.coordinator.SetupClients(suite.chainA, suite.chainB, exported.Tendermint) + connB, connA, err := suite.coordinator.ConnOpenInit(suite.chainB, suite.chainA, clientB, clientA) + suite.Require().NoError(err) + + err = suite.coordinator.ConnOpenTry(suite.chainA, suite.chainB, connA, connB) + suite.Require().NoError(err) + + // set chainB to TRYOPEN + connection := suite.chainB.GetConnection(connB) + connection.State = types.TRYOPEN + connection.Counterparty.ConnectionId = connA.ID + suite.chainB.App.IBCKeeper.ConnectionKeeper.SetConnection(suite.chainB.GetContext(), connB.ID, connection) + // update clientB so state change is committed + suite.coordinator.UpdateClient(suite.chainB, suite.chainA, clientB, exported.Tendermint) + + suite.coordinator.UpdateClient(suite.chainA, suite.chainB, clientA, exported.Tendermint) + + // retrieve client state of chainB to pass as counterpartyClient + counterpartyClient = suite.chainB.GetClientState(clientB) + }, true}, + {"invalid counterparty client", func() { + clientA, clientB = suite.coordinator.SetupClients(suite.chainA, suite.chainB, exported.Tendermint) + connA, connB, err := suite.coordinator.ConnOpenInit(suite.chainA, suite.chainB, clientA, clientB) + suite.Require().NoError(err) + + // retrieve client state of chainB to pass as counterpartyClient + counterpartyClient = suite.chainB.GetClientState(clientB) + + // Set an invalid client of chainA on chainB + tmClient, ok := counterpartyClient.(*ibctmtypes.ClientState) + suite.Require().True(ok) + tmClient.ChainId = "wrongchainid" + + suite.chainB.App.IBCKeeper.ClientKeeper.SetClientState(suite.chainB.GetContext(), clientB, tmClient) + + err = suite.coordinator.ConnOpenTry(suite.chainB, suite.chainA, connB, connA) + suite.Require().NoError(err) + }, false}, + {"consensus height >= latest height", func() { + clientA, clientB = suite.coordinator.SetupClients(suite.chainA, suite.chainB, exported.Tendermint) + connA, connB, err := suite.coordinator.ConnOpenInit(suite.chainA, suite.chainB, clientA, 
clientB) + suite.Require().NoError(err) + + // retrieve client state of chainB to pass as counterpartyClient + counterpartyClient = suite.chainB.GetClientState(clientB) + + err = suite.coordinator.ConnOpenTry(suite.chainB, suite.chainA, connB, connA) + suite.Require().NoError(err) + + consensusHeight = clienttypes.GetSelfHeight(suite.chainA.GetContext()) + }, false}, + {"connection not found", func() { + // connections are never created + clientA, clientB = suite.coordinator.SetupClients(suite.chainA, suite.chainB, exported.Tendermint) + + // retrieve client state of chainB to pass as counterpartyClient + counterpartyClient = suite.chainB.GetClientState(clientB) + }, false}, + {"invalid counterparty connection ID", func() { + clientA, clientB = suite.coordinator.SetupClients(suite.chainA, suite.chainB, exported.Tendermint) + connA, connB, err := suite.coordinator.ConnOpenInit(suite.chainA, suite.chainB, clientA, clientB) + suite.Require().NoError(err) + + // retrieve client state of chainB to pass as counterpartyClient + counterpartyClient = suite.chainB.GetClientState(clientB) + + err = suite.coordinator.ConnOpenTry(suite.chainB, suite.chainA, connB, connA) + suite.Require().NoError(err) + + // modify connB to set counterparty connection identifier to wrong identifier + connection, found := suite.chainA.App.IBCKeeper.ConnectionKeeper.GetConnection(suite.chainA.GetContext(), connA.ID) + suite.Require().True(found) + + connection.Counterparty.ConnectionId = "badconnectionid" + + suite.chainA.App.IBCKeeper.ConnectionKeeper.SetConnection(suite.chainA.GetContext(), connA.ID, connection) + + err = suite.coordinator.UpdateClient(suite.chainA, suite.chainB, clientA, exported.Tendermint) + suite.Require().NoError(err) + + err = suite.coordinator.UpdateClient(suite.chainB, suite.chainA, clientB, exported.Tendermint) + suite.Require().NoError(err) + }, false}, + {"connection state is not INIT", func() { + // connection state is already OPEN on chainA + clientA, clientB = suite.coordinator.SetupClients(suite.chainA, suite.chainB, exported.Tendermint) + connA, connB, err := suite.coordinator.ConnOpenInit(suite.chainA, suite.chainB, clientA, clientB) + suite.Require().NoError(err) + + // retrieve client state of chainB to pass as counterpartyClient + counterpartyClient = suite.chainB.GetClientState(clientB) + + err = suite.coordinator.ConnOpenTry(suite.chainB, suite.chainA, connB, connA) + suite.Require().NoError(err) + + err = suite.coordinator.ConnOpenAck(suite.chainA, suite.chainB, connA, connB) + suite.Require().NoError(err) + }, false}, + {"connection is in INIT but the proposed version is invalid", func() { + // chainA is in INIT, chainB is in TRYOPEN + clientA, clientB = suite.coordinator.SetupClients(suite.chainA, suite.chainB, exported.Tendermint) + connA, connB, err := suite.coordinator.ConnOpenInit(suite.chainA, suite.chainB, clientA, clientB) + suite.Require().NoError(err) + + // retrieve client state of chainB to pass as counterpartyClient + counterpartyClient = suite.chainB.GetClientState(clientB) + + err = suite.coordinator.ConnOpenTry(suite.chainB, suite.chainA, connB, connA) + suite.Require().NoError(err) + + version = types.NewVersion("2.0", nil) + }, false}, + {"connection is in TRYOPEN but the set version in the connection is invalid", func() { + // chainA is in TRYOPEN, chainB is in TRYOPEN + clientA, clientB = suite.coordinator.SetupClients(suite.chainA, suite.chainB, exported.Tendermint) + connB, connA, err := suite.coordinator.ConnOpenInit(suite.chainB, suite.chainA, clientB, 
clientA) + suite.Require().NoError(err) + + err = suite.coordinator.ConnOpenTry(suite.chainA, suite.chainB, connA, connB) + suite.Require().NoError(err) + + // set chainB to TRYOPEN + connection := suite.chainB.GetConnection(connB) + connection.State = types.TRYOPEN + suite.chainB.App.IBCKeeper.ConnectionKeeper.SetConnection(suite.chainB.GetContext(), connB.ID, connection) + + // update clientB so state change is committed + suite.coordinator.UpdateClient(suite.chainB, suite.chainA, clientB, exported.Tendermint) + suite.coordinator.UpdateClient(suite.chainA, suite.chainB, clientA, exported.Tendermint) + + // retrieve client state of chainB to pass as counterpartyClient + counterpartyClient = suite.chainB.GetClientState(clientB) + + version = types.NewVersion("2.0", nil) + }, false}, + {"incompatible IBC versions", func() { + clientA, clientB = suite.coordinator.SetupClients(suite.chainA, suite.chainB, exported.Tendermint) + connA, connB, err := suite.coordinator.ConnOpenInit(suite.chainA, suite.chainB, clientA, clientB) + suite.Require().NoError(err) + + // retrieve client state of chainB to pass as counterpartyClient + counterpartyClient = suite.chainB.GetClientState(clientB) + + err = suite.coordinator.ConnOpenTry(suite.chainB, suite.chainA, connB, connA) + suite.Require().NoError(err) + + // set version to a non-compatible version + version = types.NewVersion("2.0", nil) + }, false}, + {"empty version", func() { + clientA, clientB = suite.coordinator.SetupClients(suite.chainA, suite.chainB, exported.Tendermint) + connA, connB, err := suite.coordinator.ConnOpenInit(suite.chainA, suite.chainB, clientA, clientB) + suite.Require().NoError(err) + + // retrieve client state of chainB to pass as counterpartyClient + counterpartyClient = suite.chainB.GetClientState(clientB) + + err = suite.coordinator.ConnOpenTry(suite.chainB, suite.chainA, connB, connA) + suite.Require().NoError(err) + + version = &types.Version{} + }, false}, + {"feature set verification failed - unsupported feature", func() { + clientA, clientB = suite.coordinator.SetupClients(suite.chainA, suite.chainB, exported.Tendermint) + connA, connB, err := suite.coordinator.ConnOpenInit(suite.chainA, suite.chainB, clientA, clientB) + suite.Require().NoError(err) + + // retrieve client state of chainB to pass as counterpartyClient + counterpartyClient = suite.chainB.GetClientState(clientB) + + err = suite.coordinator.ConnOpenTry(suite.chainB, suite.chainA, connB, connA) + suite.Require().NoError(err) + + version = types.NewVersion(types.DefaultIBCVersionIdentifier, []string{"ORDER_ORDERED", "ORDER_UNORDERED", "ORDER_DAG"}) + }, false}, + {"self consensus state not found", func() { + clientA, clientB = suite.coordinator.SetupClients(suite.chainA, suite.chainB, exported.Tendermint) + connA, connB, err := suite.coordinator.ConnOpenInit(suite.chainA, suite.chainB, clientA, clientB) + suite.Require().NoError(err) + + // retrieve client state of chainB to pass as counterpartyClient + counterpartyClient = suite.chainB.GetClientState(clientB) + + err = suite.coordinator.ConnOpenTry(suite.chainB, suite.chainA, connB, connA) + suite.Require().NoError(err) + + consensusHeight = clienttypes.NewHeight(0, 1) + }, false}, + {"connection state verification failed", func() { + // chainB connection is not in INIT + clientA, clientB = suite.coordinator.SetupClients(suite.chainA, suite.chainB, exported.Tendermint) + _, _, err := suite.coordinator.ConnOpenInit(suite.chainA, suite.chainB, clientA, clientB) + suite.Require().NoError(err) + + // retrieve 
client state of chainB to pass as counterpartyClient + counterpartyClient = suite.chainB.GetClientState(clientB) + }, false}, + {"client state verification failed", func() { + clientA, clientB = suite.coordinator.SetupClients(suite.chainA, suite.chainB, exported.Tendermint) + connA, connB, err := suite.coordinator.ConnOpenInit(suite.chainA, suite.chainB, clientA, clientB) + suite.Require().NoError(err) + + // retrieve client state of chainB to pass as counterpartyClient + counterpartyClient = suite.chainB.GetClientState(clientB) + + // modify counterparty client without setting in store so it still passes validate but fails proof verification + tmClient, ok := counterpartyClient.(*ibctmtypes.ClientState) + suite.Require().True(ok) + tmClient.LatestHeight = tmClient.LatestHeight.Increment().(clienttypes.Height) + + err = suite.coordinator.ConnOpenTry(suite.chainB, suite.chainA, connB, connA) + suite.Require().NoError(err) + }, false}, + {"consensus state verification failed", func() { + clientA, clientB = suite.coordinator.SetupClients(suite.chainA, suite.chainB, exported.Tendermint) + connA, connB, err := suite.coordinator.ConnOpenInit(suite.chainA, suite.chainB, clientA, clientB) + suite.Require().NoError(err) + + // retrieve client state of chainB to pass as counterpartyClient + counterpartyClient = suite.chainB.GetClientState(clientB) + + // give chainB wrong consensus state for chainA + consState, found := suite.chainB.App.IBCKeeper.ClientKeeper.GetLatestClientConsensusState(suite.chainB.GetContext(), clientB) + suite.Require().True(found) + + tmConsState, ok := consState.(*ibctmtypes.ConsensusState) + suite.Require().True(ok) + + tmConsState.Timestamp = time.Now() + suite.chainB.App.IBCKeeper.ClientKeeper.SetClientConsensusState(suite.chainB.GetContext(), clientB, counterpartyClient.GetLatestHeight(), tmConsState) + + err = suite.coordinator.ConnOpenTry(suite.chainB, suite.chainA, connB, connA) + suite.Require().NoError(err) + }, false}, + } + + for _, tc := range testCases { + tc := tc + suite.Run(tc.msg, func() { + suite.SetupTest() // reset + version = types.ExportedVersionsToProto(types.GetCompatibleVersions())[0] // must be explicitly changed in malleate + consensusHeight = clienttypes.ZeroHeight() // must be explicitly changed in malleate + + tc.malleate() + + connA := suite.chainA.GetFirstTestConnection(clientA, clientB) + connB := suite.chainB.GetFirstTestConnection(clientB, clientA) + + connectionKey := host.ConnectionKey(connB.ID) + proofTry, proofHeight := suite.chainB.QueryProof(connectionKey) + + if consensusHeight.IsZero() { + // retrieve consensus state height to provide proof for + clientState := suite.chainB.GetClientState(clientB) + consensusHeight = clientState.GetLatestHeight() + } + consensusKey := host.FullConsensusStateKey(clientB, consensusHeight) + proofConsensus, _ := suite.chainB.QueryProof(consensusKey) + + // retrieve proof of counterparty clientstate on chainA + clientKey := host.FullClientStateKey(clientB) + proofClient, _ := suite.chainB.QueryProof(clientKey) + + err := suite.chainA.App.IBCKeeper.ConnectionKeeper.ConnOpenAck( + suite.chainA.GetContext(), connA.ID, counterpartyClient, version, connB.ID, + proofTry, proofClient, proofConsensus, proofHeight, consensusHeight, + ) + + if tc.expPass { + suite.Require().NoError(err) + } else { + suite.Require().Error(err) + } + }) + } +} + +// TestConnOpenConfirm - chainB calls ConnOpenConfirm to confirm that +// chainA state is now OPEN. 
+func (suite *KeeperTestSuite) TestConnOpenConfirm() { + var ( + clientA string + clientB string + ) + testCases := []struct { + msg string + malleate func() + expPass bool + }{ + {"success", func() { + clientA, clientB = suite.coordinator.SetupClients(suite.chainA, suite.chainB, exported.Tendermint) + connA, connB, err := suite.coordinator.ConnOpenInit(suite.chainA, suite.chainB, clientA, clientB) + suite.Require().NoError(err) + + err = suite.coordinator.ConnOpenTry(suite.chainB, suite.chainA, connB, connA) + suite.Require().NoError(err) + + err = suite.coordinator.ConnOpenAck(suite.chainA, suite.chainB, connA, connB) + suite.Require().NoError(err) + }, true}, + {"connection not found", func() { + // connections are never created + clientA, clientB = suite.coordinator.SetupClients(suite.chainA, suite.chainB, exported.Tendermint) + }, false}, + {"chain B's connection state is not TRYOPEN", func() { + // connections are OPEN + clientA, clientB, _, _ = suite.coordinator.SetupClientConnections(suite.chainA, suite.chainB, exported.Tendermint) + }, false}, + {"connection state verification failed", func() { + // chainA is in INIT + clientA, clientB = suite.coordinator.SetupClients(suite.chainA, suite.chainB, exported.Tendermint) + connA, connB, err := suite.coordinator.ConnOpenInit(suite.chainA, suite.chainB, clientA, clientB) + suite.Require().NoError(err) + + err = suite.coordinator.ConnOpenTry(suite.chainB, suite.chainA, connB, connA) + suite.Require().NoError(err) + }, false}, + } + + for _, tc := range testCases { + tc := tc + + suite.Run(tc.msg, func() { + suite.SetupTest() // reset + + tc.malleate() + + connA := suite.chainA.GetFirstTestConnection(clientA, clientB) + connB := suite.chainB.GetFirstTestConnection(clientB, clientA) + + connectionKey := host.ConnectionKey(connA.ID) + proofAck, proofHeight := suite.chainA.QueryProof(connectionKey) + + err := suite.chainB.App.IBCKeeper.ConnectionKeeper.ConnOpenConfirm( + suite.chainB.GetContext(), connB.ID, proofAck, proofHeight, + ) + + if tc.expPass { + suite.Require().NoError(err) + } else { + suite.Require().Error(err) + } + }) + } +} diff --git a/core/03-connection/keeper/keeper.go b/core/03-connection/keeper/keeper.go new file mode 100644 index 0000000000..6637268687 --- /dev/null +++ b/core/03-connection/keeper/keeper.go @@ -0,0 +1,198 @@ +package keeper + +import ( + "github.com/tendermint/tendermint/libs/log" + + "github.com/cosmos/cosmos-sdk/codec" + sdk "github.com/cosmos/cosmos-sdk/types" + sdkerrors "github.com/cosmos/cosmos-sdk/types/errors" + clienttypes "github.com/cosmos/cosmos-sdk/x/ibc/core/02-client/types" + "github.com/cosmos/cosmos-sdk/x/ibc/core/03-connection/types" + commitmenttypes "github.com/cosmos/cosmos-sdk/x/ibc/core/23-commitment/types" + host "github.com/cosmos/cosmos-sdk/x/ibc/core/24-host" + "github.com/cosmos/cosmos-sdk/x/ibc/core/exported" +) + +// Keeper defines the IBC connection keeper +type Keeper struct { + // implements gRPC QueryServer interface + types.QueryServer + + storeKey sdk.StoreKey + cdc codec.BinaryMarshaler + clientKeeper types.ClientKeeper +} + +// NewKeeper creates a new IBC connection Keeper instance +func NewKeeper(cdc codec.BinaryMarshaler, key sdk.StoreKey, ck types.ClientKeeper) Keeper { + return Keeper{ + storeKey: key, + cdc: cdc, + clientKeeper: ck, + } +} + +// Logger returns a module-specific logger. 
+func (k Keeper) Logger(ctx sdk.Context) log.Logger { + return ctx.Logger().With("module", "x/"+host.ModuleName+"/"+types.SubModuleName) +} + +// GetCommitmentPrefix returns the IBC connection store prefix as a commitment +// Prefix +func (k Keeper) GetCommitmentPrefix() exported.Prefix { + return commitmenttypes.NewMerklePrefix([]byte(k.storeKey.Name())) +} + +// GenerateConnectionIdentifier returns the next connection identifier. +func (k Keeper) GenerateConnectionIdentifier(ctx sdk.Context) string { + nextConnSeq := k.GetNextConnectionSequence(ctx) + connectionID := types.FormatConnectionIdentifier(nextConnSeq) + + nextConnSeq++ + k.SetNextConnectionSequence(ctx, nextConnSeq) + return connectionID +} + +// GetConnection returns a connection with a particular identifier +func (k Keeper) GetConnection(ctx sdk.Context, connectionID string) (types.ConnectionEnd, bool) { + store := ctx.KVStore(k.storeKey) + bz := store.Get(host.ConnectionKey(connectionID)) + if bz == nil { + return types.ConnectionEnd{}, false + } + + var connection types.ConnectionEnd + k.cdc.MustUnmarshalBinaryBare(bz, &connection) + + return connection, true +} + +// SetConnection sets a connection to the store +func (k Keeper) SetConnection(ctx sdk.Context, connectionID string, connection types.ConnectionEnd) { + store := ctx.KVStore(k.storeKey) + bz := k.cdc.MustMarshalBinaryBare(&connection) + store.Set(host.ConnectionKey(connectionID), bz) +} + +// GetTimestampAtHeight returns the timestamp in nanoseconds of the consensus state at the +// given height. +func (k Keeper) GetTimestampAtHeight(ctx sdk.Context, connection types.ConnectionEnd, height exported.Height) (uint64, error) { + consensusState, found := k.clientKeeper.GetClientConsensusState( + ctx, connection.GetClientID(), height, + ) + + if !found { + return 0, sdkerrors.Wrapf( + clienttypes.ErrConsensusStateNotFound, + "clientID (%s), height (%s)", connection.GetClientID(), height, + ) + } + + return consensusState.GetTimestamp(), nil +} + +// GetClientConnectionPaths returns all the connection paths stored under a +// particular client +func (k Keeper) GetClientConnectionPaths(ctx sdk.Context, clientID string) ([]string, bool) { + store := ctx.KVStore(k.storeKey) + bz := store.Get(host.ClientConnectionsKey(clientID)) + if bz == nil { + return nil, false + } + + var clientPaths types.ClientPaths + k.cdc.MustUnmarshalBinaryBare(bz, &clientPaths) + return clientPaths.Paths, true +} + +// SetClientConnectionPaths sets the connections paths for client +func (k Keeper) SetClientConnectionPaths(ctx sdk.Context, clientID string, paths []string) { + store := ctx.KVStore(k.storeKey) + clientPaths := types.ClientPaths{Paths: paths} + bz := k.cdc.MustMarshalBinaryBare(&clientPaths) + store.Set(host.ClientConnectionsKey(clientID), bz) +} + +// GetNextConnectionSequence gets the next connection sequence from the store. +func (k Keeper) GetNextConnectionSequence(ctx sdk.Context) uint64 { + store := ctx.KVStore(k.storeKey) + bz := store.Get([]byte(types.KeyNextConnectionSequence)) + if bz == nil { + panic("next connection sequence is nil") + } + + return sdk.BigEndianToUint64(bz) +} + +// SetNextConnectionSequence sets the next connection sequence to the store. +func (k Keeper) SetNextConnectionSequence(ctx sdk.Context, sequence uint64) { + store := ctx.KVStore(k.storeKey) + bz := sdk.Uint64ToBigEndian(sequence) + store.Set([]byte(types.KeyNextConnectionSequence), bz) +} + +// GetAllClientConnectionPaths returns all stored clients connection id paths. 
It +// will ignore the clients that haven't initialized a connection handshake since +// no paths are stored. +func (k Keeper) GetAllClientConnectionPaths(ctx sdk.Context) []types.ConnectionPaths { + var allConnectionPaths []types.ConnectionPaths + k.clientKeeper.IterateClients(ctx, func(clientID string, cs exported.ClientState) bool { + paths, found := k.GetClientConnectionPaths(ctx, clientID) + if !found { + // continue when connection handshake is not initialized + return false + } + connPaths := types.NewConnectionPaths(clientID, paths) + allConnectionPaths = append(allConnectionPaths, connPaths) + return false + }) + + return allConnectionPaths +} + +// IterateConnections provides an iterator over all ConnectionEnd objects. +// For each ConnectionEnd, cb will be called. If the cb returns true, the +// iterator will close and stop. +func (k Keeper) IterateConnections(ctx sdk.Context, cb func(types.IdentifiedConnection) bool) { + store := ctx.KVStore(k.storeKey) + iterator := sdk.KVStorePrefixIterator(store, []byte(host.KeyConnectionPrefix)) + + defer iterator.Close() + for ; iterator.Valid(); iterator.Next() { + var connection types.ConnectionEnd + k.cdc.MustUnmarshalBinaryBare(iterator.Value(), &connection) + + connectionID := host.MustParseConnectionPath(string(iterator.Key())) + identifiedConnection := types.NewIdentifiedConnection(connectionID, connection) + if cb(identifiedConnection) { + break + } + } +} + +// GetAllConnections returns all stored ConnectionEnd objects. +func (k Keeper) GetAllConnections(ctx sdk.Context) (connections []types.IdentifiedConnection) { + k.IterateConnections(ctx, func(connection types.IdentifiedConnection) bool { + connections = append(connections, connection) + return false + }) + return connections +} + +// addConnectionToClient is used to add a connection identifier to the set of +// connections associated with a client. 
+func (k Keeper) addConnectionToClient(ctx sdk.Context, clientID, connectionID string) error { + _, found := k.clientKeeper.GetClientState(ctx, clientID) + if !found { + return sdkerrors.Wrap(clienttypes.ErrClientNotFound, clientID) + } + + conns, found := k.GetClientConnectionPaths(ctx, clientID) + if !found { + conns = []string{} + } + + conns = append(conns, connectionID) + k.SetClientConnectionPaths(ctx, clientID, conns) + return nil +} diff --git a/core/03-connection/keeper/keeper_test.go b/core/03-connection/keeper/keeper_test.go new file mode 100644 index 0000000000..f2a1124b55 --- /dev/null +++ b/core/03-connection/keeper/keeper_test.go @@ -0,0 +1,133 @@ +package keeper_test + +import ( + "fmt" + "testing" + + "github.com/stretchr/testify/suite" + + "github.com/cosmos/cosmos-sdk/x/ibc/core/03-connection/types" + "github.com/cosmos/cosmos-sdk/x/ibc/core/exported" + ibctesting "github.com/cosmos/cosmos-sdk/x/ibc/testing" +) + +type KeeperTestSuite struct { + suite.Suite + + coordinator *ibctesting.Coordinator + + // testing chains used for convenience and readability + chainA *ibctesting.TestChain + chainB *ibctesting.TestChain +} + +func (suite *KeeperTestSuite) SetupTest() { + suite.coordinator = ibctesting.NewCoordinator(suite.T(), 2) + suite.chainA = suite.coordinator.GetChain(ibctesting.GetChainID(0)) + suite.chainB = suite.coordinator.GetChain(ibctesting.GetChainID(1)) +} + +func TestKeeperTestSuite(t *testing.T) { + suite.Run(t, new(KeeperTestSuite)) +} + +func (suite *KeeperTestSuite) TestSetAndGetConnection() { + clientA, clientB := suite.coordinator.SetupClients(suite.chainA, suite.chainB, exported.Tendermint) + connA := suite.chainA.GetFirstTestConnection(clientA, clientB) + _, existed := suite.chainA.App.IBCKeeper.ConnectionKeeper.GetConnection(suite.chainA.GetContext(), connA.ID) + suite.Require().False(existed) + + suite.coordinator.CreateConnection(suite.chainA, suite.chainB, clientA, clientB) + _, existed = suite.chainA.App.IBCKeeper.ConnectionKeeper.GetConnection(suite.chainA.GetContext(), connA.ID) + suite.Require().True(existed) +} + +func (suite *KeeperTestSuite) TestSetAndGetClientConnectionPaths() { + clientA, _ := suite.coordinator.SetupClients(suite.chainA, suite.chainB, exported.Tendermint) + _, existed := suite.chainA.App.IBCKeeper.ConnectionKeeper.GetClientConnectionPaths(suite.chainA.GetContext(), clientA) + suite.False(existed) + + connections := []string{"connectionA", "connectionB"} + suite.chainA.App.IBCKeeper.ConnectionKeeper.SetClientConnectionPaths(suite.chainA.GetContext(), clientA, connections) + paths, existed := suite.chainA.App.IBCKeeper.ConnectionKeeper.GetClientConnectionPaths(suite.chainA.GetContext(), clientA) + suite.True(existed) + suite.EqualValues(connections, paths) +} + +// create 2 connections: A0 - B0, A1 - B1 +func (suite KeeperTestSuite) TestGetAllConnections() { + clientA, clientB, connA0, connB0 := suite.coordinator.SetupClientConnections(suite.chainA, suite.chainB, exported.Tendermint) + connA1, connB1 := suite.coordinator.CreateConnection(suite.chainA, suite.chainB, clientA, clientB) + + counterpartyB0 := types.NewCounterparty(clientB, connB0.ID, suite.chainB.GetPrefix()) // connection B0 + counterpartyB1 := types.NewCounterparty(clientB, connB1.ID, suite.chainB.GetPrefix()) // connection B1 + + conn1 := types.NewConnectionEnd(types.OPEN, clientA, counterpartyB0, types.ExportedVersionsToProto(types.GetCompatibleVersions()), 0) // A0 - B0 + conn2 := types.NewConnectionEnd(types.OPEN, clientA, counterpartyB1, 
types.ExportedVersionsToProto(types.GetCompatibleVersions()), 0) // A1 - B1 + + iconn1 := types.NewIdentifiedConnection(connA0.ID, conn1) + iconn2 := types.NewIdentifiedConnection(connA1.ID, conn2) + + expConnections := []types.IdentifiedConnection{iconn1, iconn2} + + connections := suite.chainA.App.IBCKeeper.ConnectionKeeper.GetAllConnections(suite.chainA.GetContext()) + suite.Require().Len(connections, len(expConnections)) + suite.Require().Equal(expConnections, connections) +} + +// the test creates 2 clients clientA0 and clientA1. clientA0 has a single +// connection and clientA1 has 2 connections. +func (suite KeeperTestSuite) TestGetAllClientConnectionPaths() { + clientA0, _, connA0, _ := suite.coordinator.SetupClientConnections(suite.chainA, suite.chainB, exported.Tendermint) + clientA1, clientB1, connA1, _ := suite.coordinator.SetupClientConnections(suite.chainA, suite.chainB, exported.Tendermint) + connA2, _ := suite.coordinator.CreateConnection(suite.chainA, suite.chainB, clientA1, clientB1) + + expPaths := []types.ConnectionPaths{ + types.NewConnectionPaths(clientA0, []string{connA0.ID}), + types.NewConnectionPaths(clientA1, []string{connA1.ID, connA2.ID}), + } + + connPaths := suite.chainA.App.IBCKeeper.ConnectionKeeper.GetAllClientConnectionPaths(suite.chainA.GetContext()) + suite.Require().Len(connPaths, 2) + suite.Require().Equal(expPaths, connPaths) +} + +// TestGetTimestampAtHeight verifies if the clients on each chain return the +// correct timestamp for the other chain. +func (suite *KeeperTestSuite) TestGetTimestampAtHeight() { + var connection types.ConnectionEnd + + cases := []struct { + msg string + malleate func() + expPass bool + }{ + {"verification success", func() { + _, _, connA, _ := suite.coordinator.SetupClientConnections(suite.chainA, suite.chainB, exported.Tendermint) + connection = suite.chainA.GetConnection(connA) + }, true}, + {"consensus state not found", func() { + // any non-nil value of connection is valid + suite.Require().NotNil(connection) + }, false}, + } + + for _, tc := range cases { + suite.Run(fmt.Sprintf("Case %s", tc.msg), func() { + suite.SetupTest() // reset + + tc.malleate() + + actualTimestamp, err := suite.chainA.App.IBCKeeper.ConnectionKeeper.GetTimestampAtHeight( + suite.chainA.GetContext(), connection, suite.chainB.LastHeader.GetHeight(), + ) + + if tc.expPass { + suite.Require().NoError(err) + suite.Require().EqualValues(uint64(suite.chainB.LastHeader.GetTime().UnixNano()), actualTimestamp) + } else { + suite.Require().Error(err) + } + }) + } +} diff --git a/core/03-connection/keeper/verify.go b/core/03-connection/keeper/verify.go new file mode 100644 index 0000000000..ddb1ea6b96 --- /dev/null +++ b/core/03-connection/keeper/verify.go @@ -0,0 +1,225 @@ +package keeper + +import ( + sdk "github.com/cosmos/cosmos-sdk/types" + sdkerrors "github.com/cosmos/cosmos-sdk/types/errors" + clienttypes "github.com/cosmos/cosmos-sdk/x/ibc/core/02-client/types" + "github.com/cosmos/cosmos-sdk/x/ibc/core/exported" +) + +// VerifyClientState verifies a proof of a client state of the running machine +// stored on the target machine +func (k Keeper) VerifyClientState( + ctx sdk.Context, + connection exported.ConnectionI, + height exported.Height, + proof []byte, + clientState exported.ClientState, +) error { + clientID := connection.GetClientID() + targetClient, found := k.clientKeeper.GetClientState(ctx, clientID) + if !found { + return sdkerrors.Wrap(clienttypes.ErrClientNotFound, clientID) + } + + if err := targetClient.VerifyClientState( + 
k.clientKeeper.ClientStore(ctx, clientID), k.cdc, height, + connection.GetCounterparty().GetPrefix(), connection.GetCounterparty().GetClientID(), proof, clientState); err != nil { + return sdkerrors.Wrapf(err, "failed client state verification for target client: %s", connection.GetClientID()) + } + + return nil +} + +// VerifyClientConsensusState verifies a proof of the consensus state of the +// specified client stored on the target machine. +func (k Keeper) VerifyClientConsensusState( + ctx sdk.Context, + connection exported.ConnectionI, + height exported.Height, + consensusHeight exported.Height, + proof []byte, + consensusState exported.ConsensusState, +) error { + clientID := connection.GetClientID() + clientState, found := k.clientKeeper.GetClientState(ctx, clientID) + if !found { + return sdkerrors.Wrap(clienttypes.ErrClientNotFound, clientID) + } + + if err := clientState.VerifyClientConsensusState( + k.clientKeeper.ClientStore(ctx, clientID), k.cdc, height, + connection.GetCounterparty().GetClientID(), consensusHeight, connection.GetCounterparty().GetPrefix(), proof, consensusState, + ); err != nil { + return sdkerrors.Wrapf(err, "failed consensus state verification for client (%s)", connection.GetClientID()) + } + + return nil +} + +// VerifyConnectionState verifies a proof of the connection state of the +// specified connection end stored on the target machine. +func (k Keeper) VerifyConnectionState( + ctx sdk.Context, + connection exported.ConnectionI, + height exported.Height, + proof []byte, + connectionID string, + connectionEnd exported.ConnectionI, // opposite connection +) error { + clientState, found := k.clientKeeper.GetClientState(ctx, connection.GetClientID()) + if !found { + return sdkerrors.Wrap(clienttypes.ErrClientNotFound, connection.GetClientID()) + } + + if err := clientState.VerifyConnectionState( + k.clientKeeper.ClientStore(ctx, connection.GetClientID()), k.cdc, height, + connection.GetCounterparty().GetPrefix(), proof, connectionID, connectionEnd, + ); err != nil { + return sdkerrors.Wrapf(err, "failed connection state verification for client (%s)", connection.GetClientID()) + } + + return nil +} + +// VerifyChannelState verifies a proof of the channel state of the specified +// channel end, under the specified port, stored on the target machine. +func (k Keeper) VerifyChannelState( + ctx sdk.Context, + connection exported.ConnectionI, + height exported.Height, + proof []byte, + portID, + channelID string, + channel exported.ChannelI, +) error { + clientState, found := k.clientKeeper.GetClientState(ctx, connection.GetClientID()) + if !found { + return sdkerrors.Wrap(clienttypes.ErrClientNotFound, connection.GetClientID()) + } + + if err := clientState.VerifyChannelState( + k.clientKeeper.ClientStore(ctx, connection.GetClientID()), k.cdc, height, + connection.GetCounterparty().GetPrefix(), proof, + portID, channelID, channel, + ); err != nil { + return sdkerrors.Wrapf(err, "failed channel state verification for client (%s)", connection.GetClientID()) + } + + return nil +} + +// VerifyPacketCommitment verifies a proof of an outgoing packet commitment at +// the specified port, specified channel, and specified sequence. 
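VerifyPacketCommitment below is intended to be consumed by higher-level packet handling rather than called directly by users. The following caller-side sketch is illustrative only: the helper name is hypothetical, the import paths are inferred from this file layout, and the commitment bytes are assumed to be recomputed from the packet fields by the caller.

package example

import (
	sdk "github.com/cosmos/cosmos-sdk/types"
	connectionkeeper "github.com/cosmos/cosmos-sdk/x/ibc/core/03-connection/keeper"
	"github.com/cosmos/cosmos-sdk/x/ibc/core/03-connection/types"
	"github.com/cosmos/cosmos-sdk/x/ibc/core/exported"
)

// checkCounterpartyCommitment verifies, against the light client referenced by
// the given connection, that the counterparty chain committed a packet with the
// given sequence on the given port/channel before the packet is processed locally.
func checkCounterpartyCommitment(
	ctx sdk.Context,
	k connectionkeeper.Keeper,
	connection types.ConnectionEnd,
	proof []byte,
	proofHeight exported.Height,
	portID, channelID string,
	sequence uint64,
	commitment []byte,
) error {
	return k.VerifyPacketCommitment(
		ctx, connection, proofHeight, proof,
		portID, channelID, sequence, commitment,
	)
}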
+func (k Keeper) VerifyPacketCommitment( + ctx sdk.Context, + connection exported.ConnectionI, + height exported.Height, + proof []byte, + portID, + channelID string, + sequence uint64, + commitmentBytes []byte, +) error { + clientState, found := k.clientKeeper.GetClientState(ctx, connection.GetClientID()) + if !found { + return sdkerrors.Wrap(clienttypes.ErrClientNotFound, connection.GetClientID()) + } + + if err := clientState.VerifyPacketCommitment( + k.clientKeeper.ClientStore(ctx, connection.GetClientID()), k.cdc, height, + uint64(ctx.BlockTime().UnixNano()), connection.GetDelayPeriod(), + connection.GetCounterparty().GetPrefix(), proof, portID, channelID, + sequence, commitmentBytes, + ); err != nil { + return sdkerrors.Wrapf(err, "failed packet commitment verification for client (%s)", connection.GetClientID()) + } + + return nil +} + +// VerifyPacketAcknowledgement verifies a proof of an incoming packet +// acknowledgement at the specified port, specified channel, and specified sequence. +func (k Keeper) VerifyPacketAcknowledgement( + ctx sdk.Context, + connection exported.ConnectionI, + height exported.Height, + proof []byte, + portID, + channelID string, + sequence uint64, + acknowledgement []byte, +) error { + clientState, found := k.clientKeeper.GetClientState(ctx, connection.GetClientID()) + if !found { + return sdkerrors.Wrap(clienttypes.ErrClientNotFound, connection.GetClientID()) + } + + if err := clientState.VerifyPacketAcknowledgement( + k.clientKeeper.ClientStore(ctx, connection.GetClientID()), k.cdc, height, + uint64(ctx.BlockTime().UnixNano()), connection.GetDelayPeriod(), + connection.GetCounterparty().GetPrefix(), proof, portID, channelID, + sequence, acknowledgement, + ); err != nil { + return sdkerrors.Wrapf(err, "failed packet acknowledgement verification for client (%s)", connection.GetClientID()) + } + + return nil +} + +// VerifyPacketReceiptAbsence verifies a proof of the absence of an +// incoming packet receipt at the specified port, specified channel, and +// specified sequence. +func (k Keeper) VerifyPacketReceiptAbsence( + ctx sdk.Context, + connection exported.ConnectionI, + height exported.Height, + proof []byte, + portID, + channelID string, + sequence uint64, +) error { + clientState, found := k.clientKeeper.GetClientState(ctx, connection.GetClientID()) + if !found { + return sdkerrors.Wrap(clienttypes.ErrClientNotFound, connection.GetClientID()) + } + + if err := clientState.VerifyPacketReceiptAbsence( + k.clientKeeper.ClientStore(ctx, connection.GetClientID()), k.cdc, height, + uint64(ctx.BlockTime().UnixNano()), connection.GetDelayPeriod(), + connection.GetCounterparty().GetPrefix(), proof, portID, channelID, + sequence, + ); err != nil { + return sdkerrors.Wrapf(err, "failed packet receipt absence verification for client (%s)", connection.GetClientID()) + } + + return nil +} + +// VerifyNextSequenceRecv verifies a proof of the next sequence number to be +// received of the specified channel at the specified port. 
+func (k Keeper) VerifyNextSequenceRecv( + ctx sdk.Context, + connection exported.ConnectionI, + height exported.Height, + proof []byte, + portID, + channelID string, + nextSequenceRecv uint64, +) error { + clientState, found := k.clientKeeper.GetClientState(ctx, connection.GetClientID()) + if !found { + return sdkerrors.Wrap(clienttypes.ErrClientNotFound, connection.GetClientID()) + } + + if err := clientState.VerifyNextSequenceRecv( + k.clientKeeper.ClientStore(ctx, connection.GetClientID()), k.cdc, height, + uint64(ctx.BlockTime().UnixNano()), connection.GetDelayPeriod(), + connection.GetCounterparty().GetPrefix(), proof, portID, channelID, + nextSequenceRecv, + ); err != nil { + return sdkerrors.Wrapf(err, "failed next sequence receive verification for client (%s)", connection.GetClientID()) + } + + return nil +} diff --git a/core/03-connection/keeper/verify_test.go b/core/03-connection/keeper/verify_test.go new file mode 100644 index 0000000000..2d94955d8e --- /dev/null +++ b/core/03-connection/keeper/verify_test.go @@ -0,0 +1,514 @@ +package keeper_test + +import ( + "fmt" + "time" + + clienttypes "github.com/cosmos/cosmos-sdk/x/ibc/core/02-client/types" + "github.com/cosmos/cosmos-sdk/x/ibc/core/03-connection/types" + channeltypes "github.com/cosmos/cosmos-sdk/x/ibc/core/04-channel/types" + host "github.com/cosmos/cosmos-sdk/x/ibc/core/24-host" + "github.com/cosmos/cosmos-sdk/x/ibc/core/exported" + ibctmtypes "github.com/cosmos/cosmos-sdk/x/ibc/light-clients/07-tendermint/types" + ibctesting "github.com/cosmos/cosmos-sdk/x/ibc/testing" + ibcmock "github.com/cosmos/cosmos-sdk/x/ibc/testing/mock" +) + +var defaultTimeoutHeight = clienttypes.NewHeight(0, 100000) + +// TestVerifyClientState verifies a client state of chainA +// stored on clientB (which is on chainB) +func (suite *KeeperTestSuite) TestVerifyClientState() { + cases := []struct { + msg string + changeClientID bool + heightDiff uint64 + malleateCounterparty bool + expPass bool + }{ + {"verification success", false, 0, false, true}, + {"client state not found", true, 0, false, false}, + {"consensus state for proof height not found", false, 5, false, false}, + {"verification failed", false, 0, true, false}, + } + + for _, tc := range cases { + tc := tc + + suite.Run(tc.msg, func() { + suite.SetupTest() // reset + + _, clientB, connA, _ := suite.coordinator.SetupClientConnections(suite.chainA, suite.chainB, exported.Tendermint) + + counterpartyClient, clientProof := suite.chainB.QueryClientStateProof(clientB) + proofHeight := clienttypes.NewHeight(0, uint64(suite.chainB.GetContext().BlockHeight()-1)) + + if tc.malleateCounterparty { + tmClient, _ := counterpartyClient.(*ibctmtypes.ClientState) + tmClient.ChainId = "wrongChainID" + } + + connection := suite.chainA.GetConnection(connA) + if tc.changeClientID { + connection.ClientId = ibctesting.InvalidID + } + + err := suite.chainA.App.IBCKeeper.ConnectionKeeper.VerifyClientState( + suite.chainA.GetContext(), connection, + malleateHeight(proofHeight, tc.heightDiff), clientProof, counterpartyClient, + ) + + if tc.expPass { + suite.Require().NoError(err) + } else { + suite.Require().Error(err) + } + }) + } +} + +// TestVerifyClientConsensusState verifies that the consensus state of +// chainA stored on clientB (which is on chainB) matches the consensus +// state for chainA at that height. 
+func (suite *KeeperTestSuite) TestVerifyClientConsensusState() { + var ( + connA *ibctesting.TestConnection + connB *ibctesting.TestConnection + changeClientID bool + heightDiff uint64 + ) + cases := []struct { + msg string + malleate func() + expPass bool + }{ + {"verification success", func() { + _, _, connA, connB = suite.coordinator.SetupClientConnections(suite.chainA, suite.chainB, exported.Tendermint) + }, true}, + {"client state not found", func() { + _, _, connA, connB = suite.coordinator.SetupClientConnections(suite.chainA, suite.chainB, exported.Tendermint) + + changeClientID = true + }, false}, + {"consensus state not found", func() { + _, _, connA, connB = suite.coordinator.SetupClientConnections(suite.chainA, suite.chainB, exported.Tendermint) + + heightDiff = 5 + }, false}, + {"verification failed", func() { + _, _, connA, connB = suite.coordinator.SetupClientConnections(suite.chainA, suite.chainB, exported.Tendermint) + clientB := connB.ClientID + clientState := suite.chainB.GetClientState(clientB) + + // give chainB wrong consensus state for chainA + consState, found := suite.chainB.App.IBCKeeper.ClientKeeper.GetLatestClientConsensusState(suite.chainB.GetContext(), clientB) + suite.Require().True(found) + + tmConsState, ok := consState.(*ibctmtypes.ConsensusState) + suite.Require().True(ok) + + tmConsState.Timestamp = time.Now() + suite.chainB.App.IBCKeeper.ClientKeeper.SetClientConsensusState(suite.chainB.GetContext(), clientB, clientState.GetLatestHeight(), tmConsState) + + suite.coordinator.CommitBlock(suite.chainB) + }, false}, + } + + for _, tc := range cases { + tc := tc + + suite.Run(tc.msg, func() { + suite.SetupTest() // reset + heightDiff = 0 // must be explicitly changed in malleate + changeClientID = false // must be explicitly changed in malleate + + tc.malleate() + + connection := suite.chainA.GetConnection(connA) + if changeClientID { + connection.ClientId = ibctesting.InvalidID + } + + proof, consensusHeight := suite.chainB.QueryConsensusStateProof(connB.ClientID) + proofHeight := clienttypes.NewHeight(0, uint64(suite.chainB.GetContext().BlockHeight()-1)) + consensusState, found := suite.chainA.App.IBCKeeper.ClientKeeper.GetSelfConsensusState(suite.chainA.GetContext(), consensusHeight) + suite.Require().True(found) + + err := suite.chainA.App.IBCKeeper.ConnectionKeeper.VerifyClientConsensusState( + suite.chainA.GetContext(), connection, + malleateHeight(proofHeight, heightDiff), consensusHeight, proof, consensusState, + ) + + if tc.expPass { + suite.Require().NoError(err) + } else { + suite.Require().Error(err) + } + }) + } +} + +// TestVerifyConnectionState verifies the connection state of the connection +// on chainB. The connections on chainA and chainB are fully opened. 
+func (suite *KeeperTestSuite) TestVerifyConnectionState() { + cases := []struct { + msg string + changeClientID bool + changeConnectionState bool + heightDiff uint64 + expPass bool + }{ + {"verification success", false, false, 0, true}, + {"client state not found - changed client ID", true, false, 0, false}, + {"consensus state not found - increased proof height", false, false, 5, false}, + {"verification failed - connection state is different than proof", false, true, 0, false}, + } + + for _, tc := range cases { + tc := tc + + suite.Run(tc.msg, func() { + suite.SetupTest() // reset + + _, _, connA, connB := suite.coordinator.SetupClientConnections(suite.chainA, suite.chainB, exported.Tendermint) + + connection := suite.chainA.GetConnection(connA) + if tc.changeClientID { + connection.ClientId = ibctesting.InvalidID + } + expectedConnection := suite.chainB.GetConnection(connB) + + connectionKey := host.ConnectionKey(connB.ID) + proof, proofHeight := suite.chainB.QueryProof(connectionKey) + + if tc.changeConnectionState { + expectedConnection.State = types.TRYOPEN + } + + err := suite.chainA.App.IBCKeeper.ConnectionKeeper.VerifyConnectionState( + suite.chainA.GetContext(), connection, + malleateHeight(proofHeight, tc.heightDiff), proof, connB.ID, expectedConnection, + ) + + if tc.expPass { + suite.Require().NoError(err) + } else { + suite.Require().Error(err) + } + }) + } +} + +// TestVerifyChannelState verifies the channel state of the channel on +// chainB. The channels on chainA and chainB are fully opened. +func (suite *KeeperTestSuite) TestVerifyChannelState() { + cases := []struct { + msg string + changeClientID bool + changeChannelState bool + heightDiff uint64 + expPass bool + }{ + {"verification success", false, false, 0, true}, + {"client state not found- changed client ID", true, false, 0, false}, + {"consensus state not found - increased proof height", false, false, 5, false}, + {"verification failed - changed channel state", false, true, 0, false}, + } + + for _, tc := range cases { + tc := tc + + suite.Run(fmt.Sprintf("Case %s", tc.msg), func() { + suite.SetupTest() // reset + + _, _, connA, _, _, channelB := suite.coordinator.Setup(suite.chainA, suite.chainB, channeltypes.UNORDERED) + connection := suite.chainA.GetConnection(connA) + if tc.changeClientID { + connection.ClientId = ibctesting.InvalidID + } + + channelKey := host.ChannelKey(channelB.PortID, channelB.ID) + proof, proofHeight := suite.chainB.QueryProof(channelKey) + + channel := suite.chainB.GetChannel(channelB) + if tc.changeChannelState { + channel.State = channeltypes.TRYOPEN + } + + err := suite.chainA.App.IBCKeeper.ConnectionKeeper.VerifyChannelState( + suite.chainA.GetContext(), connection, malleateHeight(proofHeight, tc.heightDiff), proof, + channelB.PortID, channelB.ID, channel, + ) + + if tc.expPass { + suite.Require().NoError(err) + } else { + suite.Require().Error(err) + } + }) + } +} + +// TestVerifyPacketCommitmentState has chainB verify the packet commitment +// on channelA. The channels on chainA and chainB are fully opened and a +// packet is sent from chainA to chainB, but has not been received. 
+func (suite *KeeperTestSuite) TestVerifyPacketCommitment() { + cases := []struct { + msg string + changeClientID bool + changePacketCommitmentState bool + heightDiff uint64 + delayPeriod uint64 + expPass bool + }{ + {"verification success", false, false, 0, 0, true}, + {"verification success: delay period passed", false, false, 0, uint64(1 * time.Second.Nanoseconds()), true}, + {"delay period has not passed", false, false, 0, uint64(1 * time.Hour.Nanoseconds()), false}, + {"client state not found- changed client ID", true, false, 0, 0, false}, + {"consensus state not found - increased proof height", false, false, 5, 0, false}, + {"verification failed - changed packet commitment state", false, true, 0, 0, false}, + } + + for _, tc := range cases { + tc := tc + + suite.Run(tc.msg, func() { + suite.SetupTest() // reset + + _, clientB, _, connB, channelA, channelB := suite.coordinator.Setup(suite.chainA, suite.chainB, channeltypes.UNORDERED) + + connection := suite.chainB.GetConnection(connB) + connection.DelayPeriod = tc.delayPeriod + if tc.changeClientID { + connection.ClientId = ibctesting.InvalidID + } + + packet := channeltypes.NewPacket(ibctesting.TestHash, 1, channelA.PortID, channelA.ID, channelB.PortID, channelB.ID, defaultTimeoutHeight, 0) + err := suite.coordinator.SendPacket(suite.chainA, suite.chainB, packet, clientB) + suite.Require().NoError(err) + + commitmentKey := host.PacketCommitmentKey(packet.GetSourcePort(), packet.GetSourceChannel(), packet.GetSequence()) + proof, proofHeight := suite.chainA.QueryProof(commitmentKey) + + if tc.changePacketCommitmentState { + packet.Data = []byte(ibctesting.InvalidID) + } + + commitment := channeltypes.CommitPacket(suite.chainB.App.IBCKeeper.Codec(), packet) + err = suite.chainB.App.IBCKeeper.ConnectionKeeper.VerifyPacketCommitment( + suite.chainB.GetContext(), connection, malleateHeight(proofHeight, tc.heightDiff), proof, + packet.GetSourcePort(), packet.GetSourceChannel(), packet.GetSequence(), commitment, + ) + + if tc.expPass { + suite.Require().NoError(err) + } else { + suite.Require().Error(err) + } + }) + } +} + +// TestVerifyPacketAcknowledgement has chainA verify the acknowledgement on +// channelB. The channels on chainA and chainB are fully opened and a packet +// is sent from chainA to chainB and received. 
+func (suite *KeeperTestSuite) TestVerifyPacketAcknowledgement() { + cases := []struct { + msg string + changeClientID bool + changeAcknowledgement bool + heightDiff uint64 + delayPeriod uint64 + expPass bool + }{ + {"verification success", false, false, 0, 0, true}, + {"verification success: delay period passed", false, false, 0, uint64(1 * time.Second.Nanoseconds()), true}, + {"delay period has not passed", false, false, 0, uint64(1 * time.Hour.Nanoseconds()), false}, + {"client state not found- changed client ID", true, false, 0, 0, false}, + {"consensus state not found - increased proof height", false, false, 5, 0, false}, + {"verification failed - changed acknowledgement", false, true, 0, 0, false}, + } + + for _, tc := range cases { + tc := tc + + suite.Run(tc.msg, func() { + suite.SetupTest() // reset + + clientA, clientB, connA, _, channelA, channelB := suite.coordinator.Setup(suite.chainA, suite.chainB, channeltypes.UNORDERED) + + connection := suite.chainA.GetConnection(connA) + connection.DelayPeriod = tc.delayPeriod + if tc.changeClientID { + connection.ClientId = ibctesting.InvalidID + } + + // send and receive packet + packet := channeltypes.NewPacket(ibctesting.TestHash, 1, channelA.PortID, channelA.ID, channelB.PortID, channelB.ID, defaultTimeoutHeight, 0) + err := suite.coordinator.SendPacket(suite.chainA, suite.chainB, packet, clientB) + suite.Require().NoError(err) + + // increment receiving chain's (chainB) time by 2 hour to always pass receive + suite.coordinator.IncrementTimeBy(time.Hour * 2) + suite.coordinator.CommitBlock(suite.chainB) + + err = suite.coordinator.RecvPacket(suite.chainA, suite.chainB, clientA, packet) + suite.Require().NoError(err) + + packetAckKey := host.PacketAcknowledgementKey(packet.GetDestPort(), packet.GetDestChannel(), packet.GetSequence()) + proof, proofHeight := suite.chainB.QueryProof(packetAckKey) + + ack := ibcmock.MockAcknowledgement + if tc.changeAcknowledgement { + ack = []byte(ibctesting.InvalidID) + } + + err = suite.chainA.App.IBCKeeper.ConnectionKeeper.VerifyPacketAcknowledgement( + suite.chainA.GetContext(), connection, malleateHeight(proofHeight, tc.heightDiff), proof, + packet.GetDestPort(), packet.GetDestChannel(), packet.GetSequence(), ack, + ) + + if tc.expPass { + suite.Require().NoError(err) + } else { + suite.Require().Error(err) + } + }) + } +} + +// TestVerifyPacketReceiptAbsence has chainA verify the receipt +// absence on channelB. The channels on chainA and chainB are fully opened and +// a packet is sent from chainA to chainB and not received. 
+func (suite *KeeperTestSuite) TestVerifyPacketReceiptAbsence() { + cases := []struct { + msg string + changeClientID bool + recvAck bool + heightDiff uint64 + delayPeriod uint64 + expPass bool + }{ + {"verification success", false, false, 0, 0, true}, + {"verification success: delay period passed", false, false, 0, uint64(1 * time.Second.Nanoseconds()), true}, + {"delay period has not passed", false, false, 0, uint64(1 * time.Hour.Nanoseconds()), false}, + {"client state not found - changed client ID", true, false, 0, 0, false}, + {"consensus state not found - increased proof height", false, false, 5, 0, false}, + {"verification failed - acknowledgement was received", false, true, 0, 0, false}, + } + + for _, tc := range cases { + tc := tc + + suite.Run(tc.msg, func() { + suite.SetupTest() // reset + + clientA, clientB, connA, _, channelA, channelB := suite.coordinator.Setup(suite.chainA, suite.chainB, channeltypes.UNORDERED) + + connection := suite.chainA.GetConnection(connA) + connection.DelayPeriod = tc.delayPeriod + if tc.changeClientID { + connection.ClientId = ibctesting.InvalidID + } + + // send, only receive if specified + packet := channeltypes.NewPacket(ibctesting.TestHash, 1, channelA.PortID, channelA.ID, channelB.PortID, channelB.ID, defaultTimeoutHeight, 0) + err := suite.coordinator.SendPacket(suite.chainA, suite.chainB, packet, clientB) + suite.Require().NoError(err) + + if tc.recvAck { + // increment receiving chain's (chainB) time by 2 hour to always pass receive + suite.coordinator.IncrementTimeBy(time.Hour * 2) + suite.coordinator.CommitBlock(suite.chainB) + + err = suite.coordinator.RecvPacket(suite.chainA, suite.chainB, clientA, packet) + suite.Require().NoError(err) + } else { + // need to update height to prove absence + suite.coordinator.CommitBlock(suite.chainA, suite.chainB) + suite.coordinator.UpdateClient(suite.chainA, suite.chainB, clientA, exported.Tendermint) + } + + packetReceiptKey := host.PacketReceiptKey(packet.GetDestPort(), packet.GetDestChannel(), packet.GetSequence()) + proof, proofHeight := suite.chainB.QueryProof(packetReceiptKey) + + err = suite.chainA.App.IBCKeeper.ConnectionKeeper.VerifyPacketReceiptAbsence( + suite.chainA.GetContext(), connection, malleateHeight(proofHeight, tc.heightDiff), proof, + packet.GetDestPort(), packet.GetDestChannel(), packet.GetSequence(), + ) + + if tc.expPass { + suite.Require().NoError(err) + } else { + suite.Require().Error(err) + } + }) + } +} + +// TestVerifyNextSequenceRecv has chainA verify the next sequence receive on +// channelB. The channels on chainA and chainB are fully opened and a packet +// is sent from chainA to chainB and received. 
+func (suite *KeeperTestSuite) TestVerifyNextSequenceRecv() { + cases := []struct { + msg string + changeClientID bool + offsetSeq uint64 + heightDiff uint64 + delayPeriod uint64 + expPass bool + }{ + {"verification success", false, 0, 0, 0, true}, + {"verification success: delay period passed", false, 0, 0, uint64(1 * time.Second.Nanoseconds()), true}, + {"delay period has not passed", false, 0, 0, uint64(1 * time.Hour.Nanoseconds()), false}, + {"client state not found- changed client ID", true, 0, 0, 0, false}, + {"consensus state not found - increased proof height", false, 0, 5, 0, false}, + {"verification failed - wrong expected next seq recv", false, 1, 0, 0, false}, + } + + for _, tc := range cases { + tc := tc + + suite.Run(tc.msg, func() { + suite.SetupTest() // reset + + clientA, clientB, connA, _, channelA, channelB := suite.coordinator.Setup(suite.chainA, suite.chainB, channeltypes.UNORDERED) + + connection := suite.chainA.GetConnection(connA) + connection.DelayPeriod = tc.delayPeriod + if tc.changeClientID { + connection.ClientId = ibctesting.InvalidID + } + + // send and receive packet + packet := channeltypes.NewPacket(ibctesting.TestHash, 1, channelA.PortID, channelA.ID, channelB.PortID, channelB.ID, defaultTimeoutHeight, 0) + err := suite.coordinator.SendPacket(suite.chainA, suite.chainB, packet, clientB) + suite.Require().NoError(err) + + // increment receiving chain's (chainB) time by 2 hour to always pass receive + suite.coordinator.IncrementTimeBy(time.Hour * 2) + suite.coordinator.CommitBlock(suite.chainB) + + err = suite.coordinator.RecvPacket(suite.chainA, suite.chainB, clientA, packet) + suite.Require().NoError(err) + + nextSeqRecvKey := host.NextSequenceRecvKey(packet.GetDestPort(), packet.GetDestChannel()) + proof, proofHeight := suite.chainB.QueryProof(nextSeqRecvKey) + + err = suite.chainA.App.IBCKeeper.ConnectionKeeper.VerifyNextSequenceRecv( + suite.chainA.GetContext(), connection, malleateHeight(proofHeight, tc.heightDiff), proof, + packet.GetDestPort(), packet.GetDestChannel(), packet.GetSequence()+tc.offsetSeq, + ) + + if tc.expPass { + suite.Require().NoError(err) + } else { + suite.Require().Error(err) + } + }) + } +} + +func malleateHeight(height exported.Height, diff uint64) exported.Height { + return clienttypes.NewHeight(height.GetRevisionNumber(), height.GetRevisionHeight()+diff) +} diff --git a/core/03-connection/module.go b/core/03-connection/module.go new file mode 100644 index 0000000000..6100caa462 --- /dev/null +++ b/core/03-connection/module.go @@ -0,0 +1,29 @@ +package connection + +import ( + "github.com/gogo/protobuf/grpc" + "github.com/spf13/cobra" + + "github.com/cosmos/cosmos-sdk/x/ibc/core/03-connection/client/cli" + "github.com/cosmos/cosmos-sdk/x/ibc/core/03-connection/types" +) + +// Name returns the IBC connection ICS name. +func Name() string { + return types.SubModuleName +} + +// GetTxCmd returns the root tx command for the IBC connections. +func GetTxCmd() *cobra.Command { + return cli.NewTxCmd() +} + +// GetQueryCmd returns the root query command for the IBC connections. +func GetQueryCmd() *cobra.Command { + return cli.GetQueryCmd() +} + +// RegisterQueryService registers the gRPC query service for IBC connections. 
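+//
+// Illustrative wiring sketch (the application-side names are assumed):
+//
+//  connection.RegisterQueryService(grpcQueryRouter, app.IBCKeeper.ConnectionKeeper)
+//
+// where grpcQueryRouter is assumed to implement the gogo protobuf grpc.Server
+// interface and the connection keeper provides the types.QueryServer implementation.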
+func RegisterQueryService(server grpc.Server, queryServer types.QueryServer) { + types.RegisterQueryServer(server, queryServer) +} diff --git a/core/03-connection/simulation/decoder.go b/core/03-connection/simulation/decoder.go new file mode 100644 index 0000000000..ef988a103f --- /dev/null +++ b/core/03-connection/simulation/decoder.go @@ -0,0 +1,32 @@ +package simulation + +import ( + "bytes" + "fmt" + + "github.com/cosmos/cosmos-sdk/codec" + "github.com/cosmos/cosmos-sdk/types/kv" + "github.com/cosmos/cosmos-sdk/x/ibc/core/03-connection/types" + host "github.com/cosmos/cosmos-sdk/x/ibc/core/24-host" +) + +// NewDecodeStore returns a decoder function closure that unmarshals the KVPair's +// Value to the corresponding connection type. +func NewDecodeStore(cdc codec.BinaryMarshaler, kvA, kvB kv.Pair) (string, bool) { + switch { + case bytes.HasPrefix(kvA.Key, host.KeyClientStorePrefix) && bytes.HasSuffix(kvA.Key, []byte(host.KeyConnectionPrefix)): + var clientConnectionsA, clientConnectionsB types.ClientPaths + cdc.MustUnmarshalBinaryBare(kvA.Value, &clientConnectionsA) + cdc.MustUnmarshalBinaryBare(kvB.Value, &clientConnectionsB) + return fmt.Sprintf("ClientPaths A: %v\nClientPaths B: %v", clientConnectionsA, clientConnectionsB), true + + case bytes.HasPrefix(kvA.Key, []byte(host.KeyConnectionPrefix)): + var connectionA, connectionB types.ConnectionEnd + cdc.MustUnmarshalBinaryBare(kvA.Value, &connectionA) + cdc.MustUnmarshalBinaryBare(kvB.Value, &connectionB) + return fmt.Sprintf("ConnectionEnd A: %v\nConnectionEnd B: %v", connectionA, connectionB), true + + default: + return "", false + } +} diff --git a/core/03-connection/simulation/decoder_test.go b/core/03-connection/simulation/decoder_test.go new file mode 100644 index 0000000000..673bf64006 --- /dev/null +++ b/core/03-connection/simulation/decoder_test.go @@ -0,0 +1,69 @@ +package simulation_test + +import ( + "fmt" + "testing" + + "github.com/stretchr/testify/require" + + "github.com/cosmos/cosmos-sdk/simapp" + "github.com/cosmos/cosmos-sdk/types/kv" + "github.com/cosmos/cosmos-sdk/x/ibc/core/03-connection/simulation" + "github.com/cosmos/cosmos-sdk/x/ibc/core/03-connection/types" + host "github.com/cosmos/cosmos-sdk/x/ibc/core/24-host" +) + +func TestDecodeStore(t *testing.T) { + app := simapp.Setup(false) + cdc := app.AppCodec() + + connectionID := "connectionidone" + + connection := types.ConnectionEnd{ + ClientId: "clientidone", + Versions: types.ExportedVersionsToProto(types.GetCompatibleVersions()), + } + + paths := types.ClientPaths{ + Paths: []string{connectionID}, + } + + kvPairs := kv.Pairs{ + Pairs: []kv.Pair{ + { + Key: host.ClientConnectionsKey(connection.ClientId), + Value: cdc.MustMarshalBinaryBare(&paths), + }, + { + Key: host.ConnectionKey(connectionID), + Value: cdc.MustMarshalBinaryBare(&connection), + }, + { + Key: []byte{0x99}, + Value: []byte{0x99}, + }, + }, + } + tests := []struct { + name string + expectedLog string + }{ + {"ClientPaths", fmt.Sprintf("ClientPaths A: %v\nClientPaths B: %v", paths, paths)}, + {"ConnectionEnd", fmt.Sprintf("ConnectionEnd A: %v\nConnectionEnd B: %v", connection, connection)}, + {"other", ""}, + } + + for i, tt := range tests { + i, tt := i, tt + t.Run(tt.name, func(t *testing.T) { + res, found := simulation.NewDecodeStore(cdc, kvPairs.Pairs[i], kvPairs.Pairs[i]) + if i == len(tests)-1 { + require.False(t, found, string(kvPairs.Pairs[i].Key)) + require.Empty(t, res, string(kvPairs.Pairs[i].Key)) + } else { + require.True(t, found, string(kvPairs.Pairs[i].Key)) + 
require.Equal(t, tt.expectedLog, res, string(kvPairs.Pairs[i].Key)) + } + }) + } +} diff --git a/core/03-connection/simulation/genesis.go b/core/03-connection/simulation/genesis.go new file mode 100644 index 0000000000..43b0823776 --- /dev/null +++ b/core/03-connection/simulation/genesis.go @@ -0,0 +1,13 @@ +package simulation + +import ( + "math/rand" + + simtypes "github.com/cosmos/cosmos-sdk/types/simulation" + "github.com/cosmos/cosmos-sdk/x/ibc/core/03-connection/types" +) + +// GenConnectionGenesis returns the default connection genesis state. +func GenConnectionGenesis(_ *rand.Rand, _ []simtypes.Account) types.GenesisState { + return types.DefaultGenesisState() +} diff --git a/core/03-connection/types/codec.go b/core/03-connection/types/codec.go new file mode 100644 index 0000000000..6105fa9ee1 --- /dev/null +++ b/core/03-connection/types/codec.go @@ -0,0 +1,47 @@ +package types + +import ( + "github.com/cosmos/cosmos-sdk/codec" + codectypes "github.com/cosmos/cosmos-sdk/codec/types" + sdk "github.com/cosmos/cosmos-sdk/types" + "github.com/cosmos/cosmos-sdk/types/msgservice" + "github.com/cosmos/cosmos-sdk/x/ibc/core/exported" +) + +// RegisterInterfaces register the ibc interfaces submodule implementations to protobuf +// Any. +func RegisterInterfaces(registry codectypes.InterfaceRegistry) { + registry.RegisterInterface( + "ibc.core.connection.v1.ConnectionI", + (*exported.ConnectionI)(nil), + &ConnectionEnd{}, + ) + registry.RegisterInterface( + "ibc.core.connection.v1.CounterpartyConnectionI", + (*exported.CounterpartyConnectionI)(nil), + &Counterparty{}, + ) + registry.RegisterInterface( + "ibc.core.connection.v1.Version", + (*exported.Version)(nil), + &Version{}, + ) + registry.RegisterImplementations( + (*sdk.Msg)(nil), + &MsgConnectionOpenInit{}, + &MsgConnectionOpenTry{}, + &MsgConnectionOpenAck{}, + &MsgConnectionOpenConfirm{}, + ) + + msgservice.RegisterMsgServiceDesc(registry, &_Msg_serviceDesc) +} + +var ( + // SubModuleCdc references the global x/ibc/core/03-connection module codec. Note, the codec should + // ONLY be used in certain instances of tests and for JSON encoding. + // + // The actual codec used for serialization should be provided to x/ibc/core/03-connection and + // defined at the application level. + SubModuleCdc = codec.NewProtoCodec(codectypes.NewInterfaceRegistry()) +) diff --git a/core/03-connection/types/connection.go b/core/03-connection/types/connection.go new file mode 100644 index 0000000000..197af83cad --- /dev/null +++ b/core/03-connection/types/connection.go @@ -0,0 +1,127 @@ +package types + +import ( + sdkerrors "github.com/cosmos/cosmos-sdk/types/errors" + commitmenttypes "github.com/cosmos/cosmos-sdk/x/ibc/core/23-commitment/types" + host "github.com/cosmos/cosmos-sdk/x/ibc/core/24-host" + "github.com/cosmos/cosmos-sdk/x/ibc/core/exported" +) + +var _ exported.ConnectionI = (*ConnectionEnd)(nil) + +// NewConnectionEnd creates a new ConnectionEnd instance. 
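+//
+// Example construction (illustrative only; the identifiers and prefix are
+// assumed values):
+//
+//  counterparty := NewCounterparty("07-tendermint-0", "connection-0",
+//      commitmenttypes.NewMerklePrefix([]byte("ibc")))
+//  connection := NewConnectionEnd(INIT, "07-tendermint-1", counterparty,
+//      ExportedVersionsToProto(GetCompatibleVersions()), 0)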
+func NewConnectionEnd(state State, clientID string, counterparty Counterparty, versions []*Version, delayPeriod uint64) ConnectionEnd { + return ConnectionEnd{ + ClientId: clientID, + Versions: versions, + State: state, + Counterparty: counterparty, + DelayPeriod: delayPeriod, + } +} + +// GetState implements the Connection interface +func (c ConnectionEnd) GetState() int32 { + return int32(c.State) +} + +// GetClientID implements the Connection interface +func (c ConnectionEnd) GetClientID() string { + return c.ClientId +} + +// GetCounterparty implements the Connection interface +func (c ConnectionEnd) GetCounterparty() exported.CounterpartyConnectionI { + return c.Counterparty +} + +// GetVersions implements the Connection interface +func (c ConnectionEnd) GetVersions() []exported.Version { + return ProtoVersionsToExported(c.Versions) +} + +// GetDelayPeriod implements the Connection interface +func (c ConnectionEnd) GetDelayPeriod() uint64 { + return c.DelayPeriod +} + +// ValidateBasic implements the Connection interface. +// NOTE: the protocol supports that the connection and client IDs match the +// counterparty's. +func (c ConnectionEnd) ValidateBasic() error { + if err := host.ClientIdentifierValidator(c.ClientId); err != nil { + return sdkerrors.Wrap(err, "invalid client ID") + } + if len(c.Versions) == 0 { + return sdkerrors.Wrap(sdkerrors.ErrInvalidVersion, "empty connection versions") + } + for _, version := range c.Versions { + if err := ValidateVersion(version); err != nil { + return err + } + } + return c.Counterparty.ValidateBasic() +} + +var _ exported.CounterpartyConnectionI = (*Counterparty)(nil) + +// NewCounterparty creates a new Counterparty instance. +func NewCounterparty(clientID, connectionID string, prefix commitmenttypes.MerklePrefix) Counterparty { + return Counterparty{ + ClientId: clientID, + ConnectionId: connectionID, + Prefix: prefix, + } +} + +// GetClientID implements the CounterpartyConnectionI interface +func (c Counterparty) GetClientID() string { + return c.ClientId +} + +// GetConnectionID implements the CounterpartyConnectionI interface +func (c Counterparty) GetConnectionID() string { + return c.ConnectionId +} + +// GetPrefix implements the CounterpartyConnectionI interface +func (c Counterparty) GetPrefix() exported.Prefix { + return &c.Prefix +} + +// ValidateBasic performs a basic validation check of the identifiers and prefix +func (c Counterparty) ValidateBasic() error { + if c.ConnectionId != "" { + if err := host.ConnectionIdentifierValidator(c.ConnectionId); err != nil { + return sdkerrors.Wrap(err, "invalid counterparty connection ID") + } + } + if err := host.ClientIdentifierValidator(c.ClientId); err != nil { + return sdkerrors.Wrap(err, "invalid counterparty client ID") + } + if c.Prefix.Empty() { + return sdkerrors.Wrap(ErrInvalidCounterparty, "counterparty prefix cannot be empty") + } + return nil +} + +// NewIdentifiedConnection creates a new IdentifiedConnection instance +func NewIdentifiedConnection(connectionID string, conn ConnectionEnd) IdentifiedConnection { + return IdentifiedConnection{ + Id: connectionID, + ClientId: conn.ClientId, + Versions: conn.Versions, + State: conn.State, + Counterparty: conn.Counterparty, + DelayPeriod: conn.DelayPeriod, + } +} + +// ValidateBasic performs a basic validation of the connection identifier and connection fields. 
+func (ic IdentifiedConnection) ValidateBasic() error { + if err := host.ConnectionIdentifierValidator(ic.Id); err != nil { + return sdkerrors.Wrap(err, "invalid connection ID") + } + connection := NewConnectionEnd(ic.State, ic.ClientId, ic.Counterparty, ic.Versions, ic.DelayPeriod) + return connection.ValidateBasic() +} diff --git a/core/03-connection/types/connection.pb.go b/core/03-connection/types/connection.pb.go new file mode 100644 index 0000000000..ec417b7590 --- /dev/null +++ b/core/03-connection/types/connection.pb.go @@ -0,0 +1,1801 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: ibcgo/core/connection/v1/connection.proto + +package types + +import ( + fmt "fmt" + types "github.com/cosmos/ibc-go/core/23-commitment/types" + _ "github.com/gogo/protobuf/gogoproto" + proto "github.com/gogo/protobuf/proto" + io "io" + math "math" + math_bits "math/bits" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +// State defines if a connection is in one of the following states: +// INIT, TRYOPEN, OPEN or UNINITIALIZED. +type State int32 + +const ( + // Default State + UNINITIALIZED State = 0 + // A connection end has just started the opening handshake. + INIT State = 1 + // A connection end has acknowledged the handshake step on the counterparty + // chain. + TRYOPEN State = 2 + // A connection end has completed the handshake. + OPEN State = 3 +) + +var State_name = map[int32]string{ + 0: "STATE_UNINITIALIZED_UNSPECIFIED", + 1: "STATE_INIT", + 2: "STATE_TRYOPEN", + 3: "STATE_OPEN", +} + +var State_value = map[string]int32{ + "STATE_UNINITIALIZED_UNSPECIFIED": 0, + "STATE_INIT": 1, + "STATE_TRYOPEN": 2, + "STATE_OPEN": 3, +} + +func (x State) String() string { + return proto.EnumName(State_name, int32(x)) +} + +func (State) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_278e9c8044b4f86b, []int{0} +} + +// ConnectionEnd defines a stateful object on a chain connected to another +// separate one. +// NOTE: there must only be 2 defined ConnectionEnds to establish +// a connection between two chains. +type ConnectionEnd struct { + // client associated with this connection. + ClientId string `protobuf:"bytes,1,opt,name=client_id,json=clientId,proto3" json:"client_id,omitempty" yaml:"client_id"` + // IBC version which can be utilised to determine encodings or protocols for + // channels or packets utilising this connection. + Versions []*Version `protobuf:"bytes,2,rep,name=versions,proto3" json:"versions,omitempty"` + // current state of the connection end. + State State `protobuf:"varint,3,opt,name=state,proto3,enum=ibcgo.core.connection.v1.State" json:"state,omitempty"` + // counterparty chain associated with this connection. + Counterparty Counterparty `protobuf:"bytes,4,opt,name=counterparty,proto3" json:"counterparty"` + // delay period that must pass before a consensus state can be used for + // packet-verification NOTE: delay period logic is only implemented by some + // clients. 
+ DelayPeriod uint64 `protobuf:"varint,5,opt,name=delay_period,json=delayPeriod,proto3" json:"delay_period,omitempty" yaml:"delay_period"` +} + +func (m *ConnectionEnd) Reset() { *m = ConnectionEnd{} } +func (m *ConnectionEnd) String() string { return proto.CompactTextString(m) } +func (*ConnectionEnd) ProtoMessage() {} +func (*ConnectionEnd) Descriptor() ([]byte, []int) { + return fileDescriptor_278e9c8044b4f86b, []int{0} +} +func (m *ConnectionEnd) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ConnectionEnd) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_ConnectionEnd.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *ConnectionEnd) XXX_Merge(src proto.Message) { + xxx_messageInfo_ConnectionEnd.Merge(m, src) +} +func (m *ConnectionEnd) XXX_Size() int { + return m.Size() +} +func (m *ConnectionEnd) XXX_DiscardUnknown() { + xxx_messageInfo_ConnectionEnd.DiscardUnknown(m) +} + +var xxx_messageInfo_ConnectionEnd proto.InternalMessageInfo + +// IdentifiedConnection defines a connection with additional connection +// identifier field. +type IdentifiedConnection struct { + // connection identifier. + Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty" yaml:"id"` + // client associated with this connection. + ClientId string `protobuf:"bytes,2,opt,name=client_id,json=clientId,proto3" json:"client_id,omitempty" yaml:"client_id"` + // IBC version which can be utilised to determine encodings or protocols for + // channels or packets utilising this connection + Versions []*Version `protobuf:"bytes,3,rep,name=versions,proto3" json:"versions,omitempty"` + // current state of the connection end. + State State `protobuf:"varint,4,opt,name=state,proto3,enum=ibcgo.core.connection.v1.State" json:"state,omitempty"` + // counterparty chain associated with this connection. + Counterparty Counterparty `protobuf:"bytes,5,opt,name=counterparty,proto3" json:"counterparty"` + // delay period associated with this connection. + DelayPeriod uint64 `protobuf:"varint,6,opt,name=delay_period,json=delayPeriod,proto3" json:"delay_period,omitempty" yaml:"delay_period"` +} + +func (m *IdentifiedConnection) Reset() { *m = IdentifiedConnection{} } +func (m *IdentifiedConnection) String() string { return proto.CompactTextString(m) } +func (*IdentifiedConnection) ProtoMessage() {} +func (*IdentifiedConnection) Descriptor() ([]byte, []int) { + return fileDescriptor_278e9c8044b4f86b, []int{1} +} +func (m *IdentifiedConnection) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *IdentifiedConnection) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_IdentifiedConnection.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *IdentifiedConnection) XXX_Merge(src proto.Message) { + xxx_messageInfo_IdentifiedConnection.Merge(m, src) +} +func (m *IdentifiedConnection) XXX_Size() int { + return m.Size() +} +func (m *IdentifiedConnection) XXX_DiscardUnknown() { + xxx_messageInfo_IdentifiedConnection.DiscardUnknown(m) +} + +var xxx_messageInfo_IdentifiedConnection proto.InternalMessageInfo + +// Counterparty defines the counterparty chain associated with a connection end. 
+type Counterparty struct { + // identifies the client on the counterparty chain associated with a given + // connection. + ClientId string `protobuf:"bytes,1,opt,name=client_id,json=clientId,proto3" json:"client_id,omitempty" yaml:"client_id"` + // identifies the connection end on the counterparty chain associated with a + // given connection. + ConnectionId string `protobuf:"bytes,2,opt,name=connection_id,json=connectionId,proto3" json:"connection_id,omitempty" yaml:"connection_id"` + // commitment merkle prefix of the counterparty chain. + Prefix types.MerklePrefix `protobuf:"bytes,3,opt,name=prefix,proto3" json:"prefix"` +} + +func (m *Counterparty) Reset() { *m = Counterparty{} } +func (m *Counterparty) String() string { return proto.CompactTextString(m) } +func (*Counterparty) ProtoMessage() {} +func (*Counterparty) Descriptor() ([]byte, []int) { + return fileDescriptor_278e9c8044b4f86b, []int{2} +} +func (m *Counterparty) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Counterparty) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Counterparty.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Counterparty) XXX_Merge(src proto.Message) { + xxx_messageInfo_Counterparty.Merge(m, src) +} +func (m *Counterparty) XXX_Size() int { + return m.Size() +} +func (m *Counterparty) XXX_DiscardUnknown() { + xxx_messageInfo_Counterparty.DiscardUnknown(m) +} + +var xxx_messageInfo_Counterparty proto.InternalMessageInfo + +// ClientPaths define all the connection paths for a client state. +type ClientPaths struct { + // list of connection paths + Paths []string `protobuf:"bytes,1,rep,name=paths,proto3" json:"paths,omitempty"` +} + +func (m *ClientPaths) Reset() { *m = ClientPaths{} } +func (m *ClientPaths) String() string { return proto.CompactTextString(m) } +func (*ClientPaths) ProtoMessage() {} +func (*ClientPaths) Descriptor() ([]byte, []int) { + return fileDescriptor_278e9c8044b4f86b, []int{3} +} +func (m *ClientPaths) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ClientPaths) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_ClientPaths.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *ClientPaths) XXX_Merge(src proto.Message) { + xxx_messageInfo_ClientPaths.Merge(m, src) +} +func (m *ClientPaths) XXX_Size() int { + return m.Size() +} +func (m *ClientPaths) XXX_DiscardUnknown() { + xxx_messageInfo_ClientPaths.DiscardUnknown(m) +} + +var xxx_messageInfo_ClientPaths proto.InternalMessageInfo + +func (m *ClientPaths) GetPaths() []string { + if m != nil { + return m.Paths + } + return nil +} + +// ConnectionPaths define all the connection paths for a given client state. 
+type ConnectionPaths struct { + // client state unique identifier + ClientId string `protobuf:"bytes,1,opt,name=client_id,json=clientId,proto3" json:"client_id,omitempty" yaml:"client_id"` + // list of connection paths + Paths []string `protobuf:"bytes,2,rep,name=paths,proto3" json:"paths,omitempty"` +} + +func (m *ConnectionPaths) Reset() { *m = ConnectionPaths{} } +func (m *ConnectionPaths) String() string { return proto.CompactTextString(m) } +func (*ConnectionPaths) ProtoMessage() {} +func (*ConnectionPaths) Descriptor() ([]byte, []int) { + return fileDescriptor_278e9c8044b4f86b, []int{4} +} +func (m *ConnectionPaths) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ConnectionPaths) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_ConnectionPaths.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *ConnectionPaths) XXX_Merge(src proto.Message) { + xxx_messageInfo_ConnectionPaths.Merge(m, src) +} +func (m *ConnectionPaths) XXX_Size() int { + return m.Size() +} +func (m *ConnectionPaths) XXX_DiscardUnknown() { + xxx_messageInfo_ConnectionPaths.DiscardUnknown(m) +} + +var xxx_messageInfo_ConnectionPaths proto.InternalMessageInfo + +func (m *ConnectionPaths) GetClientId() string { + if m != nil { + return m.ClientId + } + return "" +} + +func (m *ConnectionPaths) GetPaths() []string { + if m != nil { + return m.Paths + } + return nil +} + +// Version defines the versioning scheme used to negotiate the IBC verison in +// the connection handshake. +type Version struct { + // unique version identifier + Identifier string `protobuf:"bytes,1,opt,name=identifier,proto3" json:"identifier,omitempty"` + // list of features compatible with the specified identifier + Features []string `protobuf:"bytes,2,rep,name=features,proto3" json:"features,omitempty"` +} + +func (m *Version) Reset() { *m = Version{} } +func (m *Version) String() string { return proto.CompactTextString(m) } +func (*Version) ProtoMessage() {} +func (*Version) Descriptor() ([]byte, []int) { + return fileDescriptor_278e9c8044b4f86b, []int{5} +} +func (m *Version) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Version) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Version.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Version) XXX_Merge(src proto.Message) { + xxx_messageInfo_Version.Merge(m, src) +} +func (m *Version) XXX_Size() int { + return m.Size() +} +func (m *Version) XXX_DiscardUnknown() { + xxx_messageInfo_Version.DiscardUnknown(m) +} + +var xxx_messageInfo_Version proto.InternalMessageInfo + +func init() { + proto.RegisterEnum("ibcgo.core.connection.v1.State", State_name, State_value) + proto.RegisterType((*ConnectionEnd)(nil), "ibcgo.core.connection.v1.ConnectionEnd") + proto.RegisterType((*IdentifiedConnection)(nil), "ibcgo.core.connection.v1.IdentifiedConnection") + proto.RegisterType((*Counterparty)(nil), "ibcgo.core.connection.v1.Counterparty") + proto.RegisterType((*ClientPaths)(nil), "ibcgo.core.connection.v1.ClientPaths") + proto.RegisterType((*ConnectionPaths)(nil), "ibcgo.core.connection.v1.ConnectionPaths") + proto.RegisterType((*Version)(nil), "ibcgo.core.connection.v1.Version") +} + +func init() { + 
proto.RegisterFile("ibcgo/core/connection/v1/connection.proto", fileDescriptor_278e9c8044b4f86b) +} + +var fileDescriptor_278e9c8044b4f86b = []byte{ + // 648 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x54, 0xc1, 0x6e, 0xda, 0x4a, + 0x14, 0xf5, 0x18, 0x93, 0xc0, 0x10, 0xde, 0xa3, 0x53, 0xa4, 0x5a, 0x96, 0x6a, 0xbb, 0xae, 0x54, + 0xd1, 0x4a, 0x81, 0x92, 0xa8, 0x5d, 0x44, 0xea, 0x22, 0x10, 0x2a, 0x59, 0x69, 0x29, 0x72, 0x48, + 0xa5, 0x66, 0x83, 0xc0, 0x9e, 0x90, 0x51, 0xc1, 0x83, 0xec, 0x09, 0x2a, 0x7f, 0x10, 0x65, 0xd5, + 0x6d, 0x17, 0x91, 0x2a, 0xf5, 0x67, 0xb2, 0xc8, 0x22, 0xcb, 0xae, 0x50, 0x95, 0xfc, 0x01, 0x5f, + 0x50, 0xd9, 0x63, 0x8c, 0xd3, 0x8a, 0x45, 0x92, 0xee, 0xee, 0xf1, 0x3d, 0xe7, 0x30, 0xf7, 0xcc, + 0x65, 0xe0, 0x73, 0xd2, 0xb3, 0xfb, 0xb4, 0x62, 0x53, 0x0f, 0x57, 0x6c, 0xea, 0xba, 0xd8, 0x66, + 0x84, 0xba, 0x95, 0x71, 0x35, 0x81, 0xca, 0x23, 0x8f, 0x32, 0x8a, 0xe4, 0x90, 0x5a, 0x0e, 0xa8, + 0xe5, 0x44, 0x73, 0x5c, 0x55, 0x8a, 0x7d, 0xda, 0xa7, 0x21, 0xa9, 0x12, 0x54, 0x9c, 0xaf, 0xdc, + 0xb4, 0x1e, 0x0e, 0x09, 0x1b, 0x62, 0x97, 0x71, 0xeb, 0x39, 0xe2, 0x54, 0xe3, 0x42, 0x84, 0xf9, + 0x7a, 0x6c, 0xd9, 0x70, 0x1d, 0x54, 0x85, 0x59, 0x7b, 0x40, 0xb0, 0xcb, 0x3a, 0xc4, 0x91, 0x81, + 0x0e, 0x4a, 0xd9, 0x5a, 0x71, 0x36, 0xd5, 0x0a, 0x93, 0xee, 0x70, 0xb0, 0x65, 0xc4, 0x2d, 0xc3, + 0xca, 0xf0, 0xda, 0x74, 0xd0, 0x1b, 0x98, 0x19, 0x63, 0xcf, 0x27, 0xd4, 0xf5, 0x65, 0x51, 0x4f, + 0x95, 0x72, 0x1b, 0x4f, 0xca, 0xcb, 0x8e, 0x5c, 0xfe, 0xc8, 0x99, 0x56, 0x2c, 0x41, 0xaf, 0x60, + 0xda, 0x67, 0x5d, 0x86, 0xe5, 0x94, 0x0e, 0x4a, 0xff, 0x6d, 0x68, 0xcb, 0xb5, 0x7b, 0x01, 0xcd, + 0xe2, 0x6c, 0xd4, 0x82, 0x6b, 0x36, 0x3d, 0x76, 0x19, 0xf6, 0x46, 0x5d, 0x8f, 0x4d, 0x64, 0x49, + 0x07, 0xa5, 0xdc, 0xc6, 0xb3, 0xe5, 0xea, 0x7a, 0x82, 0x5d, 0x93, 0xce, 0xa7, 0x9a, 0x60, 0xdd, + 0x70, 0x40, 0x5b, 0x70, 0xcd, 0xc1, 0x83, 0xee, 0xa4, 0x33, 0xc2, 0x1e, 0xa1, 0x8e, 0x9c, 0xd6, + 0x41, 0x49, 0xaa, 0x3d, 0x9a, 0x4d, 0xb5, 0x87, 0x7c, 0xfa, 0x64, 0xd7, 0xb0, 0x72, 0x21, 0x6c, + 0x85, 0x68, 0x4b, 0x3a, 0xf9, 0xae, 0x09, 0xc6, 0x4c, 0x84, 0x45, 0xd3, 0xc1, 0x2e, 0x23, 0x87, + 0x04, 0x3b, 0x8b, 0x60, 0xd1, 0x63, 0x28, 0xc6, 0x71, 0xe6, 0x67, 0x53, 0x2d, 0xcb, 0x0d, 0x83, + 0x1c, 0x45, 0xf2, 0x47, 0xe8, 0xe2, 0xad, 0x43, 0x4f, 0xdd, 0x23, 0x74, 0xe9, 0x5e, 0xa1, 0xa7, + 0xff, 0x79, 0xe8, 0x2b, 0xb7, 0x0e, 0xfd, 0x02, 0xc0, 0xb5, 0xe4, 0xcf, 0xdc, 0x6d, 0x85, 0xf3, + 0x8b, 0x73, 0x2f, 0x2e, 0x41, 0x9e, 0x4d, 0xb5, 0x62, 0x24, 0x4b, 0xb6, 0x8d, 0x60, 0x88, 0x39, + 0x36, 0x1d, 0xb4, 0x03, 0x57, 0x46, 0x1e, 0x3e, 0x24, 0x5f, 0xc2, 0x1d, 0xfe, 0x2b, 0x90, 0xf8, + 0x4f, 0x37, 0xae, 0x96, 0xdf, 0x63, 0xef, 0xf3, 0x00, 0xb7, 0x42, 0x76, 0x14, 0x48, 0xa4, 0x8d, + 0xc6, 0x79, 0x0a, 0x73, 0xf5, 0xf0, 0x58, 0xad, 0x2e, 0x3b, 0xf2, 0x51, 0x11, 0xa6, 0x47, 0x41, + 0x21, 0x03, 0x3d, 0x55, 0xca, 0x5a, 0x1c, 0x18, 0x07, 0xf0, 0xff, 0xc5, 0x76, 0x71, 0xe2, 0x1d, + 0xa6, 0x8e, 0xbd, 0xc5, 0xa4, 0xf7, 0x2e, 0x5c, 0x8d, 0xf6, 0x05, 0xa9, 0x10, 0x92, 0xf9, 0x3a, + 0x7b, 0xdc, 0xd4, 0x4a, 0x7c, 0x41, 0x0a, 0xcc, 0x1c, 0xe2, 0x2e, 0x3b, 0xf6, 0xf0, 0xdc, 0x23, + 0xc6, 0x7c, 0x9a, 0x17, 0xdf, 0x00, 0x4c, 0x87, 0x1b, 0x84, 0x5e, 0x43, 0x6d, 0xaf, 0xbd, 0xdd, + 0x6e, 0x74, 0xf6, 0x9b, 0x66, 0xd3, 0x6c, 0x9b, 0xdb, 0xef, 0xcc, 0x83, 0xc6, 0x4e, 0x67, 0xbf, + 0xb9, 0xd7, 0x6a, 0xd4, 0xcd, 0xb7, 0x66, 0x63, 0xa7, 0x20, 0x28, 0x0f, 0x4e, 0xcf, 0xf4, 0xfc, + 0x0d, 0x02, 0x92, 0x21, 0xe4, 0xba, 0xe0, 0x63, 0x01, 0x28, 0x99, 0xd3, 0x33, 0x5d, 0x0a, 0x6a, + 0xa4, 0xc2, 0x3c, 0xef, 
0xb4, 0xad, 0x4f, 0x1f, 0x5a, 0x8d, 0x66, 0x41, 0x54, 0x72, 0xa7, 0x67, + 0xfa, 0x6a, 0x04, 0x17, 0xca, 0xb0, 0x99, 0xe2, 0xca, 0xa0, 0x56, 0xa4, 0x93, 0x1f, 0xaa, 0x50, + 0xdb, 0x3d, 0xbf, 0x52, 0xc1, 0xe5, 0x95, 0x0a, 0x7e, 0x5d, 0xa9, 0xe0, 0xeb, 0xb5, 0x2a, 0x5c, + 0x5e, 0xab, 0xc2, 0xcf, 0x6b, 0x55, 0x38, 0xa8, 0xf6, 0x09, 0x3b, 0x3a, 0xee, 0x05, 0x57, 0x57, + 0xb1, 0xa9, 0x3f, 0xa4, 0x7e, 0x85, 0xf4, 0xec, 0xf5, 0xf9, 0xa3, 0xfa, 0x72, 0x73, 0x3d, 0xf1, + 0x64, 0xb3, 0xc9, 0x08, 0xfb, 0xbd, 0x95, 0xf0, 0x41, 0xdd, 0xfc, 0x1d, 0x00, 0x00, 0xff, 0xff, + 0x7d, 0x5b, 0xa0, 0xa3, 0xd8, 0x05, 0x00, 0x00, +} + +func (m *ConnectionEnd) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ConnectionEnd) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ConnectionEnd) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.DelayPeriod != 0 { + i = encodeVarintConnection(dAtA, i, uint64(m.DelayPeriod)) + i-- + dAtA[i] = 0x28 + } + { + size, err := m.Counterparty.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintConnection(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + if m.State != 0 { + i = encodeVarintConnection(dAtA, i, uint64(m.State)) + i-- + dAtA[i] = 0x18 + } + if len(m.Versions) > 0 { + for iNdEx := len(m.Versions) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Versions[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintConnection(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + if len(m.ClientId) > 0 { + i -= len(m.ClientId) + copy(dAtA[i:], m.ClientId) + i = encodeVarintConnection(dAtA, i, uint64(len(m.ClientId))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *IdentifiedConnection) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *IdentifiedConnection) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *IdentifiedConnection) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.DelayPeriod != 0 { + i = encodeVarintConnection(dAtA, i, uint64(m.DelayPeriod)) + i-- + dAtA[i] = 0x30 + } + { + size, err := m.Counterparty.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintConnection(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2a + if m.State != 0 { + i = encodeVarintConnection(dAtA, i, uint64(m.State)) + i-- + dAtA[i] = 0x20 + } + if len(m.Versions) > 0 { + for iNdEx := len(m.Versions) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Versions[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintConnection(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + } + if len(m.ClientId) > 0 { + i -= len(m.ClientId) + copy(dAtA[i:], m.ClientId) + i = encodeVarintConnection(dAtA, i, uint64(len(m.ClientId))) + i-- + dAtA[i] = 0x12 + } + if len(m.Id) > 0 { + i -= len(m.Id) + copy(dAtA[i:], m.Id) + i = encodeVarintConnection(dAtA, i, uint64(len(m.Id))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m 
*Counterparty) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Counterparty) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Counterparty) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.Prefix.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintConnection(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + if len(m.ConnectionId) > 0 { + i -= len(m.ConnectionId) + copy(dAtA[i:], m.ConnectionId) + i = encodeVarintConnection(dAtA, i, uint64(len(m.ConnectionId))) + i-- + dAtA[i] = 0x12 + } + if len(m.ClientId) > 0 { + i -= len(m.ClientId) + copy(dAtA[i:], m.ClientId) + i = encodeVarintConnection(dAtA, i, uint64(len(m.ClientId))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *ClientPaths) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ClientPaths) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ClientPaths) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Paths) > 0 { + for iNdEx := len(m.Paths) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Paths[iNdEx]) + copy(dAtA[i:], m.Paths[iNdEx]) + i = encodeVarintConnection(dAtA, i, uint64(len(m.Paths[iNdEx]))) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *ConnectionPaths) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ConnectionPaths) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ConnectionPaths) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Paths) > 0 { + for iNdEx := len(m.Paths) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Paths[iNdEx]) + copy(dAtA[i:], m.Paths[iNdEx]) + i = encodeVarintConnection(dAtA, i, uint64(len(m.Paths[iNdEx]))) + i-- + dAtA[i] = 0x12 + } + } + if len(m.ClientId) > 0 { + i -= len(m.ClientId) + copy(dAtA[i:], m.ClientId) + i = encodeVarintConnection(dAtA, i, uint64(len(m.ClientId))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *Version) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Version) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Version) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Features) > 0 { + for iNdEx := len(m.Features) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Features[iNdEx]) + copy(dAtA[i:], m.Features[iNdEx]) + i = encodeVarintConnection(dAtA, i, uint64(len(m.Features[iNdEx]))) + i-- + dAtA[i] = 0x12 + } + } + if len(m.Identifier) > 0 { + i -= len(m.Identifier) + copy(dAtA[i:], m.Identifier) + i = encodeVarintConnection(dAtA, i, 
uint64(len(m.Identifier))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func encodeVarintConnection(dAtA []byte, offset int, v uint64) int { + offset -= sovConnection(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *ConnectionEnd) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.ClientId) + if l > 0 { + n += 1 + l + sovConnection(uint64(l)) + } + if len(m.Versions) > 0 { + for _, e := range m.Versions { + l = e.Size() + n += 1 + l + sovConnection(uint64(l)) + } + } + if m.State != 0 { + n += 1 + sovConnection(uint64(m.State)) + } + l = m.Counterparty.Size() + n += 1 + l + sovConnection(uint64(l)) + if m.DelayPeriod != 0 { + n += 1 + sovConnection(uint64(m.DelayPeriod)) + } + return n +} + +func (m *IdentifiedConnection) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Id) + if l > 0 { + n += 1 + l + sovConnection(uint64(l)) + } + l = len(m.ClientId) + if l > 0 { + n += 1 + l + sovConnection(uint64(l)) + } + if len(m.Versions) > 0 { + for _, e := range m.Versions { + l = e.Size() + n += 1 + l + sovConnection(uint64(l)) + } + } + if m.State != 0 { + n += 1 + sovConnection(uint64(m.State)) + } + l = m.Counterparty.Size() + n += 1 + l + sovConnection(uint64(l)) + if m.DelayPeriod != 0 { + n += 1 + sovConnection(uint64(m.DelayPeriod)) + } + return n +} + +func (m *Counterparty) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.ClientId) + if l > 0 { + n += 1 + l + sovConnection(uint64(l)) + } + l = len(m.ConnectionId) + if l > 0 { + n += 1 + l + sovConnection(uint64(l)) + } + l = m.Prefix.Size() + n += 1 + l + sovConnection(uint64(l)) + return n +} + +func (m *ClientPaths) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Paths) > 0 { + for _, s := range m.Paths { + l = len(s) + n += 1 + l + sovConnection(uint64(l)) + } + } + return n +} + +func (m *ConnectionPaths) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.ClientId) + if l > 0 { + n += 1 + l + sovConnection(uint64(l)) + } + if len(m.Paths) > 0 { + for _, s := range m.Paths { + l = len(s) + n += 1 + l + sovConnection(uint64(l)) + } + } + return n +} + +func (m *Version) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Identifier) + if l > 0 { + n += 1 + l + sovConnection(uint64(l)) + } + if len(m.Features) > 0 { + for _, s := range m.Features { + l = len(s) + n += 1 + l + sovConnection(uint64(l)) + } + } + return n +} + +func sovConnection(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozConnection(x uint64) (n int) { + return sovConnection(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *ConnectionEnd) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowConnection + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ConnectionEnd: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ConnectionEnd: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: 
wrong wireType = %d for field ClientId", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowConnection + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthConnection + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthConnection + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ClientId = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Versions", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowConnection + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthConnection + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthConnection + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Versions = append(m.Versions, &Version{}) + if err := m.Versions[len(m.Versions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field State", wireType) + } + m.State = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowConnection + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.State |= State(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Counterparty", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowConnection + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthConnection + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthConnection + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Counterparty.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field DelayPeriod", wireType) + } + m.DelayPeriod = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowConnection + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.DelayPeriod |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipConnection(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthConnection + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *IdentifiedConnection) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowConnection + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { 
+ break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: IdentifiedConnection: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: IdentifiedConnection: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Id", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowConnection + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthConnection + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthConnection + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Id = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ClientId", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowConnection + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthConnection + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthConnection + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ClientId = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Versions", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowConnection + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthConnection + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthConnection + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Versions = append(m.Versions, &Version{}) + if err := m.Versions[len(m.Versions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field State", wireType) + } + m.State = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowConnection + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.State |= State(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Counterparty", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowConnection + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthConnection + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthConnection + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Counterparty.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 6: + if wireType != 0 { + 
return fmt.Errorf("proto: wrong wireType = %d for field DelayPeriod", wireType) + } + m.DelayPeriod = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowConnection + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.DelayPeriod |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipConnection(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthConnection + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Counterparty) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowConnection + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Counterparty: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Counterparty: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ClientId", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowConnection + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthConnection + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthConnection + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ClientId = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ConnectionId", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowConnection + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthConnection + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthConnection + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ConnectionId = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Prefix", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowConnection + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthConnection + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthConnection + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Prefix.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipConnection(dAtA[iNdEx:]) + if err != 
nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthConnection + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ClientPaths) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowConnection + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ClientPaths: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ClientPaths: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Paths", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowConnection + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthConnection + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthConnection + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Paths = append(m.Paths, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipConnection(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthConnection + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ConnectionPaths) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowConnection + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ConnectionPaths: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ConnectionPaths: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ClientId", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowConnection + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthConnection + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthConnection + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ClientId = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Paths", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { 
+ if shift >= 64 { + return ErrIntOverflowConnection + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthConnection + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthConnection + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Paths = append(m.Paths, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipConnection(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthConnection + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Version) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowConnection + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Version: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Version: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Identifier", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowConnection + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthConnection + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthConnection + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Identifier = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Features", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowConnection + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthConnection + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthConnection + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Features = append(m.Features, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipConnection(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthConnection + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipConnection(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowConnection + } + if 
iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowConnection + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowConnection + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthConnection + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupConnection + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthConnection + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthConnection = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowConnection = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupConnection = fmt.Errorf("proto: unexpected end of group") +) diff --git a/core/03-connection/types/connection_test.go b/core/03-connection/types/connection_test.go new file mode 100644 index 0000000000..e7e91538c4 --- /dev/null +++ b/core/03-connection/types/connection_test.go @@ -0,0 +1,121 @@ +package types_test + +import ( + "testing" + + "github.com/stretchr/testify/require" + + clienttypes "github.com/cosmos/cosmos-sdk/x/ibc/core/02-client/types" + "github.com/cosmos/cosmos-sdk/x/ibc/core/03-connection/types" + commitmenttypes "github.com/cosmos/cosmos-sdk/x/ibc/core/23-commitment/types" + ibctesting "github.com/cosmos/cosmos-sdk/x/ibc/testing" +) + +var ( + chainID = "gaiamainnet" + connectionID = "connection-0" + clientID = "clientidone" + connectionID2 = "connectionidtwo" + clientID2 = "clientidtwo" + invalidConnectionID = "(invalidConnectionID)" + clientHeight = clienttypes.NewHeight(0, 6) +) + +func TestConnectionValidateBasic(t *testing.T) { + testCases := []struct { + name string + connection types.ConnectionEnd + expPass bool + }{ + { + "valid connection", + types.ConnectionEnd{clientID, []*types.Version{ibctesting.ConnectionVersion}, types.INIT, types.Counterparty{clientID2, connectionID2, commitmenttypes.NewMerklePrefix([]byte("prefix"))}, 500}, + true, + }, + { + "invalid client id", + types.ConnectionEnd{"(clientID1)", []*types.Version{ibctesting.ConnectionVersion}, types.INIT, types.Counterparty{clientID2, connectionID2, commitmenttypes.NewMerklePrefix([]byte("prefix"))}, 500}, + false, + }, + { + "empty versions", + types.ConnectionEnd{clientID, nil, types.INIT, types.Counterparty{clientID2, connectionID2, commitmenttypes.NewMerklePrefix([]byte("prefix"))}, 500}, + false, + }, + { + "invalid version", + types.ConnectionEnd{clientID, []*types.Version{{}}, types.INIT, types.Counterparty{clientID2, connectionID2, commitmenttypes.NewMerklePrefix([]byte("prefix"))}, 500}, + false, + }, + { + "invalid counterparty", + types.ConnectionEnd{clientID, []*types.Version{ibctesting.ConnectionVersion}, types.INIT, types.Counterparty{clientID2, connectionID2, emptyPrefix}, 500}, + false, + }, + } + + for i, tc := range testCases { + tc := tc + + err := 
tc.connection.ValidateBasic() + if tc.expPass { + require.NoError(t, err, "valid test case %d failed: %s", i, tc.name) + } else { + require.Error(t, err, "invalid test case %d passed: %s", i, tc.name) + } + } +} + +func TestCounterpartyValidateBasic(t *testing.T) { + testCases := []struct { + name string + counterparty types.Counterparty + expPass bool + }{ + {"valid counterparty", types.Counterparty{clientID, connectionID2, commitmenttypes.NewMerklePrefix([]byte("prefix"))}, true}, + {"invalid client id", types.Counterparty{"(InvalidClient)", connectionID2, commitmenttypes.NewMerklePrefix([]byte("prefix"))}, false}, + {"invalid connection id", types.Counterparty{clientID, "(InvalidConnection)", commitmenttypes.NewMerklePrefix([]byte("prefix"))}, false}, + {"invalid prefix", types.Counterparty{clientID, connectionID2, emptyPrefix}, false}, + } + + for i, tc := range testCases { + tc := tc + + err := tc.counterparty.ValidateBasic() + if tc.expPass { + require.NoError(t, err, "valid test case %d failed: %s", i, tc.name) + } else { + require.Error(t, err, "invalid test case %d passed: %s", i, tc.name) + } + } +} + +func TestIdentifiedConnectionValidateBasic(t *testing.T) { + testCases := []struct { + name string + connection types.IdentifiedConnection + expPass bool + }{ + { + "valid connection", + types.NewIdentifiedConnection(clientID, types.ConnectionEnd{clientID, []*types.Version{ibctesting.ConnectionVersion}, types.INIT, types.Counterparty{clientID2, connectionID2, commitmenttypes.NewMerklePrefix([]byte("prefix"))}, 500}), + true, + }, + { + "invalid connection id", + types.NewIdentifiedConnection("(connectionIDONE)", types.ConnectionEnd{clientID, []*types.Version{ibctesting.ConnectionVersion}, types.INIT, types.Counterparty{clientID2, connectionID2, commitmenttypes.NewMerklePrefix([]byte("prefix"))}, 500}), + false, + }, + } + + for i, tc := range testCases { + tc := tc + + err := tc.connection.ValidateBasic() + if tc.expPass { + require.NoError(t, err, "valid test case %d failed: %s", i, tc.name) + } else { + require.Error(t, err, "invalid test case %d passed: %s", i, tc.name) + } + } +} diff --git a/core/03-connection/types/errors.go b/core/03-connection/types/errors.go new file mode 100644 index 0000000000..107a0e087c --- /dev/null +++ b/core/03-connection/types/errors.go @@ -0,0 +1,19 @@ +package types + +import ( + sdkerrors "github.com/cosmos/cosmos-sdk/types/errors" +) + +// IBC connection sentinel errors +var ( + ErrConnectionExists = sdkerrors.Register(SubModuleName, 2, "connection already exists") + ErrConnectionNotFound = sdkerrors.Register(SubModuleName, 3, "connection not found") + ErrClientConnectionPathsNotFound = sdkerrors.Register(SubModuleName, 4, "light client connection paths not found") + ErrConnectionPath = sdkerrors.Register(SubModuleName, 5, "connection path is not associated to the given light client") + ErrInvalidConnectionState = sdkerrors.Register(SubModuleName, 6, "invalid connection state") + ErrInvalidCounterparty = sdkerrors.Register(SubModuleName, 7, "invalid counterparty connection") + ErrInvalidConnection = sdkerrors.Register(SubModuleName, 8, "invalid connection") + ErrInvalidVersion = sdkerrors.Register(SubModuleName, 9, "invalid connection version") + ErrVersionNegotiationFailed = sdkerrors.Register(SubModuleName, 10, "connection version negotiation failed") + ErrInvalidConnectionIdentifier = sdkerrors.Register(SubModuleName, 11, "invalid connection identifier") +) diff --git a/core/03-connection/types/events.go b/core/03-connection/types/events.go 
new file mode 100644 index 0000000000..3cb5997bd1 --- /dev/null +++ b/core/03-connection/types/events.go @@ -0,0 +1,25 @@ +package types + +import ( + "fmt" + + host "github.com/cosmos/cosmos-sdk/x/ibc/core/24-host" +) + +// IBC connection events +const ( + AttributeKeyConnectionID = "connection_id" + AttributeKeyClientID = "client_id" + AttributeKeyCounterpartyClientID = "counterparty_client_id" + AttributeKeyCounterpartyConnectionID = "counterparty_connection_id" +) + +// IBC connection events vars +var ( + EventTypeConnectionOpenInit = MsgConnectionOpenInit{}.Type() + EventTypeConnectionOpenTry = MsgConnectionOpenTry{}.Type() + EventTypeConnectionOpenAck = MsgConnectionOpenAck{}.Type() + EventTypeConnectionOpenConfirm = MsgConnectionOpenConfirm{}.Type() + + AttributeValueCategory = fmt.Sprintf("%s_%s", host.ModuleName, SubModuleName) +) diff --git a/core/03-connection/types/expected_keepers.go b/core/03-connection/types/expected_keepers.go new file mode 100644 index 0000000000..9fc9958671 --- /dev/null +++ b/core/03-connection/types/expected_keepers.go @@ -0,0 +1,16 @@ +package types + +import ( + sdk "github.com/cosmos/cosmos-sdk/types" + "github.com/cosmos/cosmos-sdk/x/ibc/core/exported" +) + +// ClientKeeper defines the expected IBC client keeper +type ClientKeeper interface { + GetClientState(ctx sdk.Context, clientID string) (exported.ClientState, bool) + GetClientConsensusState(ctx sdk.Context, clientID string, height exported.Height) (exported.ConsensusState, bool) + GetSelfConsensusState(ctx sdk.Context, height exported.Height) (exported.ConsensusState, bool) + ValidateSelfClient(ctx sdk.Context, clientState exported.ClientState) error + IterateClients(ctx sdk.Context, cb func(string, exported.ClientState) bool) + ClientStore(ctx sdk.Context, clientID string) sdk.KVStore +} diff --git a/core/03-connection/types/genesis.go b/core/03-connection/types/genesis.go new file mode 100644 index 0000000000..b10c300a84 --- /dev/null +++ b/core/03-connection/types/genesis.go @@ -0,0 +1,76 @@ +package types + +import ( + "fmt" + + host "github.com/cosmos/cosmos-sdk/x/ibc/core/24-host" +) + +// NewConnectionPaths creates a ConnectionPaths instance. +func NewConnectionPaths(id string, paths []string) ConnectionPaths { + return ConnectionPaths{ + ClientId: id, + Paths: paths, + } +} + +// NewGenesisState creates a GenesisState instance. +func NewGenesisState( + connections []IdentifiedConnection, connPaths []ConnectionPaths, + nextConnectionSequence uint64, +) GenesisState { + return GenesisState{ + Connections: connections, + ClientConnectionPaths: connPaths, + NextConnectionSequence: nextConnectionSequence, + } +} + +// DefaultGenesisState returns the ibc connection submodule's default genesis state. +func DefaultGenesisState() GenesisState { + return GenesisState{ + Connections: []IdentifiedConnection{}, + ClientConnectionPaths: []ConnectionPaths{}, + NextConnectionSequence: 0, + } +} + +// Validate performs basic genesis state validation returning an error upon any +// failure. +func (gs GenesisState) Validate() error { + // keep track of the max sequence to ensure it is less than + // the next sequence used in creating connection identifiers.
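+ // For example (hypothetical identifiers): with connections "connection-3" and
+ // "connection-10" the maximum sequence is 10, so NextConnectionSequence must be
+ // at least 11 for Validate to pass.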
+ var maxSequence uint64 = 0 + + for i, conn := range gs.Connections { + sequence, err := ParseConnectionSequence(conn.Id) + if err != nil { + return err + } + + if sequence > maxSequence { + maxSequence = sequence + } + + if err := conn.ValidateBasic(); err != nil { + return fmt.Errorf("invalid connection %v index %d: %w", conn, i, err) + } + } + + for i, conPaths := range gs.ClientConnectionPaths { + if err := host.ClientIdentifierValidator(conPaths.ClientId); err != nil { + return fmt.Errorf("invalid client connection path %d: %w", i, err) + } + for _, connectionID := range conPaths.Paths { + if err := host.ConnectionIdentifierValidator(connectionID); err != nil { + return fmt.Errorf("invalid client connection ID (%s) in connection paths %d: %w", connectionID, i, err) + } + } + } + + if maxSequence != 0 && maxSequence >= gs.NextConnectionSequence { + return fmt.Errorf("next connection sequence %d must be greater than maximum sequence used in connection identifier %d", gs.NextConnectionSequence, maxSequence) + } + + return nil +} diff --git a/core/03-connection/types/genesis.pb.go b/core/03-connection/types/genesis.pb.go new file mode 100644 index 0000000000..5dce20ca7b --- /dev/null +++ b/core/03-connection/types/genesis.pb.go @@ -0,0 +1,438 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: ibcgo/core/connection/v1/genesis.proto + +package types + +import ( + fmt "fmt" + _ "github.com/gogo/protobuf/gogoproto" + proto "github.com/gogo/protobuf/proto" + io "io" + math "math" + math_bits "math/bits" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +// GenesisState defines the ibc connection submodule's genesis state. 
+type GenesisState struct { + Connections []IdentifiedConnection `protobuf:"bytes,1,rep,name=connections,proto3" json:"connections"` + ClientConnectionPaths []ConnectionPaths `protobuf:"bytes,2,rep,name=client_connection_paths,json=clientConnectionPaths,proto3" json:"client_connection_paths" yaml:"client_connection_paths"` + // the sequence for the next generated connection identifier + NextConnectionSequence uint64 `protobuf:"varint,3,opt,name=next_connection_sequence,json=nextConnectionSequence,proto3" json:"next_connection_sequence,omitempty" yaml:"next_connection_sequence"` +} + +func (m *GenesisState) Reset() { *m = GenesisState{} } +func (m *GenesisState) String() string { return proto.CompactTextString(m) } +func (*GenesisState) ProtoMessage() {} +func (*GenesisState) Descriptor() ([]byte, []int) { + return fileDescriptor_1d3565a164ba596e, []int{0} +} +func (m *GenesisState) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *GenesisState) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_GenesisState.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *GenesisState) XXX_Merge(src proto.Message) { + xxx_messageInfo_GenesisState.Merge(m, src) +} +func (m *GenesisState) XXX_Size() int { + return m.Size() +} +func (m *GenesisState) XXX_DiscardUnknown() { + xxx_messageInfo_GenesisState.DiscardUnknown(m) +} + +var xxx_messageInfo_GenesisState proto.InternalMessageInfo + +func (m *GenesisState) GetConnections() []IdentifiedConnection { + if m != nil { + return m.Connections + } + return nil +} + +func (m *GenesisState) GetClientConnectionPaths() []ConnectionPaths { + if m != nil { + return m.ClientConnectionPaths + } + return nil +} + +func (m *GenesisState) GetNextConnectionSequence() uint64 { + if m != nil { + return m.NextConnectionSequence + } + return 0 +} + +func init() { + proto.RegisterType((*GenesisState)(nil), "ibcgo.core.connection.v1.GenesisState") +} + +func init() { + proto.RegisterFile("ibcgo/core/connection/v1/genesis.proto", fileDescriptor_1d3565a164ba596e) +} + +var fileDescriptor_1d3565a164ba596e = []byte{ + // 322 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x74, 0x91, 0x3f, 0x4f, 0xc2, 0x40, + 0x18, 0xc6, 0x7b, 0x40, 0x1c, 0x8a, 0x53, 0xe3, 0x9f, 0x86, 0xe1, 0x4a, 0x6a, 0x42, 0x60, 0xe0, + 0x4e, 0x64, 0x73, 0xac, 0x83, 0x31, 0x2e, 0x06, 0x12, 0x07, 0x13, 0x43, 0xe8, 0xf1, 0x5a, 0x2e, + 0x81, 0x3b, 0xe4, 0x0e, 0x22, 0x1f, 0xc1, 0xcd, 0x8f, 0xc5, 0x88, 0x9b, 0x53, 0x63, 0xda, 0x6f, + 0xc0, 0x27, 0x30, 0x6d, 0xd5, 0x56, 0x93, 0x6e, 0x97, 0xf7, 0xfd, 0x3d, 0xbf, 0xf7, 0x92, 0xc7, + 0x6c, 0x71, 0x9f, 0x05, 0x92, 0x32, 0xb9, 0x04, 0xca, 0xa4, 0x10, 0xc0, 0x34, 0x97, 0x82, 0xae, + 0x7b, 0x34, 0x00, 0x01, 0x8a, 0x2b, 0xb2, 0x58, 0x4a, 0x2d, 0x2d, 0x3b, 0xe5, 0x48, 0xc2, 0x91, + 0x9c, 0x23, 0xeb, 0x5e, 0xe3, 0x28, 0x90, 0x81, 0x4c, 0x21, 0x9a, 0xbc, 0x32, 0xbe, 0xd1, 0x29, + 0xf5, 0x16, 0xd2, 0x29, 0xea, 0xbe, 0x57, 0xcc, 0xc3, 0xeb, 0xec, 0xd8, 0x50, 0x8f, 0x35, 0x58, + 0xf7, 0x66, 0x3d, 0x87, 0x94, 0x8d, 0x9a, 0xd5, 0x76, 0xfd, 0x82, 0x90, 0xb2, 0x1f, 0x90, 0x9b, + 0x09, 0x08, 0xcd, 0x9f, 0x38, 0x4c, 0xae, 0x7e, 0xe7, 0x5e, 0x6d, 0x1b, 0x3a, 0xc6, 0xa0, 0x28, + 0xb2, 0x5e, 0x91, 0x79, 0xca, 0x66, 0x1c, 0x84, 0x1e, 0xe5, 0xe3, 0xd1, 0x62, 0xac, 0xa7, 0xca, + 0xae, 0xa4, 0x47, 0x3a, 0xe5, 0x47, 0x72, 0xf5, 0x5d, 0x12, 0xf0, 0x5a, 
0x89, 0x7f, 0x1f, 0x3a, + 0x78, 0x33, 0x9e, 0xcf, 0x2e, 0xdd, 0x12, 0xaf, 0x3b, 0x38, 0xce, 0x36, 0xff, 0xe2, 0xd6, 0xa3, + 0x69, 0x0b, 0x78, 0xf9, 0x13, 0x50, 0xf0, 0xbc, 0x02, 0xc1, 0xc0, 0xae, 0x36, 0x51, 0xbb, 0xe6, + 0x9d, 0xed, 0x43, 0xc7, 0xc9, 0xe4, 0x65, 0xa4, 0x3b, 0x38, 0x49, 0x56, 0xb9, 0x7b, 0xf8, 0xbd, + 0xf0, 0x6e, 0xb7, 0x11, 0x46, 0xbb, 0x08, 0xa3, 0xcf, 0x08, 0xa3, 0xb7, 0x18, 0x1b, 0xbb, 0x18, + 0x1b, 0x1f, 0x31, 0x36, 0x1e, 0x7a, 0x01, 0xd7, 0xd3, 0x95, 0x4f, 0x98, 0x9c, 0x53, 0x26, 0xd5, + 0x5c, 0x2a, 0xca, 0x7d, 0xd6, 0xfd, 0xe9, 0xea, 0xbc, 0xdf, 0x2d, 0xd4, 0xa5, 0x37, 0x0b, 0x50, + 0xfe, 0x41, 0xda, 0x53, 0xff, 0x2b, 0x00, 0x00, 0xff, 0xff, 0x31, 0x41, 0xfb, 0xcb, 0x2c, 0x02, + 0x00, 0x00, +} + +func (m *GenesisState) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *GenesisState) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *GenesisState) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.NextConnectionSequence != 0 { + i = encodeVarintGenesis(dAtA, i, uint64(m.NextConnectionSequence)) + i-- + dAtA[i] = 0x18 + } + if len(m.ClientConnectionPaths) > 0 { + for iNdEx := len(m.ClientConnectionPaths) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.ClientConnectionPaths[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenesis(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + if len(m.Connections) > 0 { + for iNdEx := len(m.Connections) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Connections[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenesis(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func encodeVarintGenesis(dAtA []byte, offset int, v uint64) int { + offset -= sovGenesis(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *GenesisState) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Connections) > 0 { + for _, e := range m.Connections { + l = e.Size() + n += 1 + l + sovGenesis(uint64(l)) + } + } + if len(m.ClientConnectionPaths) > 0 { + for _, e := range m.ClientConnectionPaths { + l = e.Size() + n += 1 + l + sovGenesis(uint64(l)) + } + } + if m.NextConnectionSequence != 0 { + n += 1 + sovGenesis(uint64(m.NextConnectionSequence)) + } + return n +} + +func sovGenesis(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozGenesis(x uint64) (n int) { + return sovGenesis(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *GenesisState) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenesis + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: GenesisState: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: GenesisState: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch 
fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Connections", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenesis + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenesis + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenesis + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Connections = append(m.Connections, IdentifiedConnection{}) + if err := m.Connections[len(m.Connections)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ClientConnectionPaths", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenesis + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenesis + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenesis + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ClientConnectionPaths = append(m.ClientConnectionPaths, ConnectionPaths{}) + if err := m.ClientConnectionPaths[len(m.ClientConnectionPaths)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field NextConnectionSequence", wireType) + } + m.NextConnectionSequence = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenesis + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.NextConnectionSequence |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipGenesis(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenesis + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipGenesis(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenesis + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenesis + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenesis + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthGenesis + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupGenesis + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType 
%d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthGenesis + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthGenesis = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowGenesis = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupGenesis = fmt.Errorf("proto: unexpected end of group") +) diff --git a/core/03-connection/types/genesis_test.go b/core/03-connection/types/genesis_test.go new file mode 100644 index 0000000000..846837f9af --- /dev/null +++ b/core/03-connection/types/genesis_test.go @@ -0,0 +1,114 @@ +package types_test + +import ( + "testing" + + "github.com/stretchr/testify/require" + + "github.com/cosmos/cosmos-sdk/x/ibc/core/03-connection/types" + commitmenttypes "github.com/cosmos/cosmos-sdk/x/ibc/core/23-commitment/types" + ibctesting "github.com/cosmos/cosmos-sdk/x/ibc/testing" +) + +func TestValidateGenesis(t *testing.T) { + + testCases := []struct { + name string + genState types.GenesisState + expPass bool + }{ + { + name: "default", + genState: types.DefaultGenesisState(), + expPass: true, + }, + { + name: "valid genesis", + genState: types.NewGenesisState( + []types.IdentifiedConnection{ + types.NewIdentifiedConnection(connectionID, types.NewConnectionEnd(types.INIT, clientID, types.Counterparty{clientID2, connectionID2, commitmenttypes.NewMerklePrefix([]byte("prefix"))}, []*types.Version{ibctesting.ConnectionVersion}, 500)), + }, + []types.ConnectionPaths{ + {clientID, []string{connectionID}}, + }, + 0, + ), + expPass: true, + }, + { + name: "invalid connection", + genState: types.NewGenesisState( + []types.IdentifiedConnection{ + types.NewIdentifiedConnection(connectionID, types.NewConnectionEnd(types.INIT, "(CLIENTIDONE)", types.Counterparty{clientID, connectionID, commitmenttypes.NewMerklePrefix([]byte("prefix"))}, []*types.Version{ibctesting.ConnectionVersion}, 500)), + }, + []types.ConnectionPaths{ + {clientID, []string{connectionID}}, + }, + 0, + ), + expPass: false, + }, + { + name: "invalid client id", + genState: types.NewGenesisState( + []types.IdentifiedConnection{ + types.NewIdentifiedConnection(connectionID, types.NewConnectionEnd(types.INIT, clientID, types.Counterparty{clientID2, connectionID2, commitmenttypes.NewMerklePrefix([]byte("prefix"))}, []*types.Version{ibctesting.ConnectionVersion}, 500)), + }, + []types.ConnectionPaths{ + {"(CLIENTIDONE)", []string{connectionID}}, + }, + 0, + ), + expPass: false, + }, + { + name: "invalid path", + genState: types.NewGenesisState( + []types.IdentifiedConnection{ + types.NewIdentifiedConnection(connectionID, types.NewConnectionEnd(types.INIT, clientID, types.Counterparty{clientID2, connectionID2, commitmenttypes.NewMerklePrefix([]byte("prefix"))}, []*types.Version{ibctesting.ConnectionVersion}, 500)), + }, + []types.ConnectionPaths{ + {clientID, []string{invalidConnectionID}}, + }, + 0, + ), + expPass: false, + }, + { + name: "invalid connection identifier", + genState: types.NewGenesisState( + []types.IdentifiedConnection{ + types.NewIdentifiedConnection("conn-0", types.NewConnectionEnd(types.INIT, clientID, types.Counterparty{clientID2, connectionID2, commitmenttypes.NewMerklePrefix([]byte("prefix"))}, []*types.Version{ibctesting.ConnectionVersion}, 500)), + }, + []types.ConnectionPaths{ + {clientID, []string{connectionID}}, + }, + 0, + ), + expPass: false, + }, + { + name: "next connection sequence is not greater than maximum connection identifier sequence provided", + genState: 
types.NewGenesisState( + []types.IdentifiedConnection{ + types.NewIdentifiedConnection(types.FormatConnectionIdentifier(10), types.NewConnectionEnd(types.INIT, clientID, types.Counterparty{clientID2, connectionID2, commitmenttypes.NewMerklePrefix([]byte("prefix"))}, []*types.Version{ibctesting.ConnectionVersion}, 500)), + }, + []types.ConnectionPaths{ + {clientID, []string{connectionID}}, + }, + 0, + ), + expPass: false, + }, + } + + for _, tc := range testCases { + tc := tc + err := tc.genState.Validate() + if tc.expPass { + require.NoError(t, err, tc.name) + } else { + require.Error(t, err, tc.name) + } + } +} diff --git a/core/03-connection/types/keys.go b/core/03-connection/types/keys.go new file mode 100644 index 0000000000..65af565c2a --- /dev/null +++ b/core/03-connection/types/keys.go @@ -0,0 +1,61 @@ +package types + +import ( + "fmt" + "regexp" + + sdkerrors "github.com/cosmos/cosmos-sdk/types/errors" + host "github.com/cosmos/cosmos-sdk/x/ibc/core/24-host" +) + +const ( + // SubModuleName defines the IBC connection name + SubModuleName = "connection" + + // StoreKey is the store key string for IBC connections + StoreKey = SubModuleName + + // RouterKey is the message route for IBC connections + RouterKey = SubModuleName + + // QuerierRoute is the querier route for IBC connections + QuerierRoute = SubModuleName + + // KeyNextConnectionSequence is the key used to store the next connection sequence in + // the keeper. + KeyNextConnectionSequence = "nextConnectionSequence" + + // ConnectionPrefix is the prefix used when creating a connection identifier + ConnectionPrefix = "connection-" +) + +// FormatConnectionIdentifier returns the connection identifier with the sequence appended. +// This is an SDK-specific format not enforced by the IBC protocol. +func FormatConnectionIdentifier(sequence uint64) string { + return fmt.Sprintf("%s%d", ConnectionPrefix, sequence) +} + +// IsConnectionIDFormat checks if a connectionID is in the format required by the SDK for +// parsing connection identifiers. The connection identifier must be in the form: `connection-{N}`. +var IsConnectionIDFormat = regexp.MustCompile(`^connection-[0-9]{1,20}$`).MatchString + +// IsValidConnectionID checks if the connection identifier is valid and can be parsed to +// the connection identifier format. +func IsValidConnectionID(connectionID string) bool { + _, err := ParseConnectionSequence(connectionID) + return err == nil +} + +// ParseConnectionSequence parses the connection sequence from the connection identifier.
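+// For example (hypothetical inputs): ParseConnectionSequence("connection-7") returns
+// (7, nil), while ParseConnectionSequence("conn-7") does not match the required
+// `connection-{N}` format and returns an error wrapping host.ErrInvalidID.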
+func ParseConnectionSequence(connectionID string) (uint64, error) { + if !IsConnectionIDFormat(connectionID) { + return 0, sdkerrors.Wrap(host.ErrInvalidID, "connection identifier is not in the format: `connection-{N}`") + } + + sequence, err := host.ParseIdentifier(connectionID, ConnectionPrefix) + if err != nil { + return 0, sdkerrors.Wrap(err, "invalid connection identifier") + } + + return sequence, nil +} diff --git a/core/03-connection/types/keys_test.go b/core/03-connection/types/keys_test.go new file mode 100644 index 0000000000..6adb8090f8 --- /dev/null +++ b/core/03-connection/types/keys_test.go @@ -0,0 +1,49 @@ +package types_test + +import ( + "math" + "testing" + + "github.com/stretchr/testify/require" + + "github.com/cosmos/cosmos-sdk/x/ibc/core/03-connection/types" +) + +// tests ParseConnectionSequence and IsValidConnectionID +func TestParseConnectionSequence(t *testing.T) { + testCases := []struct { + name string + connectionID string + expSeq uint64 + expPass bool + }{ + {"valid 0", "connection-0", 0, true}, + {"valid 1", "connection-1", 1, true}, + {"valid large sequence", types.FormatConnectionIdentifier(math.MaxUint64), math.MaxUint64, true}, + // one above uint64 max + {"invalid uint64", "connection-18446744073709551616", 0, false}, + // uint64 == 20 characters + {"invalid large sequence", "connection-2345682193567182931243", 0, false}, + {"capital prefix", "Connection-0", 0, false}, + {"double prefix", "connection-connection-0", 0, false}, + {"missing dash", "connection0", 0, false}, + {"blank id", " ", 0, false}, + {"empty id", "", 0, false}, + {"negative sequence", "connection--1", 0, false}, + } + + for _, tc := range testCases { + + seq, err := types.ParseConnectionSequence(tc.connectionID) + valid := types.IsValidConnectionID(tc.connectionID) + require.Equal(t, tc.expSeq, seq) + + if tc.expPass { + require.NoError(t, err, tc.name) + require.True(t, valid) + } else { + require.Error(t, err, tc.name) + require.False(t, valid) + } + } +} diff --git a/core/03-connection/types/msgs.go b/core/03-connection/types/msgs.go new file mode 100644 index 0000000000..3ba1aed8e7 --- /dev/null +++ b/core/03-connection/types/msgs.go @@ -0,0 +1,354 @@ +package types + +import ( + codectypes "github.com/cosmos/cosmos-sdk/codec/types" + sdk "github.com/cosmos/cosmos-sdk/types" + sdkerrors "github.com/cosmos/cosmos-sdk/types/errors" + clienttypes "github.com/cosmos/cosmos-sdk/x/ibc/core/02-client/types" + commitmenttypes "github.com/cosmos/cosmos-sdk/x/ibc/core/23-commitment/types" + host "github.com/cosmos/cosmos-sdk/x/ibc/core/24-host" + "github.com/cosmos/cosmos-sdk/x/ibc/core/exported" +) + +var ( + _ sdk.Msg = &MsgConnectionOpenInit{} + _ sdk.Msg = &MsgConnectionOpenConfirm{} + _ sdk.Msg = &MsgConnectionOpenAck{} + _ sdk.Msg = &MsgConnectionOpenTry{} + + _ codectypes.UnpackInterfacesMessage = MsgConnectionOpenTry{} + _ codectypes.UnpackInterfacesMessage = MsgConnectionOpenAck{} +) + +// NewMsgConnectionOpenInit creates a new MsgConnectionOpenInit instance. It sets the +// counterparty connection identifier to be empty. 
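+// A minimal usage sketch (hypothetical values; the counterparty prefix and the signer
+// would normally come from the counterparty chain and the local keyring):
+//
+//	msg := NewMsgConnectionOpenInit("07-tendermint-0", "07-tendermint-1",
+//		commitmenttypes.NewMerklePrefix([]byte("ibc")), nil, 0, signer)
+//	if err := msg.ValidateBasic(); err != nil { ... }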
+//nolint:interfacer +func NewMsgConnectionOpenInit( + clientID, counterpartyClientID string, + counterpartyPrefix commitmenttypes.MerklePrefix, + version *Version, delayPeriod uint64, signer sdk.AccAddress, +) *MsgConnectionOpenInit { + // counterparty must have the same delay period + counterparty := NewCounterparty(counterpartyClientID, "", counterpartyPrefix) + return &MsgConnectionOpenInit{ + ClientId: clientID, + Counterparty: counterparty, + Version: version, + DelayPeriod: delayPeriod, + Signer: signer.String(), + } +} + +// Route implements sdk.Msg +func (msg MsgConnectionOpenInit) Route() string { + return host.RouterKey +} + +// Type implements sdk.Msg +func (msg MsgConnectionOpenInit) Type() string { + return "connection_open_init" +} + +// ValidateBasic implements sdk.Msg. +func (msg MsgConnectionOpenInit) ValidateBasic() error { + if err := host.ClientIdentifierValidator(msg.ClientId); err != nil { + return sdkerrors.Wrap(err, "invalid client ID") + } + if msg.Counterparty.ConnectionId != "" { + return sdkerrors.Wrap(ErrInvalidCounterparty, "counterparty connection identifier must be empty") + } + + // NOTE: Version can be nil on MsgConnectionOpenInit + if msg.Version != nil { + if err := ValidateVersion(msg.Version); err != nil { + return sdkerrors.Wrap(err, "basic validation of the provided version failed") + } + } + _, err := sdk.AccAddressFromBech32(msg.Signer) + if err != nil { + return sdkerrors.Wrapf(sdkerrors.ErrInvalidAddress, "string could not be parsed as address: %v", err) + } + return msg.Counterparty.ValidateBasic() +} + +// GetSignBytes implements sdk.Msg. The function will panic since it is used +// for amino transaction verification which IBC does not support. +func (msg MsgConnectionOpenInit) GetSignBytes() []byte { + panic("IBC messages do not support amino") +} + +// GetSigners implements sdk.Msg +func (msg MsgConnectionOpenInit) GetSigners() []sdk.AccAddress { + accAddr, err := sdk.AccAddressFromBech32(msg.Signer) + if err != nil { + panic(err) + } + return []sdk.AccAddress{accAddr} +} + +// NewMsgConnectionOpenTry creates a new MsgConnectionOpenTry instance +//nolint:interfacer +func NewMsgConnectionOpenTry( + previousConnectionID, clientID, counterpartyConnectionID, + counterpartyClientID string, counterpartyClient exported.ClientState, + counterpartyPrefix commitmenttypes.MerklePrefix, + counterpartyVersions []*Version, delayPeriod uint64, + proofInit, proofClient, proofConsensus []byte, + proofHeight, consensusHeight clienttypes.Height, signer sdk.AccAddress, +) *MsgConnectionOpenTry { + counterparty := NewCounterparty(counterpartyClientID, counterpartyConnectionID, counterpartyPrefix) + csAny, _ := clienttypes.PackClientState(counterpartyClient) + return &MsgConnectionOpenTry{ + PreviousConnectionId: previousConnectionID, + ClientId: clientID, + ClientState: csAny, + Counterparty: counterparty, + CounterpartyVersions: counterpartyVersions, + DelayPeriod: delayPeriod, + ProofInit: proofInit, + ProofClient: proofClient, + ProofConsensus: proofConsensus, + ProofHeight: proofHeight, + ConsensusHeight: consensusHeight, + Signer: signer.String(), + } +} + +// Route implements sdk.Msg +func (msg MsgConnectionOpenTry) Route() string { + return host.RouterKey +} + +// Type implements sdk.Msg +func (msg MsgConnectionOpenTry) Type() string { + return "connection_open_try" +} + +// ValidateBasic implements sdk.Msg +func (msg MsgConnectionOpenTry) ValidateBasic() error { + // an empty connection identifier indicates that a connection identifier should be 
generated + if msg.PreviousConnectionId != "" { + if !IsValidConnectionID(msg.PreviousConnectionId) { + return sdkerrors.Wrap(ErrInvalidConnectionIdentifier, "invalid previous connection ID") + } + } + if err := host.ClientIdentifierValidator(msg.ClientId); err != nil { + return sdkerrors.Wrap(err, "invalid client ID") + } + // counterparty validate basic allows empty counterparty connection identifiers + if err := host.ConnectionIdentifierValidator(msg.Counterparty.ConnectionId); err != nil { + return sdkerrors.Wrap(err, "invalid counterparty connection ID") + } + if msg.ClientState == nil { + return sdkerrors.Wrap(clienttypes.ErrInvalidClient, "counterparty client is nil") + } + clientState, err := clienttypes.UnpackClientState(msg.ClientState) + if err != nil { + return sdkerrors.Wrapf(clienttypes.ErrInvalidClient, "unpack err: %v", err) + } + if err := clientState.Validate(); err != nil { + return sdkerrors.Wrap(err, "counterparty client is invalid") + } + if len(msg.CounterpartyVersions) == 0 { + return sdkerrors.Wrap(sdkerrors.ErrInvalidVersion, "empty counterparty versions") + } + for i, version := range msg.CounterpartyVersions { + if err := ValidateVersion(version); err != nil { + return sdkerrors.Wrapf(err, "basic validation failed on version with index %d", i) + } + } + if len(msg.ProofInit) == 0 { + return sdkerrors.Wrap(commitmenttypes.ErrInvalidProof, "cannot submit an empty proof init") + } + if len(msg.ProofClient) == 0 { + return sdkerrors.Wrap(commitmenttypes.ErrInvalidProof, "cannot submit empty proof client") + } + if len(msg.ProofConsensus) == 0 { + return sdkerrors.Wrap(commitmenttypes.ErrInvalidProof, "cannot submit an empty proof of consensus state") + } + if msg.ProofHeight.IsZero() { + return sdkerrors.Wrap(sdkerrors.ErrInvalidHeight, "proof height must be non-zero") + } + if msg.ConsensusHeight.IsZero() { + return sdkerrors.Wrap(sdkerrors.ErrInvalidHeight, "consensus height must be non-zero") + } + _, err = sdk.AccAddressFromBech32(msg.Signer) + if err != nil { + return sdkerrors.Wrapf(sdkerrors.ErrInvalidAddress, "string could not be parsed as address: %v", err) + } + return msg.Counterparty.ValidateBasic() +} + +// UnpackInterfaces implements UnpackInterfacesMessage.UnpackInterfaces +func (msg MsgConnectionOpenTry) UnpackInterfaces(unpacker codectypes.AnyUnpacker) error { + return unpacker.UnpackAny(msg.ClientState, new(exported.ClientState)) +} + +// GetSignBytes implements sdk.Msg. The function will panic since it is used +// for amino transaction verification which IBC does not support. 
+func (msg MsgConnectionOpenTry) GetSignBytes() []byte { + panic("IBC messages do not support amino") +} + +// GetSigners implements sdk.Msg +func (msg MsgConnectionOpenTry) GetSigners() []sdk.AccAddress { + accAddr, err := sdk.AccAddressFromBech32(msg.Signer) + if err != nil { + panic(err) + } + return []sdk.AccAddress{accAddr} +} + +// NewMsgConnectionOpenAck creates a new MsgConnectionOpenAck instance +//nolint:interfacer +func NewMsgConnectionOpenAck( + connectionID, counterpartyConnectionID string, counterpartyClient exported.ClientState, + proofTry, proofClient, proofConsensus []byte, + proofHeight, consensusHeight clienttypes.Height, + version *Version, + signer sdk.AccAddress, +) *MsgConnectionOpenAck { + csAny, _ := clienttypes.PackClientState(counterpartyClient) + return &MsgConnectionOpenAck{ + ConnectionId: connectionID, + CounterpartyConnectionId: counterpartyConnectionID, + ClientState: csAny, + ProofTry: proofTry, + ProofClient: proofClient, + ProofConsensus: proofConsensus, + ProofHeight: proofHeight, + ConsensusHeight: consensusHeight, + Version: version, + Signer: signer.String(), + } +} + +// UnpackInterfaces implements UnpackInterfacesMessage.UnpackInterfaces +func (msg MsgConnectionOpenAck) UnpackInterfaces(unpacker codectypes.AnyUnpacker) error { + return unpacker.UnpackAny(msg.ClientState, new(exported.ClientState)) +} + +// Route implements sdk.Msg +func (msg MsgConnectionOpenAck) Route() string { + return host.RouterKey +} + +// Type implements sdk.Msg +func (msg MsgConnectionOpenAck) Type() string { + return "connection_open_ack" +} + +// ValidateBasic implements sdk.Msg +func (msg MsgConnectionOpenAck) ValidateBasic() error { + if !IsValidConnectionID(msg.ConnectionId) { + return ErrInvalidConnectionIdentifier + } + if err := host.ConnectionIdentifierValidator(msg.CounterpartyConnectionId); err != nil { + return sdkerrors.Wrap(err, "invalid counterparty connection ID") + } + if err := ValidateVersion(msg.Version); err != nil { + return err + } + if msg.ClientState == nil { + return sdkerrors.Wrap(clienttypes.ErrInvalidClient, "counterparty client is nil") + } + clientState, err := clienttypes.UnpackClientState(msg.ClientState) + if err != nil { + return sdkerrors.Wrapf(clienttypes.ErrInvalidClient, "unpack err: %v", err) + } + if err := clientState.Validate(); err != nil { + return sdkerrors.Wrap(err, "counterparty client is invalid") + } + if len(msg.ProofTry) == 0 { + return sdkerrors.Wrap(commitmenttypes.ErrInvalidProof, "cannot submit an empty proof try") + } + if len(msg.ProofClient) == 0 { + return sdkerrors.Wrap(commitmenttypes.ErrInvalidProof, "cannot submit empty proof client") + } + if len(msg.ProofConsensus) == 0 { + return sdkerrors.Wrap(commitmenttypes.ErrInvalidProof, "cannot submit an empty proof of consensus state") + } + if msg.ProofHeight.IsZero() { + return sdkerrors.Wrap(sdkerrors.ErrInvalidHeight, "proof height must be non-zero") + } + if msg.ConsensusHeight.IsZero() { + return sdkerrors.Wrap(sdkerrors.ErrInvalidHeight, "consensus height must be non-zero") + } + _, err = sdk.AccAddressFromBech32(msg.Signer) + if err != nil { + return sdkerrors.Wrapf(sdkerrors.ErrInvalidAddress, "string could not be parsed as address: %v", err) + } + return nil +} + +// GetSignBytes implements sdk.Msg. The function will panic since it is used +// for amino transaction verification which IBC does not support. 
+func (msg MsgConnectionOpenAck) GetSignBytes() []byte { + panic("IBC messages do not support amino") +} + +// GetSigners implements sdk.Msg +func (msg MsgConnectionOpenAck) GetSigners() []sdk.AccAddress { + accAddr, err := sdk.AccAddressFromBech32(msg.Signer) + if err != nil { + panic(err) + } + return []sdk.AccAddress{accAddr} +} + +// NewMsgConnectionOpenConfirm creates a new MsgConnectionOpenConfirm instance +//nolint:interfacer +func NewMsgConnectionOpenConfirm( + connectionID string, proofAck []byte, proofHeight clienttypes.Height, + signer sdk.AccAddress, +) *MsgConnectionOpenConfirm { + return &MsgConnectionOpenConfirm{ + ConnectionId: connectionID, + ProofAck: proofAck, + ProofHeight: proofHeight, + Signer: signer.String(), + } +} + +// Route implements sdk.Msg +func (msg MsgConnectionOpenConfirm) Route() string { + return host.RouterKey +} + +// Type implements sdk.Msg +func (msg MsgConnectionOpenConfirm) Type() string { + return "connection_open_confirm" +} + +// ValidateBasic implements sdk.Msg +func (msg MsgConnectionOpenConfirm) ValidateBasic() error { + if !IsValidConnectionID(msg.ConnectionId) { + return ErrInvalidConnectionIdentifier + } + if len(msg.ProofAck) == 0 { + return sdkerrors.Wrap(commitmenttypes.ErrInvalidProof, "cannot submit an empty proof ack") + } + if msg.ProofHeight.IsZero() { + return sdkerrors.Wrap(sdkerrors.ErrInvalidHeight, "proof height must be non-zero") + } + _, err := sdk.AccAddressFromBech32(msg.Signer) + if err != nil { + return sdkerrors.Wrapf(sdkerrors.ErrInvalidAddress, "string could not be parsed as address: %v", err) + } + return nil +} + +// GetSignBytes implements sdk.Msg. The function will panic since it is used +// for amino transaction verification which IBC does not support. +func (msg MsgConnectionOpenConfirm) GetSignBytes() []byte { + panic("IBC messages do not support amino") +} + +// GetSigners implements sdk.Msg +func (msg MsgConnectionOpenConfirm) GetSigners() []sdk.AccAddress { + accAddr, err := sdk.AccAddressFromBech32(msg.Signer) + if err != nil { + panic(err) + } + return []sdk.AccAddress{accAddr} +} diff --git a/core/03-connection/types/msgs_test.go b/core/03-connection/types/msgs_test.go new file mode 100644 index 0000000000..6aff3b0904 --- /dev/null +++ b/core/03-connection/types/msgs_test.go @@ -0,0 +1,243 @@ +package types_test + +import ( + "fmt" + "testing" + "time" + + "github.com/stretchr/testify/suite" + + abci "github.com/tendermint/tendermint/abci/types" + dbm "github.com/tendermint/tm-db" + + "github.com/cosmos/cosmos-sdk/simapp" + "github.com/cosmos/cosmos-sdk/store/iavl" + "github.com/cosmos/cosmos-sdk/store/rootmulti" + storetypes "github.com/cosmos/cosmos-sdk/store/types" + sdk "github.com/cosmos/cosmos-sdk/types" + clienttypes "github.com/cosmos/cosmos-sdk/x/ibc/core/02-client/types" + "github.com/cosmos/cosmos-sdk/x/ibc/core/03-connection/types" + commitmenttypes "github.com/cosmos/cosmos-sdk/x/ibc/core/23-commitment/types" + ibctmtypes "github.com/cosmos/cosmos-sdk/x/ibc/light-clients/07-tendermint/types" + ibctesting "github.com/cosmos/cosmos-sdk/x/ibc/testing" +) + +var ( + emptyPrefix = commitmenttypes.MerklePrefix{} + emptyProof = []byte{} +) + +type MsgTestSuite struct { + suite.Suite + + coordinator *ibctesting.Coordinator + + chainA *ibctesting.TestChain + chainB *ibctesting.TestChain + + proof []byte +} + +func (suite *MsgTestSuite) SetupTest() { + suite.coordinator = ibctesting.NewCoordinator(suite.T(), 2) + + suite.chainA = suite.coordinator.GetChain(ibctesting.GetChainID(0)) + suite.chainB = 
suite.coordinator.GetChain(ibctesting.GetChainID(1)) + + app := simapp.Setup(false) + db := dbm.NewMemDB() + store := rootmulti.NewStore(db) + storeKey := storetypes.NewKVStoreKey("iavlStoreKey") + + store.MountStoreWithDB(storeKey, storetypes.StoreTypeIAVL, nil) + store.LoadVersion(0) + iavlStore := store.GetCommitStore(storeKey).(*iavl.Store) + + iavlStore.Set([]byte("KEY"), []byte("VALUE")) + _ = store.Commit() + + res := store.Query(abci.RequestQuery{ + Path: fmt.Sprintf("/%s/key", storeKey.Name()), // required path to get key/value+proof + Data: []byte("KEY"), + Prove: true, + }) + + merkleProof, err := commitmenttypes.ConvertProofs(res.ProofOps) + suite.Require().NoError(err) + proof, err := app.AppCodec().MarshalBinaryBare(&merkleProof) + suite.Require().NoError(err) + + suite.proof = proof + +} + +func TestMsgTestSuite(t *testing.T) { + suite.Run(t, new(MsgTestSuite)) +} + +func (suite *MsgTestSuite) TestNewMsgConnectionOpenInit() { + prefix := commitmenttypes.NewMerklePrefix([]byte("storePrefixKey")) + signer, _ := sdk.AccAddressFromBech32("cosmos1ckgw5d7jfj7wwxjzs9fdrdev9vc8dzcw3n2lht") + // empty versions are considered valid, the default compatible versions + // will be used in protocol. + var version *types.Version + + var testCases = []struct { + name string + msg *types.MsgConnectionOpenInit + expPass bool + }{ + {"invalid client ID", types.NewMsgConnectionOpenInit("test/iris", "clienttotest", prefix, version, 500, signer), false}, + {"invalid counterparty client ID", types.NewMsgConnectionOpenInit("clienttotest", "(clienttotest)", prefix, version, 500, signer), false}, + {"invalid counterparty connection ID", &types.MsgConnectionOpenInit{connectionID, types.NewCounterparty("clienttotest", "connectiontotest", prefix), version, 500, signer.String()}, false}, + {"empty counterparty prefix", types.NewMsgConnectionOpenInit("clienttotest", "clienttotest", emptyPrefix, version, 500, signer), false}, + {"supplied version fails basic validation", types.NewMsgConnectionOpenInit("clienttotest", "clienttotest", prefix, &types.Version{}, 500, signer), false}, + {"empty signer", types.NewMsgConnectionOpenInit("clienttotest", "clienttotest", prefix, version, 500, nil), false}, + {"success", types.NewMsgConnectionOpenInit("clienttotest", "clienttotest", prefix, version, 500, signer), true}, + } + + for _, tc := range testCases { + err := tc.msg.ValidateBasic() + if tc.expPass { + suite.Require().NoError(err, tc.name) + } else { + suite.Require().Error(err, tc.name) + } + } +} + +func (suite *MsgTestSuite) TestNewMsgConnectionOpenTry() { + prefix := commitmenttypes.NewMerklePrefix([]byte("storePrefixKey")) + signer, _ := sdk.AccAddressFromBech32("cosmos1ckgw5d7jfj7wwxjzs9fdrdev9vc8dzcw3n2lht") + + clientState := ibctmtypes.NewClientState( + chainID, ibctmtypes.DefaultTrustLevel, ibctesting.TrustingPeriod, ibctesting.UnbondingPeriod, ibctesting.MaxClockDrift, clientHeight, commitmenttypes.GetSDKSpecs(), ibctesting.UpgradePath, false, false, + ) + + // Pack consensus state into any to test unpacking error + consState := ibctmtypes.NewConsensusState( + time.Now(), commitmenttypes.NewMerkleRoot([]byte("root")), []byte("nextValsHash"), + ) + invalidAny := clienttypes.MustPackConsensusState(consState) + counterparty := types.NewCounterparty("connectiontotest", "clienttotest", prefix) + + // invalidClientState fails validateBasic + invalidClient := ibctmtypes.NewClientState( + chainID, ibctmtypes.DefaultTrustLevel, ibctesting.TrustingPeriod, ibctesting.UnbondingPeriod, ibctesting.MaxClockDrift, 
clienttypes.ZeroHeight(), commitmenttypes.GetSDKSpecs(), ibctesting.UpgradePath, false, false, + ) + + var testCases = []struct { + name string + msg *types.MsgConnectionOpenTry + expPass bool + }{ + {"invalid connection ID", types.NewMsgConnectionOpenTry("test/conn1", "clienttotesta", "connectiontotest", "clienttotest", clientState, prefix, []*types.Version{ibctesting.ConnectionVersion}, 500, suite.proof, suite.proof, suite.proof, clientHeight, clientHeight, signer), false}, + {"invalid connection ID", types.NewMsgConnectionOpenTry("(invalidconnection)", "clienttotesta", "connectiontotest", "clienttotest", clientState, prefix, []*types.Version{ibctesting.ConnectionVersion}, 500, suite.proof, suite.proof, suite.proof, clientHeight, clientHeight, signer), false}, + {"invalid client ID", types.NewMsgConnectionOpenTry(connectionID, "test/iris", "connectiontotest", "clienttotest", clientState, prefix, []*types.Version{ibctesting.ConnectionVersion}, 500, suite.proof, suite.proof, suite.proof, clientHeight, clientHeight, signer), false}, + {"invalid counterparty connection ID", types.NewMsgConnectionOpenTry(connectionID, "clienttotesta", "ibc/test", "clienttotest", clientState, prefix, []*types.Version{ibctesting.ConnectionVersion}, 500, suite.proof, suite.proof, suite.proof, clientHeight, clientHeight, signer), false}, + {"invalid counterparty client ID", types.NewMsgConnectionOpenTry(connectionID, "clienttotesta", "connectiontotest", "test/conn1", clientState, prefix, []*types.Version{ibctesting.ConnectionVersion}, 500, suite.proof, suite.proof, suite.proof, clientHeight, clientHeight, signer), false}, + {"invalid nil counterparty client", types.NewMsgConnectionOpenTry(connectionID, "clienttotesta", "connectiontotest", "clienttotest", nil, prefix, []*types.Version{ibctesting.ConnectionVersion}, 500, suite.proof, suite.proof, suite.proof, clientHeight, clientHeight, signer), false}, + {"invalid client unpacking", &types.MsgConnectionOpenTry{connectionID, "clienttotesta", invalidAny, counterparty, 500, []*types.Version{ibctesting.ConnectionVersion}, clientHeight, suite.proof, suite.proof, suite.proof, clientHeight, signer.String()}, false}, + {"counterparty failed validate", types.NewMsgConnectionOpenTry(connectionID, "clienttotesta", "connectiontotest", "clienttotest", invalidClient, prefix, []*types.Version{ibctesting.ConnectionVersion}, 500, suite.proof, suite.proof, suite.proof, clientHeight, clientHeight, signer), false}, + {"empty counterparty prefix", types.NewMsgConnectionOpenTry(connectionID, "clienttotesta", "connectiontotest", "clienttotest", clientState, emptyPrefix, []*types.Version{ibctesting.ConnectionVersion}, 500, suite.proof, suite.proof, suite.proof, clientHeight, clientHeight, signer), false}, + {"empty counterpartyVersions", types.NewMsgConnectionOpenTry(connectionID, "clienttotesta", "connectiontotest", "clienttotest", clientState, prefix, []*types.Version{}, 500, suite.proof, suite.proof, suite.proof, clientHeight, clientHeight, signer), false}, + {"empty proofInit", types.NewMsgConnectionOpenTry(connectionID, "clienttotesta", "connectiontotest", "clienttotest", clientState, prefix, []*types.Version{ibctesting.ConnectionVersion}, 500, emptyProof, suite.proof, suite.proof, clientHeight, clientHeight, signer), false}, + {"empty proofClient", types.NewMsgConnectionOpenTry(connectionID, "clienttotesta", "connectiontotest", "clienttotest", clientState, prefix, []*types.Version{ibctesting.ConnectionVersion}, 500, suite.proof, emptyProof, suite.proof, clientHeight, clientHeight, 
signer), false}, + {"empty proofConsensus", types.NewMsgConnectionOpenTry(connectionID, "clienttotesta", "connectiontotest", "clienttotest", clientState, prefix, []*types.Version{ibctesting.ConnectionVersion}, 500, suite.proof, suite.proof, emptyProof, clientHeight, clientHeight, signer), false}, + {"invalid proofHeight", types.NewMsgConnectionOpenTry(connectionID, "clienttotesta", "connectiontotest", "clienttotest", clientState, prefix, []*types.Version{ibctesting.ConnectionVersion}, 500, suite.proof, suite.proof, suite.proof, clienttypes.ZeroHeight(), clientHeight, signer), false}, + {"invalid consensusHeight", types.NewMsgConnectionOpenTry(connectionID, "clienttotesta", "connectiontotest", "clienttotest", clientState, prefix, []*types.Version{ibctesting.ConnectionVersion}, 500, suite.proof, suite.proof, suite.proof, clientHeight, clienttypes.ZeroHeight(), signer), false}, + {"empty signer", types.NewMsgConnectionOpenTry(connectionID, "clienttotesta", "connectiontotest", "clienttotest", clientState, prefix, []*types.Version{ibctesting.ConnectionVersion}, 500, suite.proof, suite.proof, suite.proof, clientHeight, clientHeight, nil), false}, + {"success", types.NewMsgConnectionOpenTry(connectionID, "clienttotesta", "connectiontotest", "clienttotest", clientState, prefix, []*types.Version{ibctesting.ConnectionVersion}, 500, suite.proof, suite.proof, suite.proof, clientHeight, clientHeight, signer), true}, + {"invalid version", types.NewMsgConnectionOpenTry(connectionID, "clienttotesta", "connectiontotest", "clienttotest", clientState, prefix, []*types.Version{{}}, 500, suite.proof, suite.proof, suite.proof, clientHeight, clientHeight, signer), false}, + } + + for _, tc := range testCases { + err := tc.msg.ValidateBasic() + if tc.expPass { + suite.Require().NoError(err, tc.name) + } else { + suite.Require().Error(err, tc.name) + } + } +} + +func (suite *MsgTestSuite) TestNewMsgConnectionOpenAck() { + signer, _ := sdk.AccAddressFromBech32("cosmos1ckgw5d7jfj7wwxjzs9fdrdev9vc8dzcw3n2lht") + clientState := ibctmtypes.NewClientState( + chainID, ibctmtypes.DefaultTrustLevel, ibctesting.TrustingPeriod, ibctesting.UnbondingPeriod, ibctesting.MaxClockDrift, clientHeight, commitmenttypes.GetSDKSpecs(), ibctesting.UpgradePath, false, false, + ) + + // Pack consensus state into any to test unpacking error + consState := ibctmtypes.NewConsensusState( + time.Now(), commitmenttypes.NewMerkleRoot([]byte("root")), []byte("nextValsHash"), + ) + invalidAny := clienttypes.MustPackConsensusState(consState) + + // invalidClientState fails validateBasic + invalidClient := ibctmtypes.NewClientState( + chainID, ibctmtypes.DefaultTrustLevel, ibctesting.TrustingPeriod, ibctesting.UnbondingPeriod, ibctesting.MaxClockDrift, clienttypes.ZeroHeight(), commitmenttypes.GetSDKSpecs(), ibctesting.UpgradePath, false, false, + ) + connectionID := "connection-0" + + var testCases = []struct { + name string + msg *types.MsgConnectionOpenAck + expPass bool + }{ + {"invalid connection ID", types.NewMsgConnectionOpenAck("test/conn1", connectionID, clientState, suite.proof, suite.proof, suite.proof, clientHeight, clientHeight, ibctesting.ConnectionVersion, signer), false}, + {"invalid counterparty connection ID", types.NewMsgConnectionOpenAck(connectionID, "test/conn1", clientState, suite.proof, suite.proof, suite.proof, clientHeight, clientHeight, ibctesting.ConnectionVersion, signer), false}, + {"invalid nil counterparty client", types.NewMsgConnectionOpenAck(connectionID, connectionID, nil, suite.proof, suite.proof, suite.proof, 
clientHeight, clientHeight, ibctesting.ConnectionVersion, signer), false}, + {"invalid unpacking counterparty client", &types.MsgConnectionOpenAck{connectionID, connectionID, ibctesting.ConnectionVersion, invalidAny, clientHeight, suite.proof, suite.proof, suite.proof, clientHeight, signer.String()}, false}, + {"counterparty client failed validate", types.NewMsgConnectionOpenAck(connectionID, connectionID, invalidClient, suite.proof, suite.proof, suite.proof, clientHeight, clientHeight, ibctesting.ConnectionVersion, signer), false}, + {"empty proofTry", types.NewMsgConnectionOpenAck(connectionID, connectionID, clientState, emptyProof, suite.proof, suite.proof, clientHeight, clientHeight, ibctesting.ConnectionVersion, signer), false}, + {"empty proofClient", types.NewMsgConnectionOpenAck(connectionID, connectionID, clientState, suite.proof, emptyProof, suite.proof, clientHeight, clientHeight, ibctesting.ConnectionVersion, signer), false}, + {"empty proofConsensus", types.NewMsgConnectionOpenAck(connectionID, connectionID, clientState, suite.proof, suite.proof, emptyProof, clientHeight, clientHeight, ibctesting.ConnectionVersion, signer), false}, + {"invalid proofHeight", types.NewMsgConnectionOpenAck(connectionID, connectionID, clientState, suite.proof, suite.proof, suite.proof, clienttypes.ZeroHeight(), clientHeight, ibctesting.ConnectionVersion, signer), false}, + {"invalid consensusHeight", types.NewMsgConnectionOpenAck(connectionID, connectionID, clientState, suite.proof, suite.proof, suite.proof, clientHeight, clienttypes.ZeroHeight(), ibctesting.ConnectionVersion, signer), false}, + {"invalid version", types.NewMsgConnectionOpenAck(connectionID, connectionID, clientState, suite.proof, suite.proof, suite.proof, clientHeight, clientHeight, &types.Version{}, signer), false}, + {"empty signer", types.NewMsgConnectionOpenAck(connectionID, connectionID, clientState, suite.proof, suite.proof, suite.proof, clientHeight, clientHeight, ibctesting.ConnectionVersion, nil), false}, + {"success", types.NewMsgConnectionOpenAck(connectionID, connectionID, clientState, suite.proof, suite.proof, suite.proof, clientHeight, clientHeight, ibctesting.ConnectionVersion, signer), true}, + } + + for _, tc := range testCases { + err := tc.msg.ValidateBasic() + if tc.expPass { + suite.Require().NoError(err, tc.name) + } else { + suite.Require().Error(err, tc.name) + } + } +} + +func (suite *MsgTestSuite) TestNewMsgConnectionOpenConfirm() { + signer, _ := sdk.AccAddressFromBech32("cosmos1ckgw5d7jfj7wwxjzs9fdrdev9vc8dzcw3n2lht") + + testMsgs := []*types.MsgConnectionOpenConfirm{ + types.NewMsgConnectionOpenConfirm("test/conn1", suite.proof, clientHeight, signer), + types.NewMsgConnectionOpenConfirm(connectionID, emptyProof, clientHeight, signer), + types.NewMsgConnectionOpenConfirm(connectionID, suite.proof, clienttypes.ZeroHeight(), signer), + types.NewMsgConnectionOpenConfirm(connectionID, suite.proof, clientHeight, nil), + types.NewMsgConnectionOpenConfirm(connectionID, suite.proof, clientHeight, signer), + } + + var testCases = []struct { + msg *types.MsgConnectionOpenConfirm + expPass bool + errMsg string + }{ + {testMsgs[0], false, "invalid connection ID"}, + {testMsgs[1], false, "empty proofAck"}, + {testMsgs[2], false, "invalid proofHeight"}, + {testMsgs[3], false, "empty signer"}, + {testMsgs[4], true, "success"}, + } + + for i, tc := range testCases { + err := tc.msg.ValidateBasic() + if tc.expPass { + suite.Require().NoError(err, "Msg %d failed: %s", i, tc.errMsg) + } else { + 
suite.Require().Error(err, "Invalid Msg %d passed: %s", i, tc.errMsg) + } + } +} diff --git a/core/03-connection/types/query.go b/core/03-connection/types/query.go new file mode 100644 index 0000000000..7661b38d9b --- /dev/null +++ b/core/03-connection/types/query.go @@ -0,0 +1,70 @@ +package types + +import ( + codectypes "github.com/cosmos/cosmos-sdk/codec/types" + clienttypes "github.com/cosmos/cosmos-sdk/x/ibc/core/02-client/types" + "github.com/cosmos/cosmos-sdk/x/ibc/core/exported" +) + +var ( + _ codectypes.UnpackInterfacesMessage = QueryConnectionClientStateResponse{} + _ codectypes.UnpackInterfacesMessage = QueryConnectionConsensusStateResponse{} +) + +// NewQueryConnectionResponse creates a new QueryConnectionResponse instance +func NewQueryConnectionResponse( + connection ConnectionEnd, proof []byte, height clienttypes.Height, +) *QueryConnectionResponse { + return &QueryConnectionResponse{ + Connection: &connection, + Proof: proof, + ProofHeight: height, + } +} + +// NewQueryClientConnectionsResponse creates a new QueryClientConnectionsResponse instance +func NewQueryClientConnectionsResponse( + connectionPaths []string, proof []byte, height clienttypes.Height, +) *QueryClientConnectionsResponse { + return &QueryClientConnectionsResponse{ + ConnectionPaths: connectionPaths, + Proof: proof, + ProofHeight: height, + } +} + +// NewQueryClientConnectionsRequest creates a new QueryClientConnectionsRequest instance +func NewQueryClientConnectionsRequest(clientID string) *QueryClientConnectionsRequest { + return &QueryClientConnectionsRequest{ + ClientId: clientID, + } +} + +// NewQueryConnectionClientStateResponse creates a new QueryConnectionClientStateResponse instance +func NewQueryConnectionClientStateResponse(identifiedClientState clienttypes.IdentifiedClientState, proof []byte, height clienttypes.Height) *QueryConnectionClientStateResponse { + return &QueryConnectionClientStateResponse{ + IdentifiedClientState: &identifiedClientState, + Proof: proof, + ProofHeight: height, + } +} + +// UnpackInterfaces implements UnpackInterfacesMessage.UnpackInterfaces +func (qccsr QueryConnectionClientStateResponse) UnpackInterfaces(unpacker codectypes.AnyUnpacker) error { + return qccsr.IdentifiedClientState.UnpackInterfaces(unpacker) +} + +// NewQueryConnectionConsensusStateResponse creates a new QueryConnectionConsensusStateResponse instance +func NewQueryConnectionConsensusStateResponse(clientID string, anyConsensusState *codectypes.Any, consensusStateHeight exported.Height, proof []byte, height clienttypes.Height) *QueryConnectionConsensusStateResponse { + return &QueryConnectionConsensusStateResponse{ + ConsensusState: anyConsensusState, + ClientId: clientID, + Proof: proof, + ProofHeight: height, + } +} + +// UnpackInterfaces implements UnpackInterfacesMessage.UnpackInterfaces +func (qccsr QueryConnectionConsensusStateResponse) UnpackInterfaces(unpacker codectypes.AnyUnpacker) error { + return unpacker.UnpackAny(qccsr.ConsensusState, new(exported.ConsensusState)) +} diff --git a/core/03-connection/types/query.pb.go b/core/03-connection/types/query.pb.go new file mode 100644 index 0000000000..a03441a1ee --- /dev/null +++ b/core/03-connection/types/query.pb.go @@ -0,0 +1,2892 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. 
+// source: ibcgo/core/connection/v1/query.proto + +package types + +import ( + context "context" + fmt "fmt" + types1 "github.com/cosmos/cosmos-sdk/codec/types" + query "github.com/cosmos/cosmos-sdk/types/query" + types "github.com/cosmos/ibc-go/core/02-client/types" + _ "github.com/gogo/protobuf/gogoproto" + grpc1 "github.com/gogo/protobuf/grpc" + proto "github.com/gogo/protobuf/proto" + _ "google.golang.org/genproto/googleapis/api/annotations" + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" + io "io" + math "math" + math_bits "math/bits" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +// QueryConnectionRequest is the request type for the Query/Connection RPC +// method +type QueryConnectionRequest struct { + // connection unique identifier + ConnectionId string `protobuf:"bytes,1,opt,name=connection_id,json=connectionId,proto3" json:"connection_id,omitempty"` +} + +func (m *QueryConnectionRequest) Reset() { *m = QueryConnectionRequest{} } +func (m *QueryConnectionRequest) String() string { return proto.CompactTextString(m) } +func (*QueryConnectionRequest) ProtoMessage() {} +func (*QueryConnectionRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_eaccf9805ea75291, []int{0} +} +func (m *QueryConnectionRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *QueryConnectionRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_QueryConnectionRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *QueryConnectionRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_QueryConnectionRequest.Merge(m, src) +} +func (m *QueryConnectionRequest) XXX_Size() int { + return m.Size() +} +func (m *QueryConnectionRequest) XXX_DiscardUnknown() { + xxx_messageInfo_QueryConnectionRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_QueryConnectionRequest proto.InternalMessageInfo + +func (m *QueryConnectionRequest) GetConnectionId() string { + if m != nil { + return m.ConnectionId + } + return "" +} + +// QueryConnectionResponse is the response type for the Query/Connection RPC +// method. Besides the connection end, it includes a proof and the height from +// which the proof was retrieved. 
+type QueryConnectionResponse struct { + // connection associated with the request identifier + Connection *ConnectionEnd `protobuf:"bytes,1,opt,name=connection,proto3" json:"connection,omitempty"` + // merkle proof of existence + Proof []byte `protobuf:"bytes,2,opt,name=proof,proto3" json:"proof,omitempty"` + // height at which the proof was retrieved + ProofHeight types.Height `protobuf:"bytes,3,opt,name=proof_height,json=proofHeight,proto3" json:"proof_height"` +} + +func (m *QueryConnectionResponse) Reset() { *m = QueryConnectionResponse{} } +func (m *QueryConnectionResponse) String() string { return proto.CompactTextString(m) } +func (*QueryConnectionResponse) ProtoMessage() {} +func (*QueryConnectionResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_eaccf9805ea75291, []int{1} +} +func (m *QueryConnectionResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *QueryConnectionResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_QueryConnectionResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *QueryConnectionResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_QueryConnectionResponse.Merge(m, src) +} +func (m *QueryConnectionResponse) XXX_Size() int { + return m.Size() +} +func (m *QueryConnectionResponse) XXX_DiscardUnknown() { + xxx_messageInfo_QueryConnectionResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_QueryConnectionResponse proto.InternalMessageInfo + +func (m *QueryConnectionResponse) GetConnection() *ConnectionEnd { + if m != nil { + return m.Connection + } + return nil +} + +func (m *QueryConnectionResponse) GetProof() []byte { + if m != nil { + return m.Proof + } + return nil +} + +func (m *QueryConnectionResponse) GetProofHeight() types.Height { + if m != nil { + return m.ProofHeight + } + return types.Height{} +} + +// QueryConnectionsRequest is the request type for the Query/Connections RPC +// method +type QueryConnectionsRequest struct { + Pagination *query.PageRequest `protobuf:"bytes,1,opt,name=pagination,proto3" json:"pagination,omitempty"` +} + +func (m *QueryConnectionsRequest) Reset() { *m = QueryConnectionsRequest{} } +func (m *QueryConnectionsRequest) String() string { return proto.CompactTextString(m) } +func (*QueryConnectionsRequest) ProtoMessage() {} +func (*QueryConnectionsRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_eaccf9805ea75291, []int{2} +} +func (m *QueryConnectionsRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *QueryConnectionsRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_QueryConnectionsRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *QueryConnectionsRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_QueryConnectionsRequest.Merge(m, src) +} +func (m *QueryConnectionsRequest) XXX_Size() int { + return m.Size() +} +func (m *QueryConnectionsRequest) XXX_DiscardUnknown() { + xxx_messageInfo_QueryConnectionsRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_QueryConnectionsRequest proto.InternalMessageInfo + +func (m *QueryConnectionsRequest) GetPagination() *query.PageRequest { + if m != nil { + return m.Pagination + } + return nil +} + +// QueryConnectionsResponse 
is the response type for the Query/Connections RPC +// method. +type QueryConnectionsResponse struct { + // list of stored connections of the chain. + Connections []*IdentifiedConnection `protobuf:"bytes,1,rep,name=connections,proto3" json:"connections,omitempty"` + // pagination response + Pagination *query.PageResponse `protobuf:"bytes,2,opt,name=pagination,proto3" json:"pagination,omitempty"` + // query block height + Height types.Height `protobuf:"bytes,3,opt,name=height,proto3" json:"height"` +} + +func (m *QueryConnectionsResponse) Reset() { *m = QueryConnectionsResponse{} } +func (m *QueryConnectionsResponse) String() string { return proto.CompactTextString(m) } +func (*QueryConnectionsResponse) ProtoMessage() {} +func (*QueryConnectionsResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_eaccf9805ea75291, []int{3} +} +func (m *QueryConnectionsResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *QueryConnectionsResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_QueryConnectionsResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *QueryConnectionsResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_QueryConnectionsResponse.Merge(m, src) +} +func (m *QueryConnectionsResponse) XXX_Size() int { + return m.Size() +} +func (m *QueryConnectionsResponse) XXX_DiscardUnknown() { + xxx_messageInfo_QueryConnectionsResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_QueryConnectionsResponse proto.InternalMessageInfo + +func (m *QueryConnectionsResponse) GetConnections() []*IdentifiedConnection { + if m != nil { + return m.Connections + } + return nil +} + +func (m *QueryConnectionsResponse) GetPagination() *query.PageResponse { + if m != nil { + return m.Pagination + } + return nil +} + +func (m *QueryConnectionsResponse) GetHeight() types.Height { + if m != nil { + return m.Height + } + return types.Height{} +} + +// QueryClientConnectionsRequest is the request type for the +// Query/ClientConnections RPC method +type QueryClientConnectionsRequest struct { + // client identifier associated with a connection + ClientId string `protobuf:"bytes,1,opt,name=client_id,json=clientId,proto3" json:"client_id,omitempty"` +} + +func (m *QueryClientConnectionsRequest) Reset() { *m = QueryClientConnectionsRequest{} } +func (m *QueryClientConnectionsRequest) String() string { return proto.CompactTextString(m) } +func (*QueryClientConnectionsRequest) ProtoMessage() {} +func (*QueryClientConnectionsRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_eaccf9805ea75291, []int{4} +} +func (m *QueryClientConnectionsRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *QueryClientConnectionsRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_QueryClientConnectionsRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *QueryClientConnectionsRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_QueryClientConnectionsRequest.Merge(m, src) +} +func (m *QueryClientConnectionsRequest) XXX_Size() int { + return m.Size() +} +func (m *QueryClientConnectionsRequest) XXX_DiscardUnknown() { + xxx_messageInfo_QueryClientConnectionsRequest.DiscardUnknown(m) +} + +var 
xxx_messageInfo_QueryClientConnectionsRequest proto.InternalMessageInfo + +func (m *QueryClientConnectionsRequest) GetClientId() string { + if m != nil { + return m.ClientId + } + return "" +} + +// QueryClientConnectionsResponse is the response type for the +// Query/ClientConnections RPC method +type QueryClientConnectionsResponse struct { + // slice of all the connection paths associated with a client. + ConnectionPaths []string `protobuf:"bytes,1,rep,name=connection_paths,json=connectionPaths,proto3" json:"connection_paths,omitempty"` + // merkle proof of existence + Proof []byte `protobuf:"bytes,2,opt,name=proof,proto3" json:"proof,omitempty"` + // height at which the proof was generated + ProofHeight types.Height `protobuf:"bytes,3,opt,name=proof_height,json=proofHeight,proto3" json:"proof_height"` +} + +func (m *QueryClientConnectionsResponse) Reset() { *m = QueryClientConnectionsResponse{} } +func (m *QueryClientConnectionsResponse) String() string { return proto.CompactTextString(m) } +func (*QueryClientConnectionsResponse) ProtoMessage() {} +func (*QueryClientConnectionsResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_eaccf9805ea75291, []int{5} +} +func (m *QueryClientConnectionsResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *QueryClientConnectionsResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_QueryClientConnectionsResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *QueryClientConnectionsResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_QueryClientConnectionsResponse.Merge(m, src) +} +func (m *QueryClientConnectionsResponse) XXX_Size() int { + return m.Size() +} +func (m *QueryClientConnectionsResponse) XXX_DiscardUnknown() { + xxx_messageInfo_QueryClientConnectionsResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_QueryClientConnectionsResponse proto.InternalMessageInfo + +func (m *QueryClientConnectionsResponse) GetConnectionPaths() []string { + if m != nil { + return m.ConnectionPaths + } + return nil +} + +func (m *QueryClientConnectionsResponse) GetProof() []byte { + if m != nil { + return m.Proof + } + return nil +} + +func (m *QueryClientConnectionsResponse) GetProofHeight() types.Height { + if m != nil { + return m.ProofHeight + } + return types.Height{} +} + +// QueryConnectionClientStateRequest is the request type for the +// Query/ConnectionClientState RPC method +type QueryConnectionClientStateRequest struct { + // connection identifier + ConnectionId string `protobuf:"bytes,1,opt,name=connection_id,json=connectionId,proto3" json:"connection_id,omitempty" yaml:"connection_id"` +} + +func (m *QueryConnectionClientStateRequest) Reset() { *m = QueryConnectionClientStateRequest{} } +func (m *QueryConnectionClientStateRequest) String() string { return proto.CompactTextString(m) } +func (*QueryConnectionClientStateRequest) ProtoMessage() {} +func (*QueryConnectionClientStateRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_eaccf9805ea75291, []int{6} +} +func (m *QueryConnectionClientStateRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *QueryConnectionClientStateRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_QueryConnectionClientStateRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := 
m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *QueryConnectionClientStateRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_QueryConnectionClientStateRequest.Merge(m, src) +} +func (m *QueryConnectionClientStateRequest) XXX_Size() int { + return m.Size() +} +func (m *QueryConnectionClientStateRequest) XXX_DiscardUnknown() { + xxx_messageInfo_QueryConnectionClientStateRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_QueryConnectionClientStateRequest proto.InternalMessageInfo + +func (m *QueryConnectionClientStateRequest) GetConnectionId() string { + if m != nil { + return m.ConnectionId + } + return "" +} + +// QueryConnectionClientStateResponse is the response type for the +// Query/ConnectionClientState RPC method +type QueryConnectionClientStateResponse struct { + // client state associated with the channel + IdentifiedClientState *types.IdentifiedClientState `protobuf:"bytes,1,opt,name=identified_client_state,json=identifiedClientState,proto3" json:"identified_client_state,omitempty"` + // merkle proof of existence + Proof []byte `protobuf:"bytes,2,opt,name=proof,proto3" json:"proof,omitempty"` + // height at which the proof was retrieved + ProofHeight types.Height `protobuf:"bytes,3,opt,name=proof_height,json=proofHeight,proto3" json:"proof_height"` +} + +func (m *QueryConnectionClientStateResponse) Reset() { *m = QueryConnectionClientStateResponse{} } +func (m *QueryConnectionClientStateResponse) String() string { return proto.CompactTextString(m) } +func (*QueryConnectionClientStateResponse) ProtoMessage() {} +func (*QueryConnectionClientStateResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_eaccf9805ea75291, []int{7} +} +func (m *QueryConnectionClientStateResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *QueryConnectionClientStateResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_QueryConnectionClientStateResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *QueryConnectionClientStateResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_QueryConnectionClientStateResponse.Merge(m, src) +} +func (m *QueryConnectionClientStateResponse) XXX_Size() int { + return m.Size() +} +func (m *QueryConnectionClientStateResponse) XXX_DiscardUnknown() { + xxx_messageInfo_QueryConnectionClientStateResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_QueryConnectionClientStateResponse proto.InternalMessageInfo + +func (m *QueryConnectionClientStateResponse) GetIdentifiedClientState() *types.IdentifiedClientState { + if m != nil { + return m.IdentifiedClientState + } + return nil +} + +func (m *QueryConnectionClientStateResponse) GetProof() []byte { + if m != nil { + return m.Proof + } + return nil +} + +func (m *QueryConnectionClientStateResponse) GetProofHeight() types.Height { + if m != nil { + return m.ProofHeight + } + return types.Height{} +} + +// QueryConnectionConsensusStateRequest is the request type for the +// Query/ConnectionConsensusState RPC method +type QueryConnectionConsensusStateRequest struct { + // connection identifier + ConnectionId string `protobuf:"bytes,1,opt,name=connection_id,json=connectionId,proto3" json:"connection_id,omitempty" yaml:"connection_id"` + RevisionNumber uint64 `protobuf:"varint,2,opt,name=revision_number,json=revisionNumber,proto3" 
json:"revision_number,omitempty"` + RevisionHeight uint64 `protobuf:"varint,3,opt,name=revision_height,json=revisionHeight,proto3" json:"revision_height,omitempty"` +} + +func (m *QueryConnectionConsensusStateRequest) Reset() { *m = QueryConnectionConsensusStateRequest{} } +func (m *QueryConnectionConsensusStateRequest) String() string { return proto.CompactTextString(m) } +func (*QueryConnectionConsensusStateRequest) ProtoMessage() {} +func (*QueryConnectionConsensusStateRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_eaccf9805ea75291, []int{8} +} +func (m *QueryConnectionConsensusStateRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *QueryConnectionConsensusStateRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_QueryConnectionConsensusStateRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *QueryConnectionConsensusStateRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_QueryConnectionConsensusStateRequest.Merge(m, src) +} +func (m *QueryConnectionConsensusStateRequest) XXX_Size() int { + return m.Size() +} +func (m *QueryConnectionConsensusStateRequest) XXX_DiscardUnknown() { + xxx_messageInfo_QueryConnectionConsensusStateRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_QueryConnectionConsensusStateRequest proto.InternalMessageInfo + +func (m *QueryConnectionConsensusStateRequest) GetConnectionId() string { + if m != nil { + return m.ConnectionId + } + return "" +} + +func (m *QueryConnectionConsensusStateRequest) GetRevisionNumber() uint64 { + if m != nil { + return m.RevisionNumber + } + return 0 +} + +func (m *QueryConnectionConsensusStateRequest) GetRevisionHeight() uint64 { + if m != nil { + return m.RevisionHeight + } + return 0 +} + +// QueryConnectionConsensusStateResponse is the response type for the +// Query/ConnectionConsensusState RPC method +type QueryConnectionConsensusStateResponse struct { + // consensus state associated with the channel + ConsensusState *types1.Any `protobuf:"bytes,1,opt,name=consensus_state,json=consensusState,proto3" json:"consensus_state,omitempty"` + // client ID associated with the consensus state + ClientId string `protobuf:"bytes,2,opt,name=client_id,json=clientId,proto3" json:"client_id,omitempty"` + // merkle proof of existence + Proof []byte `protobuf:"bytes,3,opt,name=proof,proto3" json:"proof,omitempty"` + // height at which the proof was retrieved + ProofHeight types.Height `protobuf:"bytes,4,opt,name=proof_height,json=proofHeight,proto3" json:"proof_height"` +} + +func (m *QueryConnectionConsensusStateResponse) Reset() { *m = QueryConnectionConsensusStateResponse{} } +func (m *QueryConnectionConsensusStateResponse) String() string { return proto.CompactTextString(m) } +func (*QueryConnectionConsensusStateResponse) ProtoMessage() {} +func (*QueryConnectionConsensusStateResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_eaccf9805ea75291, []int{9} +} +func (m *QueryConnectionConsensusStateResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *QueryConnectionConsensusStateResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_QueryConnectionConsensusStateResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return 
b[:n], nil + } +} +func (m *QueryConnectionConsensusStateResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_QueryConnectionConsensusStateResponse.Merge(m, src) +} +func (m *QueryConnectionConsensusStateResponse) XXX_Size() int { + return m.Size() +} +func (m *QueryConnectionConsensusStateResponse) XXX_DiscardUnknown() { + xxx_messageInfo_QueryConnectionConsensusStateResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_QueryConnectionConsensusStateResponse proto.InternalMessageInfo + +func (m *QueryConnectionConsensusStateResponse) GetConsensusState() *types1.Any { + if m != nil { + return m.ConsensusState + } + return nil +} + +func (m *QueryConnectionConsensusStateResponse) GetClientId() string { + if m != nil { + return m.ClientId + } + return "" +} + +func (m *QueryConnectionConsensusStateResponse) GetProof() []byte { + if m != nil { + return m.Proof + } + return nil +} + +func (m *QueryConnectionConsensusStateResponse) GetProofHeight() types.Height { + if m != nil { + return m.ProofHeight + } + return types.Height{} +} + +func init() { + proto.RegisterType((*QueryConnectionRequest)(nil), "ibcgo.core.connection.v1.QueryConnectionRequest") + proto.RegisterType((*QueryConnectionResponse)(nil), "ibcgo.core.connection.v1.QueryConnectionResponse") + proto.RegisterType((*QueryConnectionsRequest)(nil), "ibcgo.core.connection.v1.QueryConnectionsRequest") + proto.RegisterType((*QueryConnectionsResponse)(nil), "ibcgo.core.connection.v1.QueryConnectionsResponse") + proto.RegisterType((*QueryClientConnectionsRequest)(nil), "ibcgo.core.connection.v1.QueryClientConnectionsRequest") + proto.RegisterType((*QueryClientConnectionsResponse)(nil), "ibcgo.core.connection.v1.QueryClientConnectionsResponse") + proto.RegisterType((*QueryConnectionClientStateRequest)(nil), "ibcgo.core.connection.v1.QueryConnectionClientStateRequest") + proto.RegisterType((*QueryConnectionClientStateResponse)(nil), "ibcgo.core.connection.v1.QueryConnectionClientStateResponse") + proto.RegisterType((*QueryConnectionConsensusStateRequest)(nil), "ibcgo.core.connection.v1.QueryConnectionConsensusStateRequest") + proto.RegisterType((*QueryConnectionConsensusStateResponse)(nil), "ibcgo.core.connection.v1.QueryConnectionConsensusStateResponse") +} + +func init() { + proto.RegisterFile("ibcgo/core/connection/v1/query.proto", fileDescriptor_eaccf9805ea75291) +} + +var fileDescriptor_eaccf9805ea75291 = []byte{ + // 889 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xb4, 0x56, 0xcf, 0x6f, 0xe3, 0x44, + 0x14, 0xce, 0xa4, 0xdd, 0xd5, 0x76, 0x52, 0x76, 0x61, 0x94, 0x65, 0x4d, 0x58, 0xb2, 0x5d, 0x6f, + 0x4b, 0x5b, 0xaa, 0xce, 0x34, 0xa9, 0x80, 0xaa, 0xbf, 0x80, 0xa0, 0x42, 0x2b, 0x24, 0x54, 0xcc, + 0x8d, 0x4b, 0x65, 0x3b, 0x53, 0xc7, 0x52, 0xe3, 0x49, 0x63, 0x27, 0x28, 0xaa, 0x22, 0x24, 0xfe, + 0x02, 0x24, 0xae, 0x5c, 0x10, 0x17, 0x4e, 0x5c, 0x39, 0x72, 0x43, 0x3d, 0x56, 0xe2, 0xc2, 0x01, + 0x55, 0x55, 0x8b, 0xb8, 0xc3, 0x5f, 0x80, 0x3c, 0x33, 0xae, 0xc7, 0x49, 0xdc, 0x26, 0xd1, 0xf6, + 0x96, 0xbc, 0x79, 0x6f, 0xde, 0xf7, 0x7d, 0xef, 0xcd, 0x97, 0xc0, 0x59, 0xd7, 0xb2, 0x1d, 0x46, + 0x6c, 0xd6, 0xa4, 0xc4, 0x66, 0x9e, 0x47, 0xed, 0xc0, 0x65, 0x1e, 0x69, 0x97, 0xc8, 0x71, 0x8b, + 0x36, 0x3b, 0xb8, 0xd1, 0x64, 0x01, 0x43, 0x1a, 0xcf, 0xc2, 0x61, 0x16, 0x8e, 0xb3, 0x70, 0xbb, + 0x54, 0xc8, 0x3b, 0xcc, 0x61, 0x3c, 0x89, 0x84, 0x9f, 0x44, 0x7e, 0xe1, 0x1d, 0x9b, 0xf9, 0x75, + 0xe6, 0x13, 0xcb, 0xf4, 0xa9, 0xb8, 0x88, 0xb4, 0x4b, 0x16, 0x0d, 0xcc, 0x12, 0x69, 0x98, 0x8e, + 0xeb, 0x99, 
0xbc, 0x5c, 0xe4, 0x3e, 0x57, 0x11, 0x1c, 0xb9, 0xd4, 0x0b, 0xc2, 0xee, 0xe2, 0x93, + 0x4c, 0x59, 0x4c, 0x05, 0xa9, 0x80, 0x11, 0xa9, 0x4f, 0x1d, 0xc6, 0x9c, 0x23, 0x4a, 0xcc, 0x86, + 0x4b, 0x4c, 0xcf, 0x63, 0x01, 0x6f, 0xe5, 0xcb, 0xd3, 0x37, 0xe4, 0x29, 0xff, 0x66, 0xb5, 0x0e, + 0x89, 0xe9, 0x49, 0x8a, 0xfa, 0x16, 0x7c, 0xfd, 0x8b, 0x10, 0xe8, 0xc7, 0xd7, 0x37, 0x1a, 0xf4, + 0xb8, 0x45, 0xfd, 0x00, 0xbd, 0x80, 0xaf, 0xc4, 0x6d, 0x0e, 0xdc, 0xaa, 0x06, 0x66, 0xc0, 0xc2, + 0x94, 0x31, 0x1d, 0x07, 0xf7, 0xaa, 0xfa, 0x6f, 0x00, 0x3e, 0xe9, 0xab, 0xf7, 0x1b, 0xcc, 0xf3, + 0x29, 0xfa, 0x14, 0xc2, 0x38, 0x97, 0x57, 0xe7, 0xca, 0xf3, 0x38, 0x4d, 0x52, 0x1c, 0xdf, 0xb0, + 0xe3, 0x55, 0x0d, 0xa5, 0x14, 0xe5, 0xe1, 0xbd, 0x46, 0x93, 0xb1, 0x43, 0x2d, 0x3b, 0x03, 0x16, + 0xa6, 0x0d, 0xf1, 0x05, 0xed, 0xc0, 0x69, 0xfe, 0xe1, 0xa0, 0x46, 0x5d, 0xa7, 0x16, 0x68, 0x13, + 0xbc, 0xc1, 0xd3, 0x44, 0x03, 0xa1, 0x66, 0xbb, 0x84, 0x77, 0x79, 0x4e, 0x65, 0xf2, 0xf4, 0xfc, + 0x59, 0xc6, 0xc8, 0xf1, 0x3a, 0x11, 0xd2, 0xcd, 0x3e, 0x02, 0x7e, 0xa4, 0xc0, 0x27, 0x10, 0xc6, + 0x63, 0x93, 0x04, 0xde, 0xc6, 0x62, 0xc6, 0x38, 0x9c, 0x31, 0x16, 0xcb, 0x22, 0x67, 0x8c, 0xf7, + 0x4d, 0x87, 0xca, 0x5a, 0x43, 0xa9, 0xd4, 0xff, 0x05, 0x50, 0xeb, 0xef, 0x21, 0x55, 0xda, 0x87, + 0xb9, 0x98, 0xaa, 0xaf, 0x81, 0x99, 0x89, 0x85, 0x5c, 0x19, 0xa7, 0xcb, 0xb4, 0x57, 0xa5, 0x5e, + 0xe0, 0x1e, 0xba, 0xb4, 0xaa, 0x48, 0xae, 0x5e, 0x11, 0xea, 0xae, 0xc0, 0xce, 0x4a, 0xdd, 0x6f, + 0x83, 0x2d, 0xe0, 0xa8, 0xb8, 0xd1, 0x3a, 0xbc, 0x3f, 0xb2, 0xb6, 0xb2, 0x42, 0xdf, 0x84, 0x6f, + 0x09, 0xca, 0x3c, 0x6d, 0x80, 0xb8, 0x6f, 0xc2, 0x29, 0x71, 0x45, 0xbc, 0x5a, 0x0f, 0x44, 0x60, + 0xaf, 0xaa, 0xff, 0x0c, 0x60, 0x31, 0xad, 0x5c, 0xea, 0xb6, 0x08, 0x5f, 0x55, 0xd6, 0xb3, 0x61, + 0x06, 0x35, 0x21, 0xde, 0x94, 0xf1, 0x28, 0x8e, 0xef, 0x87, 0xe1, 0xbb, 0xdd, 0x1f, 0x0b, 0x3e, + 0xef, 0x99, 0xad, 0xc0, 0xfc, 0x65, 0x60, 0x06, 0xd1, 0x36, 0xa0, 0xad, 0x81, 0x6f, 0xa9, 0xa2, + 0xfd, 0x77, 0xfe, 0x2c, 0xdf, 0x31, 0xeb, 0x47, 0xeb, 0x7a, 0xe2, 0x58, 0xef, 0x79, 0x65, 0xff, + 0x00, 0xa8, 0xdf, 0xd4, 0x44, 0x4a, 0x62, 0xc3, 0x27, 0xee, 0xf5, 0x76, 0x1c, 0x48, 0x75, 0xfd, + 0x30, 0x45, 0x2e, 0xef, 0xd2, 0x60, 0x72, 0xca, 0x4a, 0x29, 0xb7, 0x3e, 0x76, 0x07, 0x85, 0xef, + 0x56, 0xcc, 0x5f, 0x01, 0x9c, 0xed, 0x25, 0x1a, 0x52, 0xf3, 0xfc, 0x96, 0xff, 0x12, 0x05, 0x45, + 0xf3, 0xf0, 0x51, 0x93, 0xb6, 0x5d, 0x3f, 0x3c, 0xf5, 0x5a, 0x75, 0x8b, 0x36, 0x39, 0x9d, 0x49, + 0xe3, 0x61, 0x14, 0xfe, 0x9c, 0x47, 0x13, 0x89, 0x0a, 0x35, 0x25, 0x51, 0x22, 0xbf, 0x00, 0x70, + 0xee, 0x16, 0xe4, 0x72, 0x4a, 0x5b, 0x30, 0x5c, 0x50, 0x71, 0x92, 0x98, 0x4e, 0x1e, 0x0b, 0x9b, + 0xc6, 0x91, 0x4d, 0xe3, 0x8f, 0xbc, 0x8e, 0xf1, 0xd0, 0x4e, 0x5c, 0x93, 0x7c, 0x37, 0xd9, 0xe4, + 0xbb, 0x89, 0x87, 0x33, 0x71, 0xd3, 0x70, 0x26, 0xc7, 0x1a, 0x4e, 0xf9, 0xa7, 0x07, 0xf0, 0x1e, + 0xa7, 0x88, 0x7e, 0x01, 0x10, 0xc6, 0x3c, 0xd1, 0x4a, 0xba, 0x5b, 0x0d, 0xfe, 0x6d, 0x29, 0x94, + 0x46, 0xa8, 0x10, 0xb2, 0xe9, 0x1b, 0xdf, 0xfe, 0xf1, 0xf7, 0xf7, 0xd9, 0x77, 0xd1, 0x2a, 0x71, + 0x2d, 0xfb, 0xe6, 0xdf, 0x44, 0x9f, 0x9c, 0x24, 0xa6, 0xdf, 0x45, 0x3f, 0x02, 0x98, 0x53, 0x4c, + 0x04, 0x0d, 0xdf, 0x3f, 0xf2, 0xab, 0x42, 0x79, 0x94, 0x12, 0x89, 0x79, 0x89, 0x63, 0x9e, 0x43, + 0x2f, 0x86, 0xc0, 0x8c, 0x7e, 0x07, 0xf0, 0xb5, 0x3e, 0xbb, 0x43, 0xef, 0xdf, 0xd6, 0x36, 0xc5, + 0x5f, 0x0b, 0x6b, 0xa3, 0x17, 0x4a, 0xd4, 0xdb, 0x1c, 0xf5, 0x1a, 0x7a, 0x2f, 0x15, 0xb5, 0xd8, + 0xbf, 0xa4, 0xe0, 0xd1, 0x4e, 0x76, 0xd1, 0x5f, 0x00, 0x3e, 0x1e, 0x68, 0x54, 0x68, 0x63, 0x68, + 0x0d, 0xfb, 0x3d, 0xb4, 0xb0, 0x39, 
0x5e, 0xb1, 0x24, 0xb5, 0xcb, 0x49, 0x55, 0xd0, 0x87, 0x63, + 0xac, 0x0f, 0x51, 0xad, 0x14, 0xfd, 0x90, 0x85, 0x5a, 0xda, 0x23, 0x47, 0xdb, 0xc3, 0x83, 0x1c, + 0xe4, 0x6b, 0x85, 0x0f, 0xc6, 0xae, 0x97, 0x3c, 0xbf, 0xe1, 0x3c, 0x3b, 0xe8, 0xeb, 0xb1, 0x78, + 0x26, 0x7d, 0x89, 0x44, 0x1e, 0x47, 0x4e, 0x7a, 0xdc, 0xb2, 0x4b, 0x84, 0x95, 0x28, 0x07, 0x22, + 0xd0, 0xad, 0x7c, 0x76, 0x7a, 0x59, 0x04, 0x67, 0x97, 0x45, 0x70, 0x71, 0x59, 0x04, 0xdf, 0x5d, + 0x15, 0x33, 0x67, 0x57, 0xc5, 0xcc, 0x9f, 0x57, 0xc5, 0xcc, 0x57, 0x25, 0xc7, 0x0d, 0x6a, 0x2d, + 0x0b, 0xdb, 0xac, 0x4e, 0xe4, 0x1f, 0x65, 0xd7, 0xb2, 0x97, 0xa3, 0x7f, 0xb8, 0x2b, 0xab, 0xcb, + 0x0a, 0xd2, 0xa0, 0xd3, 0xa0, 0xbe, 0x75, 0x9f, 0x5b, 0xe1, 0xea, 0xff, 0x01, 0x00, 0x00, 0xff, + 0xff, 0x02, 0x85, 0x22, 0x9e, 0xaf, 0x0b, 0x00, 0x00, +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConn + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion4 + +// QueryClient is the client API for Query service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. +type QueryClient interface { + // Connection queries an IBC connection end. + Connection(ctx context.Context, in *QueryConnectionRequest, opts ...grpc.CallOption) (*QueryConnectionResponse, error) + // Connections queries all the IBC connections of a chain. + Connections(ctx context.Context, in *QueryConnectionsRequest, opts ...grpc.CallOption) (*QueryConnectionsResponse, error) + // ClientConnections queries the connection paths associated with a client + // state. + ClientConnections(ctx context.Context, in *QueryClientConnectionsRequest, opts ...grpc.CallOption) (*QueryClientConnectionsResponse, error) + // ConnectionClientState queries the client state associated with the + // connection. + ConnectionClientState(ctx context.Context, in *QueryConnectionClientStateRequest, opts ...grpc.CallOption) (*QueryConnectionClientStateResponse, error) + // ConnectionConsensusState queries the consensus state associated with the + // connection. + ConnectionConsensusState(ctx context.Context, in *QueryConnectionConsensusStateRequest, opts ...grpc.CallOption) (*QueryConnectionConsensusStateResponse, error) +} + +type queryClient struct { + cc grpc1.ClientConn +} + +func NewQueryClient(cc grpc1.ClientConn) QueryClient { + return &queryClient{cc} +} + +func (c *queryClient) Connection(ctx context.Context, in *QueryConnectionRequest, opts ...grpc.CallOption) (*QueryConnectionResponse, error) { + out := new(QueryConnectionResponse) + err := c.cc.Invoke(ctx, "/ibcgo.core.connection.v1.Query/Connection", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *queryClient) Connections(ctx context.Context, in *QueryConnectionsRequest, opts ...grpc.CallOption) (*QueryConnectionsResponse, error) { + out := new(QueryConnectionsResponse) + err := c.cc.Invoke(ctx, "/ibcgo.core.connection.v1.Query/Connections", in, out, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +func (c *queryClient) ClientConnections(ctx context.Context, in *QueryClientConnectionsRequest, opts ...grpc.CallOption) (*QueryClientConnectionsResponse, error) { + out := new(QueryClientConnectionsResponse) + err := c.cc.Invoke(ctx, "/ibcgo.core.connection.v1.Query/ClientConnections", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *queryClient) ConnectionClientState(ctx context.Context, in *QueryConnectionClientStateRequest, opts ...grpc.CallOption) (*QueryConnectionClientStateResponse, error) { + out := new(QueryConnectionClientStateResponse) + err := c.cc.Invoke(ctx, "/ibcgo.core.connection.v1.Query/ConnectionClientState", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *queryClient) ConnectionConsensusState(ctx context.Context, in *QueryConnectionConsensusStateRequest, opts ...grpc.CallOption) (*QueryConnectionConsensusStateResponse, error) { + out := new(QueryConnectionConsensusStateResponse) + err := c.cc.Invoke(ctx, "/ibcgo.core.connection.v1.Query/ConnectionConsensusState", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// QueryServer is the server API for Query service. +type QueryServer interface { + // Connection queries an IBC connection end. + Connection(context.Context, *QueryConnectionRequest) (*QueryConnectionResponse, error) + // Connections queries all the IBC connections of a chain. + Connections(context.Context, *QueryConnectionsRequest) (*QueryConnectionsResponse, error) + // ClientConnections queries the connection paths associated with a client + // state. + ClientConnections(context.Context, *QueryClientConnectionsRequest) (*QueryClientConnectionsResponse, error) + // ConnectionClientState queries the client state associated with the + // connection. + ConnectionClientState(context.Context, *QueryConnectionClientStateRequest) (*QueryConnectionClientStateResponse, error) + // ConnectionConsensusState queries the consensus state associated with the + // connection. + ConnectionConsensusState(context.Context, *QueryConnectionConsensusStateRequest) (*QueryConnectionConsensusStateResponse, error) +} + +// UnimplementedQueryServer can be embedded to have forward compatible implementations. 
+type UnimplementedQueryServer struct { +} + +func (*UnimplementedQueryServer) Connection(ctx context.Context, req *QueryConnectionRequest) (*QueryConnectionResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method Connection not implemented") +} +func (*UnimplementedQueryServer) Connections(ctx context.Context, req *QueryConnectionsRequest) (*QueryConnectionsResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method Connections not implemented") +} +func (*UnimplementedQueryServer) ClientConnections(ctx context.Context, req *QueryClientConnectionsRequest) (*QueryClientConnectionsResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method ClientConnections not implemented") +} +func (*UnimplementedQueryServer) ConnectionClientState(ctx context.Context, req *QueryConnectionClientStateRequest) (*QueryConnectionClientStateResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method ConnectionClientState not implemented") +} +func (*UnimplementedQueryServer) ConnectionConsensusState(ctx context.Context, req *QueryConnectionConsensusStateRequest) (*QueryConnectionConsensusStateResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method ConnectionConsensusState not implemented") +} + +func RegisterQueryServer(s grpc1.Server, srv QueryServer) { + s.RegisterService(&_Query_serviceDesc, srv) +} + +func _Query_Connection_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(QueryConnectionRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(QueryServer).Connection(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/ibcgo.core.connection.v1.Query/Connection", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(QueryServer).Connection(ctx, req.(*QueryConnectionRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Query_Connections_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(QueryConnectionsRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(QueryServer).Connections(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/ibcgo.core.connection.v1.Query/Connections", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(QueryServer).Connections(ctx, req.(*QueryConnectionsRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Query_ClientConnections_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(QueryClientConnectionsRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(QueryServer).ClientConnections(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/ibcgo.core.connection.v1.Query/ClientConnections", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(QueryServer).ClientConnections(ctx, req.(*QueryClientConnectionsRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Query_ConnectionClientState_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) 
(interface{}, error) { + in := new(QueryConnectionClientStateRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(QueryServer).ConnectionClientState(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/ibcgo.core.connection.v1.Query/ConnectionClientState", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(QueryServer).ConnectionClientState(ctx, req.(*QueryConnectionClientStateRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Query_ConnectionConsensusState_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(QueryConnectionConsensusStateRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(QueryServer).ConnectionConsensusState(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/ibcgo.core.connection.v1.Query/ConnectionConsensusState", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(QueryServer).ConnectionConsensusState(ctx, req.(*QueryConnectionConsensusStateRequest)) + } + return interceptor(ctx, in, info, handler) +} + +var _Query_serviceDesc = grpc.ServiceDesc{ + ServiceName: "ibcgo.core.connection.v1.Query", + HandlerType: (*QueryServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "Connection", + Handler: _Query_Connection_Handler, + }, + { + MethodName: "Connections", + Handler: _Query_Connections_Handler, + }, + { + MethodName: "ClientConnections", + Handler: _Query_ClientConnections_Handler, + }, + { + MethodName: "ConnectionClientState", + Handler: _Query_ConnectionClientState_Handler, + }, + { + MethodName: "ConnectionConsensusState", + Handler: _Query_ConnectionConsensusState_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "ibcgo/core/connection/v1/query.proto", +} + +func (m *QueryConnectionRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *QueryConnectionRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *QueryConnectionRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.ConnectionId) > 0 { + i -= len(m.ConnectionId) + copy(dAtA[i:], m.ConnectionId) + i = encodeVarintQuery(dAtA, i, uint64(len(m.ConnectionId))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *QueryConnectionResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *QueryConnectionResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *QueryConnectionResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.ProofHeight.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintQuery(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + if len(m.Proof) > 0 { + i -= len(m.Proof) + copy(dAtA[i:], m.Proof) + i = encodeVarintQuery(dAtA, i, uint64(len(m.Proof))) + i-- + dAtA[i] = 0x12 + 
} + if m.Connection != nil { + { + size, err := m.Connection.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintQuery(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *QueryConnectionsRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *QueryConnectionsRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *QueryConnectionsRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Pagination != nil { + { + size, err := m.Pagination.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintQuery(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *QueryConnectionsResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *QueryConnectionsResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *QueryConnectionsResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.Height.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintQuery(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + if m.Pagination != nil { + { + size, err := m.Pagination.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintQuery(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if len(m.Connections) > 0 { + for iNdEx := len(m.Connections) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Connections[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintQuery(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *QueryClientConnectionsRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *QueryClientConnectionsRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *QueryClientConnectionsRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.ClientId) > 0 { + i -= len(m.ClientId) + copy(dAtA[i:], m.ClientId) + i = encodeVarintQuery(dAtA, i, uint64(len(m.ClientId))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *QueryClientConnectionsResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *QueryClientConnectionsResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *QueryClientConnectionsResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.ProofHeight.MarshalToSizedBuffer(dAtA[:i]) 
+ if err != nil { + return 0, err + } + i -= size + i = encodeVarintQuery(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + if len(m.Proof) > 0 { + i -= len(m.Proof) + copy(dAtA[i:], m.Proof) + i = encodeVarintQuery(dAtA, i, uint64(len(m.Proof))) + i-- + dAtA[i] = 0x12 + } + if len(m.ConnectionPaths) > 0 { + for iNdEx := len(m.ConnectionPaths) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.ConnectionPaths[iNdEx]) + copy(dAtA[i:], m.ConnectionPaths[iNdEx]) + i = encodeVarintQuery(dAtA, i, uint64(len(m.ConnectionPaths[iNdEx]))) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *QueryConnectionClientStateRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *QueryConnectionClientStateRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *QueryConnectionClientStateRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.ConnectionId) > 0 { + i -= len(m.ConnectionId) + copy(dAtA[i:], m.ConnectionId) + i = encodeVarintQuery(dAtA, i, uint64(len(m.ConnectionId))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *QueryConnectionClientStateResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *QueryConnectionClientStateResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *QueryConnectionClientStateResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.ProofHeight.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintQuery(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + if len(m.Proof) > 0 { + i -= len(m.Proof) + copy(dAtA[i:], m.Proof) + i = encodeVarintQuery(dAtA, i, uint64(len(m.Proof))) + i-- + dAtA[i] = 0x12 + } + if m.IdentifiedClientState != nil { + { + size, err := m.IdentifiedClientState.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintQuery(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *QueryConnectionConsensusStateRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *QueryConnectionConsensusStateRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *QueryConnectionConsensusStateRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.RevisionHeight != 0 { + i = encodeVarintQuery(dAtA, i, uint64(m.RevisionHeight)) + i-- + dAtA[i] = 0x18 + } + if m.RevisionNumber != 0 { + i = encodeVarintQuery(dAtA, i, uint64(m.RevisionNumber)) + i-- + dAtA[i] = 0x10 + } + if len(m.ConnectionId) > 0 { + i -= len(m.ConnectionId) + copy(dAtA[i:], m.ConnectionId) + i = encodeVarintQuery(dAtA, i, uint64(len(m.ConnectionId))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *QueryConnectionConsensusStateResponse) Marshal() (dAtA []byte, err 
error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *QueryConnectionConsensusStateResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *QueryConnectionConsensusStateResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.ProofHeight.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintQuery(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + if len(m.Proof) > 0 { + i -= len(m.Proof) + copy(dAtA[i:], m.Proof) + i = encodeVarintQuery(dAtA, i, uint64(len(m.Proof))) + i-- + dAtA[i] = 0x1a + } + if len(m.ClientId) > 0 { + i -= len(m.ClientId) + copy(dAtA[i:], m.ClientId) + i = encodeVarintQuery(dAtA, i, uint64(len(m.ClientId))) + i-- + dAtA[i] = 0x12 + } + if m.ConsensusState != nil { + { + size, err := m.ConsensusState.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintQuery(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func encodeVarintQuery(dAtA []byte, offset int, v uint64) int { + offset -= sovQuery(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *QueryConnectionRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.ConnectionId) + if l > 0 { + n += 1 + l + sovQuery(uint64(l)) + } + return n +} + +func (m *QueryConnectionResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Connection != nil { + l = m.Connection.Size() + n += 1 + l + sovQuery(uint64(l)) + } + l = len(m.Proof) + if l > 0 { + n += 1 + l + sovQuery(uint64(l)) + } + l = m.ProofHeight.Size() + n += 1 + l + sovQuery(uint64(l)) + return n +} + +func (m *QueryConnectionsRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Pagination != nil { + l = m.Pagination.Size() + n += 1 + l + sovQuery(uint64(l)) + } + return n +} + +func (m *QueryConnectionsResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Connections) > 0 { + for _, e := range m.Connections { + l = e.Size() + n += 1 + l + sovQuery(uint64(l)) + } + } + if m.Pagination != nil { + l = m.Pagination.Size() + n += 1 + l + sovQuery(uint64(l)) + } + l = m.Height.Size() + n += 1 + l + sovQuery(uint64(l)) + return n +} + +func (m *QueryClientConnectionsRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.ClientId) + if l > 0 { + n += 1 + l + sovQuery(uint64(l)) + } + return n +} + +func (m *QueryClientConnectionsResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.ConnectionPaths) > 0 { + for _, s := range m.ConnectionPaths { + l = len(s) + n += 1 + l + sovQuery(uint64(l)) + } + } + l = len(m.Proof) + if l > 0 { + n += 1 + l + sovQuery(uint64(l)) + } + l = m.ProofHeight.Size() + n += 1 + l + sovQuery(uint64(l)) + return n +} + +func (m *QueryConnectionClientStateRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.ConnectionId) + if l > 0 { + n += 1 + l + sovQuery(uint64(l)) + } + return n +} + +func (m *QueryConnectionClientStateResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.IdentifiedClientState != nil { 
+ l = m.IdentifiedClientState.Size() + n += 1 + l + sovQuery(uint64(l)) + } + l = len(m.Proof) + if l > 0 { + n += 1 + l + sovQuery(uint64(l)) + } + l = m.ProofHeight.Size() + n += 1 + l + sovQuery(uint64(l)) + return n +} + +func (m *QueryConnectionConsensusStateRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.ConnectionId) + if l > 0 { + n += 1 + l + sovQuery(uint64(l)) + } + if m.RevisionNumber != 0 { + n += 1 + sovQuery(uint64(m.RevisionNumber)) + } + if m.RevisionHeight != 0 { + n += 1 + sovQuery(uint64(m.RevisionHeight)) + } + return n +} + +func (m *QueryConnectionConsensusStateResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.ConsensusState != nil { + l = m.ConsensusState.Size() + n += 1 + l + sovQuery(uint64(l)) + } + l = len(m.ClientId) + if l > 0 { + n += 1 + l + sovQuery(uint64(l)) + } + l = len(m.Proof) + if l > 0 { + n += 1 + l + sovQuery(uint64(l)) + } + l = m.ProofHeight.Size() + n += 1 + l + sovQuery(uint64(l)) + return n +} + +func sovQuery(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozQuery(x uint64) (n int) { + return sovQuery(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *QueryConnectionRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: QueryConnectionRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: QueryConnectionRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ConnectionId", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ConnectionId = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipQuery(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthQuery + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *QueryConnectionResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: QueryConnectionResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return 
fmt.Errorf("proto: QueryConnectionResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Connection", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Connection == nil { + m.Connection = &ConnectionEnd{} + } + if err := m.Connection.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Proof", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Proof = append(m.Proof[:0], dAtA[iNdEx:postIndex]...) + if m.Proof == nil { + m.Proof = []byte{} + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ProofHeight", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ProofHeight.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipQuery(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthQuery + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *QueryConnectionsRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: QueryConnectionsRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: QueryConnectionsRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Pagination", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + 
iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Pagination == nil { + m.Pagination = &query.PageRequest{} + } + if err := m.Pagination.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipQuery(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthQuery + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *QueryConnectionsResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: QueryConnectionsResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: QueryConnectionsResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Connections", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Connections = append(m.Connections, &IdentifiedConnection{}) + if err := m.Connections[len(m.Connections)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Pagination", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Pagination == nil { + m.Pagination = &query.PageResponse{} + } + if err := m.Pagination.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Height", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + 
if err := m.Height.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipQuery(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthQuery + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *QueryClientConnectionsRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: QueryClientConnectionsRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: QueryClientConnectionsRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ClientId", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ClientId = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipQuery(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthQuery + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *QueryClientConnectionsResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: QueryClientConnectionsResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: QueryClientConnectionsResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ConnectionPaths", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + 
m.ConnectionPaths = append(m.ConnectionPaths, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Proof", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Proof = append(m.Proof[:0], dAtA[iNdEx:postIndex]...) + if m.Proof == nil { + m.Proof = []byte{} + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ProofHeight", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ProofHeight.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipQuery(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthQuery + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *QueryConnectionClientStateRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: QueryConnectionClientStateRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: QueryConnectionClientStateRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ConnectionId", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ConnectionId = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipQuery(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthQuery + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return 
nil +} +func (m *QueryConnectionClientStateResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: QueryConnectionClientStateResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: QueryConnectionClientStateResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field IdentifiedClientState", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.IdentifiedClientState == nil { + m.IdentifiedClientState = &types.IdentifiedClientState{} + } + if err := m.IdentifiedClientState.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Proof", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Proof = append(m.Proof[:0], dAtA[iNdEx:postIndex]...) 
+ if m.Proof == nil { + m.Proof = []byte{} + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ProofHeight", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ProofHeight.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipQuery(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthQuery + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *QueryConnectionConsensusStateRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: QueryConnectionConsensusStateRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: QueryConnectionConsensusStateRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ConnectionId", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ConnectionId = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field RevisionNumber", wireType) + } + m.RevisionNumber = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.RevisionNumber |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field RevisionHeight", wireType) + } + m.RevisionHeight = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.RevisionHeight |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipQuery(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthQuery + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += 
skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *QueryConnectionConsensusStateResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: QueryConnectionConsensusStateResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: QueryConnectionConsensusStateResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ConsensusState", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ConsensusState == nil { + m.ConsensusState = &types1.Any{} + } + if err := m.ConsensusState.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ClientId", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ClientId = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Proof", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Proof = append(m.Proof[:0], dAtA[iNdEx:postIndex]...) 
+ if m.Proof == nil { + m.Proof = []byte{} + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ProofHeight", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ProofHeight.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipQuery(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthQuery + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipQuery(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowQuery + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowQuery + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowQuery + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthQuery + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupQuery + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthQuery + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthQuery = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowQuery = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupQuery = fmt.Errorf("proto: unexpected end of group") +) diff --git a/core/03-connection/types/query.pb.gw.go b/core/03-connection/types/query.pb.gw.go new file mode 100644 index 0000000000..e597cbebb8 --- /dev/null +++ b/core/03-connection/types/query.pb.gw.go @@ -0,0 +1,602 @@ +// Code generated by protoc-gen-grpc-gateway. DO NOT EDIT. +// source: ibcgo/core/connection/v1/query.proto + +/* +Package types is a reverse proxy. + +It translates gRPC into RESTful JSON APIs. 
+*/ +package types + +import ( + "context" + "io" + "net/http" + + "github.com/golang/protobuf/descriptor" + "github.com/golang/protobuf/proto" + "github.com/grpc-ecosystem/grpc-gateway/runtime" + "github.com/grpc-ecosystem/grpc-gateway/utilities" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/status" +) + +// Suppress "imported and not used" errors +var _ codes.Code +var _ io.Reader +var _ status.Status +var _ = runtime.String +var _ = utilities.NewDoubleArray +var _ = descriptor.ForMessage + +func request_Query_Connection_0(ctx context.Context, marshaler runtime.Marshaler, client QueryClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq QueryConnectionRequest + var metadata runtime.ServerMetadata + + var ( + val string + ok bool + err error + _ = err + ) + + val, ok = pathParams["connection_id"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "connection_id") + } + + protoReq.ConnectionId, err = runtime.String(val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "connection_id", err) + } + + msg, err := client.Connection(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err + +} + +func local_request_Query_Connection_0(ctx context.Context, marshaler runtime.Marshaler, server QueryServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq QueryConnectionRequest + var metadata runtime.ServerMetadata + + var ( + val string + ok bool + err error + _ = err + ) + + val, ok = pathParams["connection_id"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "connection_id") + } + + protoReq.ConnectionId, err = runtime.String(val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "connection_id", err) + } + + msg, err := server.Connection(ctx, &protoReq) + return msg, metadata, err + +} + +var ( + filter_Query_Connections_0 = &utilities.DoubleArray{Encoding: map[string]int{}, Base: []int(nil), Check: []int(nil)} +) + +func request_Query_Connections_0(ctx context.Context, marshaler runtime.Marshaler, client QueryClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq QueryConnectionsRequest + var metadata runtime.ServerMetadata + + if err := req.ParseForm(); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_Query_Connections_0); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + msg, err := client.Connections(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err + +} + +func local_request_Query_Connections_0(ctx context.Context, marshaler runtime.Marshaler, server QueryServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq QueryConnectionsRequest + var metadata runtime.ServerMetadata + + if err := req.ParseForm(); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + if err := runtime.PopulateQueryParameters(&protoReq, req.Form, 
filter_Query_Connections_0); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + msg, err := server.Connections(ctx, &protoReq) + return msg, metadata, err + +} + +func request_Query_ClientConnections_0(ctx context.Context, marshaler runtime.Marshaler, client QueryClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq QueryClientConnectionsRequest + var metadata runtime.ServerMetadata + + var ( + val string + ok bool + err error + _ = err + ) + + val, ok = pathParams["client_id"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "client_id") + } + + protoReq.ClientId, err = runtime.String(val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "client_id", err) + } + + msg, err := client.ClientConnections(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err + +} + +func local_request_Query_ClientConnections_0(ctx context.Context, marshaler runtime.Marshaler, server QueryServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq QueryClientConnectionsRequest + var metadata runtime.ServerMetadata + + var ( + val string + ok bool + err error + _ = err + ) + + val, ok = pathParams["client_id"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "client_id") + } + + protoReq.ClientId, err = runtime.String(val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "client_id", err) + } + + msg, err := server.ClientConnections(ctx, &protoReq) + return msg, metadata, err + +} + +func request_Query_ConnectionClientState_0(ctx context.Context, marshaler runtime.Marshaler, client QueryClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq QueryConnectionClientStateRequest + var metadata runtime.ServerMetadata + + var ( + val string + ok bool + err error + _ = err + ) + + val, ok = pathParams["connection_id"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "connection_id") + } + + protoReq.ConnectionId, err = runtime.String(val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "connection_id", err) + } + + msg, err := client.ConnectionClientState(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err + +} + +func local_request_Query_ConnectionClientState_0(ctx context.Context, marshaler runtime.Marshaler, server QueryServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq QueryConnectionClientStateRequest + var metadata runtime.ServerMetadata + + var ( + val string + ok bool + err error + _ = err + ) + + val, ok = pathParams["connection_id"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "connection_id") + } + + protoReq.ConnectionId, err = runtime.String(val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "connection_id", err) + } + + msg, err := server.ConnectionClientState(ctx, &protoReq) + return msg, metadata, err + 
+} + +func request_Query_ConnectionConsensusState_0(ctx context.Context, marshaler runtime.Marshaler, client QueryClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq QueryConnectionConsensusStateRequest + var metadata runtime.ServerMetadata + + var ( + val string + ok bool + err error + _ = err + ) + + val, ok = pathParams["connection_id"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "connection_id") + } + + protoReq.ConnectionId, err = runtime.String(val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "connection_id", err) + } + + val, ok = pathParams["revision_number"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "revision_number") + } + + protoReq.RevisionNumber, err = runtime.Uint64(val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "revision_number", err) + } + + val, ok = pathParams["revision_height"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "revision_height") + } + + protoReq.RevisionHeight, err = runtime.Uint64(val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "revision_height", err) + } + + msg, err := client.ConnectionConsensusState(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err + +} + +func local_request_Query_ConnectionConsensusState_0(ctx context.Context, marshaler runtime.Marshaler, server QueryServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq QueryConnectionConsensusStateRequest + var metadata runtime.ServerMetadata + + var ( + val string + ok bool + err error + _ = err + ) + + val, ok = pathParams["connection_id"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "connection_id") + } + + protoReq.ConnectionId, err = runtime.String(val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "connection_id", err) + } + + val, ok = pathParams["revision_number"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "revision_number") + } + + protoReq.RevisionNumber, err = runtime.Uint64(val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "revision_number", err) + } + + val, ok = pathParams["revision_height"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "revision_height") + } + + protoReq.RevisionHeight, err = runtime.Uint64(val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "revision_height", err) + } + + msg, err := server.ConnectionConsensusState(ctx, &protoReq) + return msg, metadata, err + +} + +// RegisterQueryHandlerServer registers the http handlers for service Query to "mux". +// UnaryRPC :call QueryServer directly. +// StreamingRPC :currently unsupported pending https://github.com/grpc/grpc-go/issues/906. 
+// Note that using this registration option will cause many gRPC library features (such as grpc.SendHeader, etc) to stop working. Consider using RegisterQueryHandlerFromEndpoint instead. +func RegisterQueryHandlerServer(ctx context.Context, mux *runtime.ServeMux, server QueryServer) error { + + mux.Handle("GET", pattern_Query_Connection_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := local_request_Query_Connection_0(rctx, inboundMarshaler, server, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Query_Connection_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + + mux.Handle("GET", pattern_Query_Connections_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := local_request_Query_Connections_0(rctx, inboundMarshaler, server, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Query_Connections_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + + mux.Handle("GET", pattern_Query_ClientConnections_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := local_request_Query_ClientConnections_0(rctx, inboundMarshaler, server, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Query_ClientConnections_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + + mux.Handle("GET", pattern_Query_ConnectionClientState_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := local_request_Query_ConnectionClientState_0(rctx, inboundMarshaler, server, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Query_ConnectionClientState_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
+ + }) + + mux.Handle("GET", pattern_Query_ConnectionConsensusState_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := local_request_Query_ConnectionConsensusState_0(rctx, inboundMarshaler, server, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Query_ConnectionConsensusState_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + + return nil +} + +// RegisterQueryHandlerFromEndpoint is same as RegisterQueryHandler but +// automatically dials to "endpoint" and closes the connection when "ctx" gets done. +func RegisterQueryHandlerFromEndpoint(ctx context.Context, mux *runtime.ServeMux, endpoint string, opts []grpc.DialOption) (err error) { + conn, err := grpc.Dial(endpoint, opts...) + if err != nil { + return err + } + defer func() { + if err != nil { + if cerr := conn.Close(); cerr != nil { + grpclog.Infof("Failed to close conn to %s: %v", endpoint, cerr) + } + return + } + go func() { + <-ctx.Done() + if cerr := conn.Close(); cerr != nil { + grpclog.Infof("Failed to close conn to %s: %v", endpoint, cerr) + } + }() + }() + + return RegisterQueryHandler(ctx, mux, conn) +} + +// RegisterQueryHandler registers the http handlers for service Query to "mux". +// The handlers forward requests to the grpc endpoint over "conn". +func RegisterQueryHandler(ctx context.Context, mux *runtime.ServeMux, conn *grpc.ClientConn) error { + return RegisterQueryHandlerClient(ctx, mux, NewQueryClient(conn)) +} + +// RegisterQueryHandlerClient registers the http handlers for service Query +// to "mux". The handlers forward requests to the grpc endpoint over the given implementation of "QueryClient". +// Note: the gRPC framework executes interceptors within the gRPC handler. If the passed in "QueryClient" +// doesn't go through the normal gRPC flow (creating a gRPC client etc.) then it will be up to the passed in +// "QueryClient" to call the correct interceptors. +func RegisterQueryHandlerClient(ctx context.Context, mux *runtime.ServeMux, client QueryClient) error { + + mux.Handle("GET", pattern_Query_Connection_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_Query_Connection_0(rctx, inboundMarshaler, client, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Query_Connection_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
+ + }) + + mux.Handle("GET", pattern_Query_Connections_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_Query_Connections_0(rctx, inboundMarshaler, client, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Query_Connections_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + + mux.Handle("GET", pattern_Query_ClientConnections_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_Query_ClientConnections_0(rctx, inboundMarshaler, client, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Query_ClientConnections_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + + mux.Handle("GET", pattern_Query_ConnectionClientState_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_Query_ConnectionClientState_0(rctx, inboundMarshaler, client, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Query_ConnectionClientState_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + + mux.Handle("GET", pattern_Query_ConnectionConsensusState_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_Query_ConnectionConsensusState_0(rctx, inboundMarshaler, client, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Query_ConnectionConsensusState_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
+ + }) + + return nil +} + +var ( + pattern_Query_Connection_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3, 2, 4, 1, 0, 4, 1, 5, 5}, []string{"ibc", "core", "connection", "v1", "connections", "connection_id"}, "", runtime.AssumeColonVerbOpt(true))) + + pattern_Query_Connections_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3, 2, 4}, []string{"ibc", "core", "connection", "v1", "connections"}, "", runtime.AssumeColonVerbOpt(true))) + + pattern_Query_ClientConnections_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3, 2, 4, 1, 0, 4, 1, 5, 5}, []string{"ibc", "core", "connection", "v1", "client_connections", "client_id"}, "", runtime.AssumeColonVerbOpt(true))) + + pattern_Query_ConnectionClientState_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3, 2, 4, 1, 0, 4, 1, 5, 5, 2, 6}, []string{"ibc", "core", "connection", "v1", "connections", "connection_id", "client_state"}, "", runtime.AssumeColonVerbOpt(true))) + + pattern_Query_ConnectionConsensusState_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3, 2, 4, 1, 0, 4, 1, 5, 5, 2, 6, 2, 7, 1, 0, 4, 1, 5, 8, 2, 9, 1, 0, 4, 1, 5, 10}, []string{"ibc", "core", "connection", "v1", "connections", "connection_id", "consensus_state", "revision", "revision_number", "height", "revision_height"}, "", runtime.AssumeColonVerbOpt(true))) +) + +var ( + forward_Query_Connection_0 = runtime.ForwardResponseMessage + + forward_Query_Connections_0 = runtime.ForwardResponseMessage + + forward_Query_ClientConnections_0 = runtime.ForwardResponseMessage + + forward_Query_ConnectionClientState_0 = runtime.ForwardResponseMessage + + forward_Query_ConnectionConsensusState_0 = runtime.ForwardResponseMessage +) diff --git a/core/03-connection/types/tx.pb.go b/core/03-connection/types/tx.pb.go new file mode 100644 index 0000000000..ca9b87f57b --- /dev/null +++ b/core/03-connection/types/tx.pb.go @@ -0,0 +1,2782 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: ibcgo/core/connection/v1/tx.proto + +package types + +import ( + context "context" + fmt "fmt" + types "github.com/cosmos/cosmos-sdk/codec/types" + types1 "github.com/cosmos/ibc-go/core/02-client/types" + _ "github.com/gogo/protobuf/gogoproto" + grpc1 "github.com/gogo/protobuf/grpc" + proto "github.com/gogo/protobuf/proto" + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" + io "io" + math "math" + math_bits "math/bits" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +// MsgConnectionOpenInit defines the msg sent by an account on Chain A to +// initialize a connection with Chain B. 
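+// An illustrative sketch (not part of the generated output) of how a relayer
+// or CLI might populate this message; "counterparty" and "signer" are assumed
+// placeholder values, not identifiers defined in this file:
+//
+//	msg := &MsgConnectionOpenInit{
+//		ClientId:     "07-tendermint-0",
+//		Counterparty: counterparty, // Counterparty of the connection end on Chain B
+//		DelayPeriod:  0,
+//		Signer:       signer, // bech32 account address of the submitter
+//	}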
+type MsgConnectionOpenInit struct { + ClientId string `protobuf:"bytes,1,opt,name=client_id,json=clientId,proto3" json:"client_id,omitempty" yaml:"client_id"` + Counterparty Counterparty `protobuf:"bytes,2,opt,name=counterparty,proto3" json:"counterparty"` + Version *Version `protobuf:"bytes,3,opt,name=version,proto3" json:"version,omitempty"` + DelayPeriod uint64 `protobuf:"varint,4,opt,name=delay_period,json=delayPeriod,proto3" json:"delay_period,omitempty" yaml:"delay_period"` + Signer string `protobuf:"bytes,5,opt,name=signer,proto3" json:"signer,omitempty"` +} + +func (m *MsgConnectionOpenInit) Reset() { *m = MsgConnectionOpenInit{} } +func (m *MsgConnectionOpenInit) String() string { return proto.CompactTextString(m) } +func (*MsgConnectionOpenInit) ProtoMessage() {} +func (*MsgConnectionOpenInit) Descriptor() ([]byte, []int) { + return fileDescriptor_296ab31199620d78, []int{0} +} +func (m *MsgConnectionOpenInit) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *MsgConnectionOpenInit) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_MsgConnectionOpenInit.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *MsgConnectionOpenInit) XXX_Merge(src proto.Message) { + xxx_messageInfo_MsgConnectionOpenInit.Merge(m, src) +} +func (m *MsgConnectionOpenInit) XXX_Size() int { + return m.Size() +} +func (m *MsgConnectionOpenInit) XXX_DiscardUnknown() { + xxx_messageInfo_MsgConnectionOpenInit.DiscardUnknown(m) +} + +var xxx_messageInfo_MsgConnectionOpenInit proto.InternalMessageInfo + +// MsgConnectionOpenInitResponse defines the Msg/ConnectionOpenInit response +// type. +type MsgConnectionOpenInitResponse struct { +} + +func (m *MsgConnectionOpenInitResponse) Reset() { *m = MsgConnectionOpenInitResponse{} } +func (m *MsgConnectionOpenInitResponse) String() string { return proto.CompactTextString(m) } +func (*MsgConnectionOpenInitResponse) ProtoMessage() {} +func (*MsgConnectionOpenInitResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_296ab31199620d78, []int{1} +} +func (m *MsgConnectionOpenInitResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *MsgConnectionOpenInitResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_MsgConnectionOpenInitResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *MsgConnectionOpenInitResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_MsgConnectionOpenInitResponse.Merge(m, src) +} +func (m *MsgConnectionOpenInitResponse) XXX_Size() int { + return m.Size() +} +func (m *MsgConnectionOpenInitResponse) XXX_DiscardUnknown() { + xxx_messageInfo_MsgConnectionOpenInitResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_MsgConnectionOpenInitResponse proto.InternalMessageInfo + +// MsgConnectionOpenTry defines a msg sent by a Relayer to try to open a +// connection on Chain B. 
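+// An illustrative sketch (not part of the generated output); every value below
+// is an assumed placeholder, and the proof/height fields are elided because a
+// relayer would obtain them by querying Chain A:
+//
+//	msg := &MsgConnectionOpenTry{
+//		ClientId:             "07-tendermint-0",
+//		PreviousConnectionId: "", // only set in the crossing hello's case
+//		ClientState:          clientStateAny, // *types.Any wrapping Chain A's client state
+//		Counterparty:         counterparty,
+//		DelayPeriod:          0,
+//		CounterpartyVersions: counterpartyVersions,
+//		Signer:               signer,
+//		// ProofInit, ProofClient, ProofConsensus, ProofHeight, ConsensusHeight omitted
+//	}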
+type MsgConnectionOpenTry struct { + ClientId string `protobuf:"bytes,1,opt,name=client_id,json=clientId,proto3" json:"client_id,omitempty" yaml:"client_id"` + // in the case of crossing hello's, when both chains call OpenInit, we need + // the connection identifier of the previous connection in state INIT + PreviousConnectionId string `protobuf:"bytes,2,opt,name=previous_connection_id,json=previousConnectionId,proto3" json:"previous_connection_id,omitempty" yaml:"previous_connection_id"` + ClientState *types.Any `protobuf:"bytes,3,opt,name=client_state,json=clientState,proto3" json:"client_state,omitempty" yaml:"client_state"` + Counterparty Counterparty `protobuf:"bytes,4,opt,name=counterparty,proto3" json:"counterparty"` + DelayPeriod uint64 `protobuf:"varint,5,opt,name=delay_period,json=delayPeriod,proto3" json:"delay_period,omitempty" yaml:"delay_period"` + CounterpartyVersions []*Version `protobuf:"bytes,6,rep,name=counterparty_versions,json=counterpartyVersions,proto3" json:"counterparty_versions,omitempty" yaml:"counterparty_versions"` + ProofHeight types1.Height `protobuf:"bytes,7,opt,name=proof_height,json=proofHeight,proto3" json:"proof_height" yaml:"proof_height"` + // proof of the initialization the connection on Chain A: `UNITIALIZED -> + // INIT` + ProofInit []byte `protobuf:"bytes,8,opt,name=proof_init,json=proofInit,proto3" json:"proof_init,omitempty" yaml:"proof_init"` + // proof of client state included in message + ProofClient []byte `protobuf:"bytes,9,opt,name=proof_client,json=proofClient,proto3" json:"proof_client,omitempty" yaml:"proof_client"` + // proof of client consensus state + ProofConsensus []byte `protobuf:"bytes,10,opt,name=proof_consensus,json=proofConsensus,proto3" json:"proof_consensus,omitempty" yaml:"proof_consensus"` + ConsensusHeight types1.Height `protobuf:"bytes,11,opt,name=consensus_height,json=consensusHeight,proto3" json:"consensus_height" yaml:"consensus_height"` + Signer string `protobuf:"bytes,12,opt,name=signer,proto3" json:"signer,omitempty"` +} + +func (m *MsgConnectionOpenTry) Reset() { *m = MsgConnectionOpenTry{} } +func (m *MsgConnectionOpenTry) String() string { return proto.CompactTextString(m) } +func (*MsgConnectionOpenTry) ProtoMessage() {} +func (*MsgConnectionOpenTry) Descriptor() ([]byte, []int) { + return fileDescriptor_296ab31199620d78, []int{2} +} +func (m *MsgConnectionOpenTry) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *MsgConnectionOpenTry) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_MsgConnectionOpenTry.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *MsgConnectionOpenTry) XXX_Merge(src proto.Message) { + xxx_messageInfo_MsgConnectionOpenTry.Merge(m, src) +} +func (m *MsgConnectionOpenTry) XXX_Size() int { + return m.Size() +} +func (m *MsgConnectionOpenTry) XXX_DiscardUnknown() { + xxx_messageInfo_MsgConnectionOpenTry.DiscardUnknown(m) +} + +var xxx_messageInfo_MsgConnectionOpenTry proto.InternalMessageInfo + +// MsgConnectionOpenTryResponse defines the Msg/ConnectionOpenTry response type. 
+type MsgConnectionOpenTryResponse struct { +} + +func (m *MsgConnectionOpenTryResponse) Reset() { *m = MsgConnectionOpenTryResponse{} } +func (m *MsgConnectionOpenTryResponse) String() string { return proto.CompactTextString(m) } +func (*MsgConnectionOpenTryResponse) ProtoMessage() {} +func (*MsgConnectionOpenTryResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_296ab31199620d78, []int{3} +} +func (m *MsgConnectionOpenTryResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *MsgConnectionOpenTryResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_MsgConnectionOpenTryResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *MsgConnectionOpenTryResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_MsgConnectionOpenTryResponse.Merge(m, src) +} +func (m *MsgConnectionOpenTryResponse) XXX_Size() int { + return m.Size() +} +func (m *MsgConnectionOpenTryResponse) XXX_DiscardUnknown() { + xxx_messageInfo_MsgConnectionOpenTryResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_MsgConnectionOpenTryResponse proto.InternalMessageInfo + +// MsgConnectionOpenAck defines a msg sent by a Relayer to Chain A to +// acknowledge the change of connection state to TRYOPEN on Chain B. +type MsgConnectionOpenAck struct { + ConnectionId string `protobuf:"bytes,1,opt,name=connection_id,json=connectionId,proto3" json:"connection_id,omitempty" yaml:"connection_id"` + CounterpartyConnectionId string `protobuf:"bytes,2,opt,name=counterparty_connection_id,json=counterpartyConnectionId,proto3" json:"counterparty_connection_id,omitempty" yaml:"counterparty_connection_id"` + Version *Version `protobuf:"bytes,3,opt,name=version,proto3" json:"version,omitempty"` + ClientState *types.Any `protobuf:"bytes,4,opt,name=client_state,json=clientState,proto3" json:"client_state,omitempty" yaml:"client_state"` + ProofHeight types1.Height `protobuf:"bytes,5,opt,name=proof_height,json=proofHeight,proto3" json:"proof_height" yaml:"proof_height"` + // proof of the initialization the connection on Chain B: `UNITIALIZED -> + // TRYOPEN` + ProofTry []byte `protobuf:"bytes,6,opt,name=proof_try,json=proofTry,proto3" json:"proof_try,omitempty" yaml:"proof_try"` + // proof of client state included in message + ProofClient []byte `protobuf:"bytes,7,opt,name=proof_client,json=proofClient,proto3" json:"proof_client,omitempty" yaml:"proof_client"` + // proof of client consensus state + ProofConsensus []byte `protobuf:"bytes,8,opt,name=proof_consensus,json=proofConsensus,proto3" json:"proof_consensus,omitempty" yaml:"proof_consensus"` + ConsensusHeight types1.Height `protobuf:"bytes,9,opt,name=consensus_height,json=consensusHeight,proto3" json:"consensus_height" yaml:"consensus_height"` + Signer string `protobuf:"bytes,10,opt,name=signer,proto3" json:"signer,omitempty"` +} + +func (m *MsgConnectionOpenAck) Reset() { *m = MsgConnectionOpenAck{} } +func (m *MsgConnectionOpenAck) String() string { return proto.CompactTextString(m) } +func (*MsgConnectionOpenAck) ProtoMessage() {} +func (*MsgConnectionOpenAck) Descriptor() ([]byte, []int) { + return fileDescriptor_296ab31199620d78, []int{4} +} +func (m *MsgConnectionOpenAck) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *MsgConnectionOpenAck) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return 
xxx_messageInfo_MsgConnectionOpenAck.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *MsgConnectionOpenAck) XXX_Merge(src proto.Message) { + xxx_messageInfo_MsgConnectionOpenAck.Merge(m, src) +} +func (m *MsgConnectionOpenAck) XXX_Size() int { + return m.Size() +} +func (m *MsgConnectionOpenAck) XXX_DiscardUnknown() { + xxx_messageInfo_MsgConnectionOpenAck.DiscardUnknown(m) +} + +var xxx_messageInfo_MsgConnectionOpenAck proto.InternalMessageInfo + +// MsgConnectionOpenAckResponse defines the Msg/ConnectionOpenAck response type. +type MsgConnectionOpenAckResponse struct { +} + +func (m *MsgConnectionOpenAckResponse) Reset() { *m = MsgConnectionOpenAckResponse{} } +func (m *MsgConnectionOpenAckResponse) String() string { return proto.CompactTextString(m) } +func (*MsgConnectionOpenAckResponse) ProtoMessage() {} +func (*MsgConnectionOpenAckResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_296ab31199620d78, []int{5} +} +func (m *MsgConnectionOpenAckResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *MsgConnectionOpenAckResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_MsgConnectionOpenAckResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *MsgConnectionOpenAckResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_MsgConnectionOpenAckResponse.Merge(m, src) +} +func (m *MsgConnectionOpenAckResponse) XXX_Size() int { + return m.Size() +} +func (m *MsgConnectionOpenAckResponse) XXX_DiscardUnknown() { + xxx_messageInfo_MsgConnectionOpenAckResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_MsgConnectionOpenAckResponse proto.InternalMessageInfo + +// MsgConnectionOpenConfirm defines a msg sent by a Relayer to Chain B to +// acknowledge the change of connection state to OPEN on Chain A. 
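+// An illustrative sketch (not part of the generated output); proofAck and
+// proofHeight are assumed placeholders that a relayer would obtain by querying
+// Chain A, and signer is an assumed bech32 account address:
+//
+//	msg := &MsgConnectionOpenConfirm{
+//		ConnectionId: "connection-0",
+//		ProofAck:     proofAck,
+//		ProofHeight:  proofHeight,
+//		Signer:       signer,
+//	}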
+type MsgConnectionOpenConfirm struct { + ConnectionId string `protobuf:"bytes,1,opt,name=connection_id,json=connectionId,proto3" json:"connection_id,omitempty" yaml:"connection_id"` + // proof for the change of the connection state on Chain A: `INIT -> OPEN` + ProofAck []byte `protobuf:"bytes,2,opt,name=proof_ack,json=proofAck,proto3" json:"proof_ack,omitempty" yaml:"proof_ack"` + ProofHeight types1.Height `protobuf:"bytes,3,opt,name=proof_height,json=proofHeight,proto3" json:"proof_height" yaml:"proof_height"` + Signer string `protobuf:"bytes,4,opt,name=signer,proto3" json:"signer,omitempty"` +} + +func (m *MsgConnectionOpenConfirm) Reset() { *m = MsgConnectionOpenConfirm{} } +func (m *MsgConnectionOpenConfirm) String() string { return proto.CompactTextString(m) } +func (*MsgConnectionOpenConfirm) ProtoMessage() {} +func (*MsgConnectionOpenConfirm) Descriptor() ([]byte, []int) { + return fileDescriptor_296ab31199620d78, []int{6} +} +func (m *MsgConnectionOpenConfirm) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *MsgConnectionOpenConfirm) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_MsgConnectionOpenConfirm.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *MsgConnectionOpenConfirm) XXX_Merge(src proto.Message) { + xxx_messageInfo_MsgConnectionOpenConfirm.Merge(m, src) +} +func (m *MsgConnectionOpenConfirm) XXX_Size() int { + return m.Size() +} +func (m *MsgConnectionOpenConfirm) XXX_DiscardUnknown() { + xxx_messageInfo_MsgConnectionOpenConfirm.DiscardUnknown(m) +} + +var xxx_messageInfo_MsgConnectionOpenConfirm proto.InternalMessageInfo + +// MsgConnectionOpenConfirmResponse defines the Msg/ConnectionOpenConfirm +// response type. 
+type MsgConnectionOpenConfirmResponse struct { +} + +func (m *MsgConnectionOpenConfirmResponse) Reset() { *m = MsgConnectionOpenConfirmResponse{} } +func (m *MsgConnectionOpenConfirmResponse) String() string { return proto.CompactTextString(m) } +func (*MsgConnectionOpenConfirmResponse) ProtoMessage() {} +func (*MsgConnectionOpenConfirmResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_296ab31199620d78, []int{7} +} +func (m *MsgConnectionOpenConfirmResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *MsgConnectionOpenConfirmResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_MsgConnectionOpenConfirmResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *MsgConnectionOpenConfirmResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_MsgConnectionOpenConfirmResponse.Merge(m, src) +} +func (m *MsgConnectionOpenConfirmResponse) XXX_Size() int { + return m.Size() +} +func (m *MsgConnectionOpenConfirmResponse) XXX_DiscardUnknown() { + xxx_messageInfo_MsgConnectionOpenConfirmResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_MsgConnectionOpenConfirmResponse proto.InternalMessageInfo + +func init() { + proto.RegisterType((*MsgConnectionOpenInit)(nil), "ibcgo.core.connection.v1.MsgConnectionOpenInit") + proto.RegisterType((*MsgConnectionOpenInitResponse)(nil), "ibcgo.core.connection.v1.MsgConnectionOpenInitResponse") + proto.RegisterType((*MsgConnectionOpenTry)(nil), "ibcgo.core.connection.v1.MsgConnectionOpenTry") + proto.RegisterType((*MsgConnectionOpenTryResponse)(nil), "ibcgo.core.connection.v1.MsgConnectionOpenTryResponse") + proto.RegisterType((*MsgConnectionOpenAck)(nil), "ibcgo.core.connection.v1.MsgConnectionOpenAck") + proto.RegisterType((*MsgConnectionOpenAckResponse)(nil), "ibcgo.core.connection.v1.MsgConnectionOpenAckResponse") + proto.RegisterType((*MsgConnectionOpenConfirm)(nil), "ibcgo.core.connection.v1.MsgConnectionOpenConfirm") + proto.RegisterType((*MsgConnectionOpenConfirmResponse)(nil), "ibcgo.core.connection.v1.MsgConnectionOpenConfirmResponse") +} + +func init() { proto.RegisterFile("ibcgo/core/connection/v1/tx.proto", fileDescriptor_296ab31199620d78) } + +var fileDescriptor_296ab31199620d78 = []byte{ + // 913 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xb4, 0x56, 0xcd, 0x6e, 0xeb, 0x44, + 0x14, 0x8e, 0xf3, 0x9f, 0x49, 0xe0, 0xde, 0x6b, 0x92, 0xd6, 0x84, 0xde, 0x38, 0xb1, 0x04, 0x0a, + 0x8b, 0x6b, 0x93, 0x16, 0x81, 0x14, 0xc4, 0x22, 0xc9, 0x86, 0x0a, 0x55, 0x54, 0xa6, 0x02, 0x09, + 0x21, 0x45, 0x89, 0x33, 0x75, 0xac, 0x24, 0x1e, 0xcb, 0x76, 0xa2, 0x5a, 0x48, 0x6c, 0x01, 0x89, + 0x05, 0x2f, 0x80, 0xd4, 0xb7, 0xe0, 0x15, 0xba, 0xec, 0x92, 0x95, 0x85, 0xda, 0x05, 0xac, 0xfd, + 0x04, 0xc8, 0x33, 0xb6, 0x63, 0x27, 0xb6, 0x54, 0x93, 0xb2, 0x9b, 0x33, 0xe7, 0x3b, 0xe7, 0xcc, + 0x9c, 0xf3, 0x7d, 0xa3, 0x01, 0x1d, 0x65, 0x2a, 0xc9, 0x48, 0x90, 0x90, 0x0e, 0x05, 0x09, 0xa9, + 0x2a, 0x94, 0x4c, 0x05, 0xa9, 0xc2, 0xa6, 0x27, 0x98, 0x37, 0xbc, 0xa6, 0x23, 0x13, 0xd1, 0x0c, + 0x86, 0xf0, 0x2e, 0x84, 0xdf, 0x42, 0xf8, 0x4d, 0xaf, 0x59, 0x97, 0x91, 0x8c, 0x30, 0x48, 0x70, + 0x57, 0x04, 0xdf, 0x7c, 0x57, 0x46, 0x48, 0x5e, 0x42, 0x01, 0x5b, 0xd3, 0xf5, 0xb5, 0x30, 0x51, + 0x2d, 0xcf, 0x15, 0xa9, 0xb6, 0x54, 0xa0, 0x6a, 0xba, 0x95, 0xc8, 0xca, 0x83, 0x7c, 0x98, 0x78, + 0xa0, 0x50, 0x6d, 0x0c, 0xe5, 0xfe, 
0xc8, 0x82, 0xc6, 0x85, 0x21, 0x8f, 0x82, 0xfd, 0xaf, 0x34, + 0xa8, 0x9e, 0xab, 0x8a, 0x49, 0xf7, 0x40, 0x85, 0x24, 0x1d, 0x2b, 0x33, 0x86, 0x6a, 0x53, 0xdd, + 0xca, 0xb0, 0xee, 0xd8, 0xec, 0x4b, 0x6b, 0xb2, 0x5a, 0xf6, 0xb9, 0xc0, 0xc5, 0x89, 0x65, 0xb2, + 0x3e, 0x9f, 0xd1, 0x97, 0xa0, 0x26, 0xa1, 0xb5, 0x6a, 0x42, 0x5d, 0x9b, 0xe8, 0xa6, 0xc5, 0x64, + 0xdb, 0x54, 0xb7, 0x7a, 0xfa, 0x01, 0x9f, 0x74, 0x79, 0x7e, 0x14, 0x42, 0x0f, 0xf3, 0x77, 0x36, + 0x9b, 0x11, 0x23, 0x19, 0xe8, 0xcf, 0x40, 0x69, 0x03, 0x75, 0x43, 0x41, 0x2a, 0x93, 0xc3, 0xc9, + 0x3a, 0xc9, 0xc9, 0xbe, 0x21, 0x40, 0xd1, 0x8f, 0xa0, 0xfb, 0xa0, 0x36, 0x83, 0xcb, 0x89, 0x35, + 0xd6, 0xa0, 0xae, 0xa0, 0x19, 0x93, 0x6f, 0x53, 0xdd, 0xfc, 0xf0, 0xd8, 0xb1, 0xd9, 0x77, 0xc8, + 0x25, 0xc2, 0x5e, 0x4e, 0xac, 0x62, 0xf3, 0x12, 0x5b, 0xf4, 0x11, 0x28, 0x1a, 0x8a, 0xac, 0x42, + 0x9d, 0x29, 0xb8, 0x57, 0x17, 0x3d, 0xab, 0x5f, 0xfe, 0xf9, 0x96, 0xcd, 0xfc, 0x73, 0xcb, 0x66, + 0x38, 0x16, 0xbc, 0x8e, 0x6d, 0x9c, 0x08, 0x0d, 0x0d, 0xa9, 0x06, 0xe4, 0x7e, 0x2f, 0x81, 0xfa, + 0x1e, 0xe2, 0x4a, 0xb7, 0xfe, 0x4b, 0x67, 0xbf, 0x05, 0x47, 0x9a, 0x0e, 0x37, 0x0a, 0x5a, 0x1b, + 0xe3, 0xed, 0xad, 0xdd, 0xf8, 0x2c, 0x8e, 0xef, 0x38, 0x36, 0xfb, 0x9a, 0xc4, 0xc7, 0xe3, 0x38, + 0xb1, 0xee, 0x3b, 0xb6, 0x07, 0xf2, 0x46, 0x46, 0x0a, 0x1a, 0xe6, 0xc4, 0x84, 0x5e, 0x97, 0xeb, + 0x3c, 0xe1, 0x1f, 0xef, 0xf3, 0x8f, 0x1f, 0xa8, 0x56, 0xb8, 0x73, 0xe1, 0x18, 0x4e, 0xac, 0x12, + 0xf3, 0x6b, 0xd7, 0xda, 0x23, 0x41, 0xfe, 0x60, 0x12, 0xec, 0xce, 0xb1, 0x90, 0x62, 0x8e, 0x37, + 0xa0, 0x11, 0xce, 0x35, 0xf6, 0xb8, 0x61, 0x30, 0xc5, 0x76, 0xee, 0x49, 0x74, 0x1a, 0xb6, 0x1d, + 0x9b, 0x3d, 0xf1, 0x6e, 0x1d, 0x97, 0x89, 0x13, 0xeb, 0xe1, 0x7d, 0x2f, 0xcc, 0xa0, 0xbf, 0x07, + 0x35, 0x4d, 0x47, 0xe8, 0x7a, 0x3c, 0x87, 0x8a, 0x3c, 0x37, 0x99, 0x12, 0xee, 0xc3, 0x49, 0xa4, + 0x20, 0x11, 0xed, 0xa6, 0xc7, 0x7f, 0x81, 0x31, 0xc3, 0xf7, 0xdc, 0xdb, 0x6f, 0xef, 0x15, 0x8e, + 0xe7, 0xc4, 0x2a, 0x36, 0x09, 0x92, 0xfe, 0x18, 0x00, 0xe2, 0x55, 0x54, 0xc5, 0x64, 0xca, 0x6d, + 0xaa, 0x5b, 0x1b, 0x36, 0x1c, 0x9b, 0x7d, 0x15, 0x8e, 0x74, 0x7d, 0x9c, 0x58, 0xc1, 0x06, 0xd6, + 0x74, 0xdf, 0x3f, 0x13, 0xa9, 0xcc, 0x54, 0x70, 0xdc, 0xf1, 0x6e, 0x45, 0xe2, 0xf5, 0x2b, 0x8e, + 0xb0, 0x45, 0x8f, 0xc0, 0x0b, 0xcf, 0xeb, 0xb2, 0x5b, 0x35, 0xd6, 0x06, 0x03, 0x70, 0x78, 0xd3, + 0xb1, 0xd9, 0xa3, 0x48, 0xb8, 0x0f, 0xe0, 0xc4, 0xb7, 0x49, 0x06, 0x7f, 0x83, 0x9e, 0x83, 0x97, + 0x81, 0xd7, 0x6f, 0x4c, 0xf5, 0x09, 0x8d, 0x61, 0xbd, 0xc6, 0x1c, 0xfb, 0x83, 0x88, 0xe6, 0xe0, + 0xc4, 0x17, 0xc1, 0x96, 0xd7, 0xa0, 0xad, 0x80, 0x6b, 0x09, 0x02, 0x6e, 0x81, 0x93, 0x38, 0x79, + 0x06, 0xfa, 0xfd, 0xbb, 0x10, 0xa3, 0xdf, 0x81, 0xb4, 0xa0, 0x3f, 0x07, 0x6f, 0x45, 0x35, 0x48, + 0x34, 0xcc, 0x38, 0x36, 0x5b, 0x0f, 0xce, 0x17, 0x96, 0x5e, 0x4d, 0x0a, 0x4b, 0x4e, 0x02, 0xcd, + 0x08, 0x91, 0xe2, 0xf4, 0xfc, 0xbe, 0x63, 0xb3, 0x9d, 0x18, 0xd2, 0xed, 0x24, 0x66, 0xc2, 0xce, + 0x88, 0xae, 0x0f, 0x7a, 0x38, 0x77, 0x1f, 0x85, 0xfc, 0xc1, 0x8f, 0xc2, 0xae, 0x18, 0x0a, 0xcf, + 0x2a, 0x86, 0x1e, 0x20, 0x1c, 0x1f, 0x9b, 0xba, 0xc5, 0x14, 0x31, 0x29, 0x43, 0x0f, 0x6a, 0xe0, + 0xe2, 0xc4, 0x32, 0x5e, 0xbb, 0x6f, 0xf0, 0xae, 0x12, 0x4a, 0x87, 0x29, 0xa1, 0xfc, 0x2c, 0x4a, + 0xa8, 0xfc, 0xcf, 0x4a, 0x00, 0x29, 0x94, 0x30, 0x90, 0x16, 0x81, 0x12, 0x7e, 0xcd, 0x02, 0x66, + 0x0f, 0x30, 0x42, 0xea, 0xb5, 0xa2, 0xaf, 0x0e, 0x55, 0x43, 0x30, 0xbb, 0x89, 0xb4, 0xc0, 0xe4, + 0x8f, 0x99, 0xdd, 0x44, 0x5a, 0xf8, 0xb3, 0x73, 0xf5, 0xb7, 0x4b, 0xa6, 0xdc, 0xb3, 0x92, 0x69, + 0xdb, 0xae, 0x7c, 0x42, 0xbb, 0x38, 0xd0, 0x4e, 0xea, 0x86, 
0xdf, 0xb2, 0xd3, 0x5f, 0xf2, 0x20, + 0x77, 0x61, 0xc8, 0xf4, 0x8f, 0x80, 0x8e, 0xf9, 0x5b, 0x09, 0xc9, 0x62, 0x8c, 0xfd, 0x53, 0x34, + 0x3f, 0x4d, 0x19, 0xe0, 0x9f, 0x83, 0xfe, 0x01, 0xbc, 0xda, 0xff, 0x80, 0xf0, 0x29, 0xb2, 0x5d, + 0xe9, 0x56, 0xf3, 0x93, 0x74, 0xf8, 0xe4, 0xe2, 0xee, 0xf4, 0xd2, 0x14, 0x1f, 0x48, 0x8b, 0x54, + 0xc5, 0x43, 0xa4, 0xa5, 0x7f, 0xa2, 0x40, 0x23, 0x9e, 0xb1, 0xa7, 0x29, 0x32, 0x7a, 0x31, 0xcd, + 0x7e, 0xfa, 0x18, 0xff, 0x24, 0xc3, 0x2f, 0xef, 0x1e, 0x5a, 0xd4, 0xfd, 0x43, 0x8b, 0xfa, 0xeb, + 0xa1, 0x45, 0xfd, 0xf6, 0xd8, 0xca, 0xdc, 0x3f, 0xb6, 0x32, 0x7f, 0x3e, 0xb6, 0x32, 0xdf, 0xf5, + 0x64, 0xc5, 0x9c, 0xaf, 0xa7, 0xbc, 0x84, 0x56, 0x82, 0x84, 0x8c, 0x15, 0x32, 0x04, 0x65, 0x2a, + 0xbd, 0xf1, 0xff, 0xee, 0x1f, 0x9d, 0xbd, 0x09, 0x7d, 0xdf, 0x4d, 0x4b, 0x83, 0xc6, 0xb4, 0x88, + 0x5f, 0xdf, 0xb3, 0x7f, 0x03, 0x00, 0x00, 0xff, 0xff, 0x8d, 0x0e, 0x60, 0x2e, 0x75, 0x0c, 0x00, + 0x00, +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConn + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion4 + +// MsgClient is the client API for Msg service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. +type MsgClient interface { + // ConnectionOpenInit defines a rpc handler method for MsgConnectionOpenInit. + ConnectionOpenInit(ctx context.Context, in *MsgConnectionOpenInit, opts ...grpc.CallOption) (*MsgConnectionOpenInitResponse, error) + // ConnectionOpenTry defines a rpc handler method for MsgConnectionOpenTry. + ConnectionOpenTry(ctx context.Context, in *MsgConnectionOpenTry, opts ...grpc.CallOption) (*MsgConnectionOpenTryResponse, error) + // ConnectionOpenAck defines a rpc handler method for MsgConnectionOpenAck. + ConnectionOpenAck(ctx context.Context, in *MsgConnectionOpenAck, opts ...grpc.CallOption) (*MsgConnectionOpenAckResponse, error) + // ConnectionOpenConfirm defines a rpc handler method for + // MsgConnectionOpenConfirm. + ConnectionOpenConfirm(ctx context.Context, in *MsgConnectionOpenConfirm, opts ...grpc.CallOption) (*MsgConnectionOpenConfirmResponse, error) +} + +type msgClient struct { + cc grpc1.ClientConn +} + +func NewMsgClient(cc grpc1.ClientConn) MsgClient { + return &msgClient{cc} +} + +func (c *msgClient) ConnectionOpenInit(ctx context.Context, in *MsgConnectionOpenInit, opts ...grpc.CallOption) (*MsgConnectionOpenInitResponse, error) { + out := new(MsgConnectionOpenInitResponse) + err := c.cc.Invoke(ctx, "/ibcgo.core.connection.v1.Msg/ConnectionOpenInit", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *msgClient) ConnectionOpenTry(ctx context.Context, in *MsgConnectionOpenTry, opts ...grpc.CallOption) (*MsgConnectionOpenTryResponse, error) { + out := new(MsgConnectionOpenTryResponse) + err := c.cc.Invoke(ctx, "/ibcgo.core.connection.v1.Msg/ConnectionOpenTry", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *msgClient) ConnectionOpenAck(ctx context.Context, in *MsgConnectionOpenAck, opts ...grpc.CallOption) (*MsgConnectionOpenAckResponse, error) { + out := new(MsgConnectionOpenAckResponse) + err := c.cc.Invoke(ctx, "/ibcgo.core.connection.v1.Msg/ConnectionOpenAck", in, out, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +func (c *msgClient) ConnectionOpenConfirm(ctx context.Context, in *MsgConnectionOpenConfirm, opts ...grpc.CallOption) (*MsgConnectionOpenConfirmResponse, error) { + out := new(MsgConnectionOpenConfirmResponse) + err := c.cc.Invoke(ctx, "/ibcgo.core.connection.v1.Msg/ConnectionOpenConfirm", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// MsgServer is the server API for Msg service. +type MsgServer interface { + // ConnectionOpenInit defines a rpc handler method for MsgConnectionOpenInit. + ConnectionOpenInit(context.Context, *MsgConnectionOpenInit) (*MsgConnectionOpenInitResponse, error) + // ConnectionOpenTry defines a rpc handler method for MsgConnectionOpenTry. + ConnectionOpenTry(context.Context, *MsgConnectionOpenTry) (*MsgConnectionOpenTryResponse, error) + // ConnectionOpenAck defines a rpc handler method for MsgConnectionOpenAck. + ConnectionOpenAck(context.Context, *MsgConnectionOpenAck) (*MsgConnectionOpenAckResponse, error) + // ConnectionOpenConfirm defines a rpc handler method for + // MsgConnectionOpenConfirm. + ConnectionOpenConfirm(context.Context, *MsgConnectionOpenConfirm) (*MsgConnectionOpenConfirmResponse, error) +} + +// UnimplementedMsgServer can be embedded to have forward compatible implementations. +type UnimplementedMsgServer struct { +} + +func (*UnimplementedMsgServer) ConnectionOpenInit(ctx context.Context, req *MsgConnectionOpenInit) (*MsgConnectionOpenInitResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method ConnectionOpenInit not implemented") +} +func (*UnimplementedMsgServer) ConnectionOpenTry(ctx context.Context, req *MsgConnectionOpenTry) (*MsgConnectionOpenTryResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method ConnectionOpenTry not implemented") +} +func (*UnimplementedMsgServer) ConnectionOpenAck(ctx context.Context, req *MsgConnectionOpenAck) (*MsgConnectionOpenAckResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method ConnectionOpenAck not implemented") +} +func (*UnimplementedMsgServer) ConnectionOpenConfirm(ctx context.Context, req *MsgConnectionOpenConfirm) (*MsgConnectionOpenConfirmResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method ConnectionOpenConfirm not implemented") +} + +func RegisterMsgServer(s grpc1.Server, srv MsgServer) { + s.RegisterService(&_Msg_serviceDesc, srv) +} + +func _Msg_ConnectionOpenInit_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(MsgConnectionOpenInit) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(MsgServer).ConnectionOpenInit(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/ibcgo.core.connection.v1.Msg/ConnectionOpenInit", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(MsgServer).ConnectionOpenInit(ctx, req.(*MsgConnectionOpenInit)) + } + return interceptor(ctx, in, info, handler) +} + +func _Msg_ConnectionOpenTry_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(MsgConnectionOpenTry) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(MsgServer).ConnectionOpenTry(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: 
"/ibcgo.core.connection.v1.Msg/ConnectionOpenTry", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(MsgServer).ConnectionOpenTry(ctx, req.(*MsgConnectionOpenTry)) + } + return interceptor(ctx, in, info, handler) +} + +func _Msg_ConnectionOpenAck_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(MsgConnectionOpenAck) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(MsgServer).ConnectionOpenAck(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/ibcgo.core.connection.v1.Msg/ConnectionOpenAck", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(MsgServer).ConnectionOpenAck(ctx, req.(*MsgConnectionOpenAck)) + } + return interceptor(ctx, in, info, handler) +} + +func _Msg_ConnectionOpenConfirm_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(MsgConnectionOpenConfirm) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(MsgServer).ConnectionOpenConfirm(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/ibcgo.core.connection.v1.Msg/ConnectionOpenConfirm", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(MsgServer).ConnectionOpenConfirm(ctx, req.(*MsgConnectionOpenConfirm)) + } + return interceptor(ctx, in, info, handler) +} + +var _Msg_serviceDesc = grpc.ServiceDesc{ + ServiceName: "ibcgo.core.connection.v1.Msg", + HandlerType: (*MsgServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "ConnectionOpenInit", + Handler: _Msg_ConnectionOpenInit_Handler, + }, + { + MethodName: "ConnectionOpenTry", + Handler: _Msg_ConnectionOpenTry_Handler, + }, + { + MethodName: "ConnectionOpenAck", + Handler: _Msg_ConnectionOpenAck_Handler, + }, + { + MethodName: "ConnectionOpenConfirm", + Handler: _Msg_ConnectionOpenConfirm_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "ibcgo/core/connection/v1/tx.proto", +} + +func (m *MsgConnectionOpenInit) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *MsgConnectionOpenInit) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *MsgConnectionOpenInit) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Signer) > 0 { + i -= len(m.Signer) + copy(dAtA[i:], m.Signer) + i = encodeVarintTx(dAtA, i, uint64(len(m.Signer))) + i-- + dAtA[i] = 0x2a + } + if m.DelayPeriod != 0 { + i = encodeVarintTx(dAtA, i, uint64(m.DelayPeriod)) + i-- + dAtA[i] = 0x20 + } + if m.Version != nil { + { + size, err := m.Version.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTx(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + { + size, err := m.Counterparty.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTx(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + if len(m.ClientId) > 0 { + i -= len(m.ClientId) + copy(dAtA[i:], m.ClientId) + i = encodeVarintTx(dAtA, i, uint64(len(m.ClientId))) + i-- + dAtA[i] = 0xa + } + return 
len(dAtA) - i, nil +} + +func (m *MsgConnectionOpenInitResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *MsgConnectionOpenInitResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *MsgConnectionOpenInitResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + return len(dAtA) - i, nil +} + +func (m *MsgConnectionOpenTry) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *MsgConnectionOpenTry) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *MsgConnectionOpenTry) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Signer) > 0 { + i -= len(m.Signer) + copy(dAtA[i:], m.Signer) + i = encodeVarintTx(dAtA, i, uint64(len(m.Signer))) + i-- + dAtA[i] = 0x62 + } + { + size, err := m.ConsensusHeight.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTx(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x5a + if len(m.ProofConsensus) > 0 { + i -= len(m.ProofConsensus) + copy(dAtA[i:], m.ProofConsensus) + i = encodeVarintTx(dAtA, i, uint64(len(m.ProofConsensus))) + i-- + dAtA[i] = 0x52 + } + if len(m.ProofClient) > 0 { + i -= len(m.ProofClient) + copy(dAtA[i:], m.ProofClient) + i = encodeVarintTx(dAtA, i, uint64(len(m.ProofClient))) + i-- + dAtA[i] = 0x4a + } + if len(m.ProofInit) > 0 { + i -= len(m.ProofInit) + copy(dAtA[i:], m.ProofInit) + i = encodeVarintTx(dAtA, i, uint64(len(m.ProofInit))) + i-- + dAtA[i] = 0x42 + } + { + size, err := m.ProofHeight.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTx(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x3a + if len(m.CounterpartyVersions) > 0 { + for iNdEx := len(m.CounterpartyVersions) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.CounterpartyVersions[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTx(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x32 + } + } + if m.DelayPeriod != 0 { + i = encodeVarintTx(dAtA, i, uint64(m.DelayPeriod)) + i-- + dAtA[i] = 0x28 + } + { + size, err := m.Counterparty.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTx(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + if m.ClientState != nil { + { + size, err := m.ClientState.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTx(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + if len(m.PreviousConnectionId) > 0 { + i -= len(m.PreviousConnectionId) + copy(dAtA[i:], m.PreviousConnectionId) + i = encodeVarintTx(dAtA, i, uint64(len(m.PreviousConnectionId))) + i-- + dAtA[i] = 0x12 + } + if len(m.ClientId) > 0 { + i -= len(m.ClientId) + copy(dAtA[i:], m.ClientId) + i = encodeVarintTx(dAtA, i, uint64(len(m.ClientId))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *MsgConnectionOpenTryResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + 
return nil, err + } + return dAtA[:n], nil +} + +func (m *MsgConnectionOpenTryResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *MsgConnectionOpenTryResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + return len(dAtA) - i, nil +} + +func (m *MsgConnectionOpenAck) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *MsgConnectionOpenAck) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *MsgConnectionOpenAck) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Signer) > 0 { + i -= len(m.Signer) + copy(dAtA[i:], m.Signer) + i = encodeVarintTx(dAtA, i, uint64(len(m.Signer))) + i-- + dAtA[i] = 0x52 + } + { + size, err := m.ConsensusHeight.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTx(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x4a + if len(m.ProofConsensus) > 0 { + i -= len(m.ProofConsensus) + copy(dAtA[i:], m.ProofConsensus) + i = encodeVarintTx(dAtA, i, uint64(len(m.ProofConsensus))) + i-- + dAtA[i] = 0x42 + } + if len(m.ProofClient) > 0 { + i -= len(m.ProofClient) + copy(dAtA[i:], m.ProofClient) + i = encodeVarintTx(dAtA, i, uint64(len(m.ProofClient))) + i-- + dAtA[i] = 0x3a + } + if len(m.ProofTry) > 0 { + i -= len(m.ProofTry) + copy(dAtA[i:], m.ProofTry) + i = encodeVarintTx(dAtA, i, uint64(len(m.ProofTry))) + i-- + dAtA[i] = 0x32 + } + { + size, err := m.ProofHeight.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTx(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2a + if m.ClientState != nil { + { + size, err := m.ClientState.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTx(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } + if m.Version != nil { + { + size, err := m.Version.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTx(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + if len(m.CounterpartyConnectionId) > 0 { + i -= len(m.CounterpartyConnectionId) + copy(dAtA[i:], m.CounterpartyConnectionId) + i = encodeVarintTx(dAtA, i, uint64(len(m.CounterpartyConnectionId))) + i-- + dAtA[i] = 0x12 + } + if len(m.ConnectionId) > 0 { + i -= len(m.ConnectionId) + copy(dAtA[i:], m.ConnectionId) + i = encodeVarintTx(dAtA, i, uint64(len(m.ConnectionId))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *MsgConnectionOpenAckResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *MsgConnectionOpenAckResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *MsgConnectionOpenAckResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + return len(dAtA) - i, nil +} + +func (m *MsgConnectionOpenConfirm) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + 
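+// Illustrative note (not part of the generated output): the generated
+// Marshal/Unmarshal methods round-trip a message through its proto wire
+// encoding, e.g. assuming a populated msg of type *MsgConnectionOpenConfirm:
+//
+//	bz, err := msg.Marshal()
+//	if err == nil {
+//		var decoded MsgConnectionOpenConfirm
+//		err = decoded.Unmarshal(bz)
+//	}
+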
+func (m *MsgConnectionOpenConfirm) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *MsgConnectionOpenConfirm) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Signer) > 0 { + i -= len(m.Signer) + copy(dAtA[i:], m.Signer) + i = encodeVarintTx(dAtA, i, uint64(len(m.Signer))) + i-- + dAtA[i] = 0x22 + } + { + size, err := m.ProofHeight.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTx(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + if len(m.ProofAck) > 0 { + i -= len(m.ProofAck) + copy(dAtA[i:], m.ProofAck) + i = encodeVarintTx(dAtA, i, uint64(len(m.ProofAck))) + i-- + dAtA[i] = 0x12 + } + if len(m.ConnectionId) > 0 { + i -= len(m.ConnectionId) + copy(dAtA[i:], m.ConnectionId) + i = encodeVarintTx(dAtA, i, uint64(len(m.ConnectionId))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *MsgConnectionOpenConfirmResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *MsgConnectionOpenConfirmResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *MsgConnectionOpenConfirmResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + return len(dAtA) - i, nil +} + +func encodeVarintTx(dAtA []byte, offset int, v uint64) int { + offset -= sovTx(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *MsgConnectionOpenInit) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.ClientId) + if l > 0 { + n += 1 + l + sovTx(uint64(l)) + } + l = m.Counterparty.Size() + n += 1 + l + sovTx(uint64(l)) + if m.Version != nil { + l = m.Version.Size() + n += 1 + l + sovTx(uint64(l)) + } + if m.DelayPeriod != 0 { + n += 1 + sovTx(uint64(m.DelayPeriod)) + } + l = len(m.Signer) + if l > 0 { + n += 1 + l + sovTx(uint64(l)) + } + return n +} + +func (m *MsgConnectionOpenInitResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + return n +} + +func (m *MsgConnectionOpenTry) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.ClientId) + if l > 0 { + n += 1 + l + sovTx(uint64(l)) + } + l = len(m.PreviousConnectionId) + if l > 0 { + n += 1 + l + sovTx(uint64(l)) + } + if m.ClientState != nil { + l = m.ClientState.Size() + n += 1 + l + sovTx(uint64(l)) + } + l = m.Counterparty.Size() + n += 1 + l + sovTx(uint64(l)) + if m.DelayPeriod != 0 { + n += 1 + sovTx(uint64(m.DelayPeriod)) + } + if len(m.CounterpartyVersions) > 0 { + for _, e := range m.CounterpartyVersions { + l = e.Size() + n += 1 + l + sovTx(uint64(l)) + } + } + l = m.ProofHeight.Size() + n += 1 + l + sovTx(uint64(l)) + l = len(m.ProofInit) + if l > 0 { + n += 1 + l + sovTx(uint64(l)) + } + l = len(m.ProofClient) + if l > 0 { + n += 1 + l + sovTx(uint64(l)) + } + l = len(m.ProofConsensus) + if l > 0 { + n += 1 + l + sovTx(uint64(l)) + } + l = m.ConsensusHeight.Size() + n += 1 + l + sovTx(uint64(l)) + l = len(m.Signer) + if l > 0 { + n += 1 + l + sovTx(uint64(l)) + } + return n +} + +func (m *MsgConnectionOpenTryResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + return n +} + +func (m 
*MsgConnectionOpenAck) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.ConnectionId) + if l > 0 { + n += 1 + l + sovTx(uint64(l)) + } + l = len(m.CounterpartyConnectionId) + if l > 0 { + n += 1 + l + sovTx(uint64(l)) + } + if m.Version != nil { + l = m.Version.Size() + n += 1 + l + sovTx(uint64(l)) + } + if m.ClientState != nil { + l = m.ClientState.Size() + n += 1 + l + sovTx(uint64(l)) + } + l = m.ProofHeight.Size() + n += 1 + l + sovTx(uint64(l)) + l = len(m.ProofTry) + if l > 0 { + n += 1 + l + sovTx(uint64(l)) + } + l = len(m.ProofClient) + if l > 0 { + n += 1 + l + sovTx(uint64(l)) + } + l = len(m.ProofConsensus) + if l > 0 { + n += 1 + l + sovTx(uint64(l)) + } + l = m.ConsensusHeight.Size() + n += 1 + l + sovTx(uint64(l)) + l = len(m.Signer) + if l > 0 { + n += 1 + l + sovTx(uint64(l)) + } + return n +} + +func (m *MsgConnectionOpenAckResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + return n +} + +func (m *MsgConnectionOpenConfirm) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.ConnectionId) + if l > 0 { + n += 1 + l + sovTx(uint64(l)) + } + l = len(m.ProofAck) + if l > 0 { + n += 1 + l + sovTx(uint64(l)) + } + l = m.ProofHeight.Size() + n += 1 + l + sovTx(uint64(l)) + l = len(m.Signer) + if l > 0 { + n += 1 + l + sovTx(uint64(l)) + } + return n +} + +func (m *MsgConnectionOpenConfirmResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + return n +} + +func sovTx(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozTx(x uint64) (n int) { + return sovTx(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *MsgConnectionOpenInit) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTx + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MsgConnectionOpenInit: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MsgConnectionOpenInit: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ClientId", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTx + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTx + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTx + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ClientId = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Counterparty", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTx + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTx + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTx + } + if 
postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Counterparty.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Version", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTx + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTx + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTx + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Version == nil { + m.Version = &Version{} + } + if err := m.Version.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field DelayPeriod", wireType) + } + m.DelayPeriod = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTx + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.DelayPeriod |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Signer", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTx + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTx + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTx + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Signer = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTx(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTx + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *MsgConnectionOpenInitResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTx + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MsgConnectionOpenInitResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MsgConnectionOpenInitResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipTx(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTx + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *MsgConnectionOpenTry) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTx + } + if iNdEx 
>= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MsgConnectionOpenTry: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MsgConnectionOpenTry: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ClientId", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTx + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTx + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTx + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ClientId = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PreviousConnectionId", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTx + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTx + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTx + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.PreviousConnectionId = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ClientState", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTx + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTx + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTx + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ClientState == nil { + m.ClientState = &types.Any{} + } + if err := m.ClientState.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Counterparty", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTx + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTx + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTx + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Counterparty.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field DelayPeriod", wireType) + } + m.DelayPeriod = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTx + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.DelayPeriod |= uint64(b&0x7F) << 
shift + if b < 0x80 { + break + } + } + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CounterpartyVersions", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTx + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTx + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTx + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.CounterpartyVersions = append(m.CounterpartyVersions, &Version{}) + if err := m.CounterpartyVersions[len(m.CounterpartyVersions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ProofHeight", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTx + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTx + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTx + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ProofHeight.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ProofInit", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTx + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthTx + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthTx + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ProofInit = append(m.ProofInit[:0], dAtA[iNdEx:postIndex]...) + if m.ProofInit == nil { + m.ProofInit = []byte{} + } + iNdEx = postIndex + case 9: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ProofClient", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTx + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthTx + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthTx + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ProofClient = append(m.ProofClient[:0], dAtA[iNdEx:postIndex]...) + if m.ProofClient == nil { + m.ProofClient = []byte{} + } + iNdEx = postIndex + case 10: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ProofConsensus", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTx + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthTx + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthTx + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ProofConsensus = append(m.ProofConsensus[:0], dAtA[iNdEx:postIndex]...) 
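An aside on the decoding loops that recur throughout this generated file: every `for shift := uint(0); ; shift += 7` block reads a base-128 varint. The first varint of each field is the key, whose low three bits carry the wire type and whose remaining bits carry the field number; length-delimited fields (wire type 2), such as the identifier strings and proof bytes here, are then prefixed with a varint byte length. A minimal standalone sketch of that step, using only the standard library and a hypothetical payload, separate from the generated code:

package main

import (
	"errors"
	"fmt"
)

// readUvarint mirrors the shift-by-7 loops in the generated Unmarshal methods:
// seven payload bits per byte, with the high bit set on every byte except the last.
func readUvarint(buf []byte) (value uint64, n int, err error) {
	for shift := uint(0); shift < 64; shift += 7 {
		if n >= len(buf) {
			return 0, 0, errors.New("unexpected EOF")
		}
		b := buf[n]
		n++
		value |= uint64(b&0x7F) << shift
		if b < 0x80 {
			return value, n, nil
		}
	}
	return 0, 0, errors.New("varint overflows 64 bits")
}

func main() {
	// Key 0x0A = field 1, wire type 2, followed by a 9-byte payload — the same
	// shape as the ClientId field decoded in the generated code.
	msg := append([]byte{0x0A, 0x09}, []byte("client-id")...)

	key, n, _ := readUvarint(msg)
	fieldNum, wireType := key>>3, key&0x7

	length, m, _ := readUvarint(msg[n:])
	payload := msg[n+m : n+m+int(length)]

	fmt.Println(fieldNum, wireType, string(payload)) // 1 2 client-id
}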
+ if m.ProofConsensus == nil { + m.ProofConsensus = []byte{} + } + iNdEx = postIndex + case 11: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ConsensusHeight", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTx + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTx + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTx + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ConsensusHeight.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 12: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Signer", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTx + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTx + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTx + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Signer = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTx(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTx + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *MsgConnectionOpenTryResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTx + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MsgConnectionOpenTryResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MsgConnectionOpenTryResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipTx(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTx + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *MsgConnectionOpenAck) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTx + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MsgConnectionOpenAck: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MsgConnectionOpenAck: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return 
fmt.Errorf("proto: wrong wireType = %d for field ConnectionId", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTx + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTx + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTx + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ConnectionId = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CounterpartyConnectionId", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTx + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTx + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTx + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.CounterpartyConnectionId = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Version", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTx + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTx + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTx + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Version == nil { + m.Version = &Version{} + } + if err := m.Version.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ClientState", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTx + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTx + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTx + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ClientState == nil { + m.ClientState = &types.Any{} + } + if err := m.ClientState.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ProofHeight", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTx + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTx + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTx + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ProofHeight.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ProofTry", 
wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTx + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthTx + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthTx + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ProofTry = append(m.ProofTry[:0], dAtA[iNdEx:postIndex]...) + if m.ProofTry == nil { + m.ProofTry = []byte{} + } + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ProofClient", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTx + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthTx + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthTx + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ProofClient = append(m.ProofClient[:0], dAtA[iNdEx:postIndex]...) + if m.ProofClient == nil { + m.ProofClient = []byte{} + } + iNdEx = postIndex + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ProofConsensus", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTx + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthTx + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthTx + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ProofConsensus = append(m.ProofConsensus[:0], dAtA[iNdEx:postIndex]...) 
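// The append-into-a-zero-length-slice idiom above copies the proof bytes out of
// dAtA instead of sub-slicing it, so the decoded message never aliases the caller's
// buffer; the nil check that follows then normalizes an absent field to a non-nil
// empty slice.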
+ if m.ProofConsensus == nil { + m.ProofConsensus = []byte{} + } + iNdEx = postIndex + case 9: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ConsensusHeight", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTx + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTx + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTx + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ConsensusHeight.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 10: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Signer", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTx + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTx + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTx + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Signer = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTx(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTx + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *MsgConnectionOpenAckResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTx + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MsgConnectionOpenAckResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MsgConnectionOpenAckResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipTx(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTx + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *MsgConnectionOpenConfirm) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTx + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MsgConnectionOpenConfirm: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MsgConnectionOpenConfirm: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + 
return fmt.Errorf("proto: wrong wireType = %d for field ConnectionId", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTx + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTx + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTx + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ConnectionId = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ProofAck", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTx + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthTx + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthTx + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ProofAck = append(m.ProofAck[:0], dAtA[iNdEx:postIndex]...) + if m.ProofAck == nil { + m.ProofAck = []byte{} + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ProofHeight", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTx + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTx + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTx + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ProofHeight.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Signer", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTx + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTx + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTx + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Signer = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTx(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTx + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *MsgConnectionOpenConfirmResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTx + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MsgConnectionOpenConfirmResponse: wiretype 
end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MsgConnectionOpenConfirmResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipTx(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTx + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipTx(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowTx + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowTx + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowTx + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthTx + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupTx + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthTx + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthTx = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowTx = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupTx = fmt.Errorf("proto: unexpected end of group") +) diff --git a/core/03-connection/types/version.go b/core/03-connection/types/version.go new file mode 100644 index 0000000000..10c5b33d28 --- /dev/null +++ b/core/03-connection/types/version.go @@ -0,0 +1,220 @@ +package types + +import ( + "strings" + + sdkerrors "github.com/cosmos/cosmos-sdk/types/errors" + "github.com/cosmos/cosmos-sdk/x/ibc/core/exported" +) + +var ( + // DefaultIBCVersion represents the latest supported version of IBC used + // in connection version negotiation. The current version supports only + // ORDERED and UNORDERED channels and requires at least one channel type + // to be agreed upon. + DefaultIBCVersion = NewVersion(DefaultIBCVersionIdentifier, []string{"ORDER_ORDERED", "ORDER_UNORDERED"}) + + // DefaultIBCVersionIdentifier is the IBC v1.0.0 protocol version identifier + DefaultIBCVersionIdentifier = "1" + + // AllowNilFeatureSet is a helper map to indicate if a specified version + // identifier is allowed to have a nil feature set. Any versions supported, + // but not included in the map default to not supporting nil feature sets. + allowNilFeatureSet = map[string]bool{ + DefaultIBCVersionIdentifier: false, + } +) + +var _ exported.Version = &Version{} + +// NewVersion returns a new instance of Version. 
+func NewVersion(identifier string, features []string) *Version { + return &Version{ + Identifier: identifier, + Features: features, + } +} + +// GetIdentifier implements the VersionI interface +func (version Version) GetIdentifier() string { + return version.Identifier +} + +// GetFeatures implements the VersionI interface +func (version Version) GetFeatures() []string { + return version.Features +} + +// ValidateVersion does basic validation of the version identifier and +// features. It unmarshals the version string into a Version object. +func ValidateVersion(version *Version) error { + if version == nil { + return sdkerrors.Wrap(ErrInvalidVersion, "version cannot be nil") + } + if strings.TrimSpace(version.Identifier) == "" { + return sdkerrors.Wrap(ErrInvalidVersion, "version identifier cannot be blank") + } + for i, feature := range version.Features { + if strings.TrimSpace(feature) == "" { + return sdkerrors.Wrapf(ErrInvalidVersion, "feature cannot be blank, index %d", i) + } + } + + return nil +} + +// VerifyProposedVersion verifies that the entire feature set in the +// proposed version is supported by this chain. If the feature set is +// empty it verifies that this is allowed for the specified version +// identifier. +func (version Version) VerifyProposedVersion(proposedVersion exported.Version) error { + if proposedVersion.GetIdentifier() != version.GetIdentifier() { + return sdkerrors.Wrapf( + ErrVersionNegotiationFailed, + "proposed version identifier does not equal supported version identifier (%s != %s)", proposedVersion.GetIdentifier(), version.GetIdentifier(), + ) + } + + if len(proposedVersion.GetFeatures()) == 0 && !allowNilFeatureSet[proposedVersion.GetIdentifier()] { + return sdkerrors.Wrapf( + ErrVersionNegotiationFailed, + "nil feature sets are not supported for version identifier (%s)", proposedVersion.GetIdentifier(), + ) + } + + for _, proposedFeature := range proposedVersion.GetFeatures() { + if !contains(proposedFeature, version.GetFeatures()) { + return sdkerrors.Wrapf( + ErrVersionNegotiationFailed, + "proposed feature (%s) is not a supported feature set (%s)", proposedFeature, version.GetFeatures(), + ) + } + } + + return nil +} + +// VerifySupportedFeature takes in a version and feature string and returns +// true if the feature is supported by the version and false otherwise. +func VerifySupportedFeature(version exported.Version, feature string) bool { + for _, f := range version.GetFeatures() { + if f == feature { + return true + } + } + return false +} + +// GetCompatibleVersions returns a descending ordered set of compatible IBC +// versions for the caller chain's connection end. The latest supported +// version should be first element and the set should descend to the oldest +// supported version. +func GetCompatibleVersions() []exported.Version { + return []exported.Version{DefaultIBCVersion} +} + +// IsSupportedVersion returns true if the proposed version has a matching version +// identifier and its entire feature set is supported or the version identifier +// supports an empty feature set. +func IsSupportedVersion(proposedVersion *Version) bool { + supportedVersion, found := FindSupportedVersion(proposedVersion, GetCompatibleVersions()) + if !found { + return false + } + + if err := supportedVersion.VerifyProposedVersion(proposedVersion); err != nil { + return false + } + + return true +} + +// FindSupportedVersion returns the version with a matching version identifier +// if it exists. 
The returned boolean is true if the version is found and +// false otherwise. +func FindSupportedVersion(version exported.Version, supportedVersions []exported.Version) (exported.Version, bool) { + for _, supportedVersion := range supportedVersions { + if version.GetIdentifier() == supportedVersion.GetIdentifier() { + return supportedVersion, true + } + } + return nil, false +} + +// PickVersion iterates over the descending ordered set of compatible IBC +// versions and selects the first version with a version identifier that is +// supported by the counterparty. The returned version contains a feature +// set with the intersection of the features supported by the source and +// counterparty chains. If the feature set intersection is nil and this is +// not allowed for the chosen version identifier then the search for a +// compatible version continues. This function is called in the ConnOpenTry +// handshake procedure. +// +// CONTRACT: PickVersion must only provide a version that is in the +// intersection of the supported versions and the counterparty versions. +func PickVersion(supportedVersions, counterpartyVersions []exported.Version) (*Version, error) { + for _, supportedVersion := range supportedVersions { + // check if the source version is supported by the counterparty + if counterpartyVersion, found := FindSupportedVersion(supportedVersion, counterpartyVersions); found { + featureSet := GetFeatureSetIntersection(supportedVersion.GetFeatures(), counterpartyVersion.GetFeatures()) + if len(featureSet) == 0 && !allowNilFeatureSet[supportedVersion.GetIdentifier()] { + continue + } + + return NewVersion(supportedVersion.GetIdentifier(), featureSet), nil + } + } + + return nil, sdkerrors.Wrapf( + ErrVersionNegotiationFailed, + "failed to find a matching counterparty version (%v) from the supported version list (%v)", counterpartyVersions, supportedVersions, + ) +} + +// GetFeatureSetIntersection returns the intersections of source feature set +// and the counterparty feature set. This is done by iterating over all the +// features in the source version and seeing if they exist in the feature +// set for the counterparty version. +func GetFeatureSetIntersection(sourceFeatureSet, counterpartyFeatureSet []string) (featureSet []string) { + for _, feature := range sourceFeatureSet { + if contains(feature, counterpartyFeatureSet) { + featureSet = append(featureSet, feature) + } + } + + return featureSet +} + +// ExportedVersionsToProto casts a slice of the Version interface to a slice +// of the Version proto definition. +func ExportedVersionsToProto(exportedVersions []exported.Version) []*Version { + versions := make([]*Version, len(exportedVersions)) + for i := range exportedVersions { + versions[i] = exportedVersions[i].(*Version) + } + + return versions +} + +// ProtoVersionsToExported converts a slice of the Version proto definition to +// the Version interface. +func ProtoVersionsToExported(versions []*Version) []exported.Version { + exportedVersions := make([]exported.Version, len(versions)) + for i := range versions { + exportedVersions[i] = versions[i] + } + + return exportedVersions +} + +// contains returns true if the provided string element exists within the +// string set. 
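To make the negotiation helpers above concrete, here is a minimal sketch of the ConnOpenTry selection step. It uses the import paths seen elsewhere in this patch, and the counterparty version list is hypothetical:

package main

import (
	"fmt"

	"github.com/cosmos/cosmos-sdk/x/ibc/core/03-connection/types"
	"github.com/cosmos/cosmos-sdk/x/ibc/core/exported"
)

func main() {
	// Hypothetical counterparty: same identifier "1", but it only supports
	// ordered channels.
	counterparty := []exported.Version{
		types.NewVersion(types.DefaultIBCVersionIdentifier, []string{"ORDER_ORDERED"}),
	}

	// PickVersion walks this chain's compatible versions (currently just
	// DefaultIBCVersion) and returns the matching identifier together with
	// the intersection of the two feature sets.
	picked, err := types.PickVersion(types.GetCompatibleVersions(), counterparty)
	if err != nil {
		panic(err)
	}
	fmt.Println(picked.GetIdentifier(), picked.GetFeatures()) // 1 [ORDER_ORDERED]

	// By construction the picked version is also acceptable to this chain.
	fmt.Println(types.IsSupportedVersion(picked)) // true
}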
+func contains(elem string, set []string) bool { + for _, element := range set { + if elem == element { + return true + } + } + + return false +} diff --git a/core/03-connection/types/version_test.go b/core/03-connection/types/version_test.go new file mode 100644 index 0000000000..8f882dd327 --- /dev/null +++ b/core/03-connection/types/version_test.go @@ -0,0 +1,167 @@ +package types_test + +import ( + "testing" + + "github.com/stretchr/testify/require" + + "github.com/cosmos/cosmos-sdk/x/ibc/core/03-connection/types" + "github.com/cosmos/cosmos-sdk/x/ibc/core/exported" + ibctesting "github.com/cosmos/cosmos-sdk/x/ibc/testing" +) + +func TestValidateVersion(t *testing.T) { + testCases := []struct { + name string + version *types.Version + expPass bool + }{ + {"valid version", types.DefaultIBCVersion, true}, + {"valid empty feature set", types.NewVersion(types.DefaultIBCVersionIdentifier, []string{}), true}, + {"empty version identifier", types.NewVersion(" ", []string{"ORDER_UNORDERED"}), false}, + {"empty feature", types.NewVersion(types.DefaultIBCVersionIdentifier, []string{"ORDER_UNORDERED", " "}), false}, + } + + for i, tc := range testCases { + err := types.ValidateVersion(tc.version) + + if tc.expPass { + require.NoError(t, err, "valid test case %d failed: %s", i, tc.name) + } else { + require.Error(t, err, "invalid test case %d passed: %s", i, tc.name) + } + } +} + +func TestIsSupportedVersion(t *testing.T) { + testCases := []struct { + name string + version *types.Version + expPass bool + }{ + { + "version is supported", + types.ExportedVersionsToProto(types.GetCompatibleVersions())[0], + true, + }, + { + "version is not supported", + &types.Version{}, + false, + }, + { + "version feature is not supported", + types.NewVersion(types.DefaultIBCVersionIdentifier, []string{"ORDER_DAG"}), + false, + }, + } + + for _, tc := range testCases { + require.Equal(t, tc.expPass, types.IsSupportedVersion(tc.version)) + } +} + +func TestFindSupportedVersion(t *testing.T) { + testCases := []struct { + name string + version *types.Version + supportedVersions []exported.Version + expVersion *types.Version + expFound bool + }{ + {"valid supported version", types.DefaultIBCVersion, types.GetCompatibleVersions(), types.DefaultIBCVersion, true}, + {"empty (invalid) version", &types.Version{}, types.GetCompatibleVersions(), &types.Version{}, false}, + {"empty supported versions", types.DefaultIBCVersion, []exported.Version{}, &types.Version{}, false}, + {"desired version is last", types.DefaultIBCVersion, []exported.Version{types.NewVersion("1.1", nil), types.NewVersion("2", []string{"ORDER_UNORDERED"}), types.NewVersion("3", nil), types.DefaultIBCVersion}, types.DefaultIBCVersion, true}, + {"desired version identifier with different feature set", types.NewVersion(types.DefaultIBCVersionIdentifier, []string{"ORDER_DAG"}), types.GetCompatibleVersions(), types.DefaultIBCVersion, true}, + {"version not supported", types.NewVersion("2", []string{"ORDER_DAG"}), types.GetCompatibleVersions(), &types.Version{}, false}, + } + + for i, tc := range testCases { + version, found := types.FindSupportedVersion(tc.version, tc.supportedVersions) + if tc.expFound { + require.Equal(t, tc.expVersion.GetIdentifier(), version.GetIdentifier(), "test case %d: %s", i, tc.name) + require.True(t, found, "test case %d: %s", i, tc.name) + } else { + require.False(t, found, "test case: %s", tc.name) + require.Nil(t, version, "test case: %s", tc.name) + } + } +} + +func TestPickVersion(t *testing.T) { + testCases := []struct { + name 
string + supportedVersions []exported.Version + counterpartyVersions []exported.Version + expVer *types.Version + expPass bool + }{ + {"valid default ibc version", types.GetCompatibleVersions(), types.GetCompatibleVersions(), types.DefaultIBCVersion, true}, + {"valid version in counterparty versions", types.GetCompatibleVersions(), []exported.Version{types.NewVersion("version1", nil), types.NewVersion("2.0.0", []string{"ORDER_UNORDERED-ZK"}), types.DefaultIBCVersion}, types.DefaultIBCVersion, true}, + {"valid identifier match but empty feature set not allowed", types.GetCompatibleVersions(), []exported.Version{types.NewVersion(types.DefaultIBCVersionIdentifier, []string{"DAG", "ORDERED-ZK", "UNORDERED-zk]"})}, types.NewVersion(types.DefaultIBCVersionIdentifier, nil), false}, + {"empty counterparty versions", types.GetCompatibleVersions(), []exported.Version{}, &types.Version{}, false}, + {"non-matching counterparty versions", types.GetCompatibleVersions(), []exported.Version{types.NewVersion("2.0.0", nil)}, &types.Version{}, false}, + {"non-matching counterparty versions (uses ordered channels only) contained in supported versions (uses unordered channels only)", []exported.Version{types.NewVersion(types.DefaultIBCVersionIdentifier, []string{"ORDER_UNORDERED"})}, []exported.Version{types.NewVersion(types.DefaultIBCVersionIdentifier, []string{"ORDER_ORDERED"})}, &types.Version{}, false}, + } + + for i, tc := range testCases { + version, err := types.PickVersion(tc.supportedVersions, tc.counterpartyVersions) + + if tc.expPass { + require.NoError(t, err, "valid test case %d failed: %s", i, tc.name) + } else { + require.Error(t, err, "invalid test case %d passed: %s", i, tc.name) + var emptyVersion *types.Version + require.Equal(t, emptyVersion, version, "invalid test case %d passed: %s", i, tc.name) + } + } +} + +func TestVerifyProposedVersion(t *testing.T) { + testCases := []struct { + name string + proposedVersion *types.Version + supportedVersion *types.Version + expPass bool + }{ + {"entire feature set supported", types.DefaultIBCVersion, types.NewVersion("1", []string{"ORDER_ORDERED", "ORDER_UNORDERED", "ORDER_DAG"}), true}, + {"empty feature sets not supported", types.NewVersion("1", []string{}), types.DefaultIBCVersion, false}, + {"one feature missing", types.DefaultIBCVersion, types.NewVersion("1", []string{"ORDER_UNORDERED", "ORDER_DAG"}), false}, + {"both features missing", types.DefaultIBCVersion, types.NewVersion("1", []string{"ORDER_DAG"}), false}, + {"identifiers do not match", types.NewVersion("2", []string{"ORDER_UNORDERED", "ORDER_ORDERED"}), types.DefaultIBCVersion, false}, + } + + for i, tc := range testCases { + err := tc.supportedVersion.VerifyProposedVersion(tc.proposedVersion) + + if tc.expPass { + require.NoError(t, err, "test case %d: %s", i, tc.name) + } else { + require.Error(t, err, "test case %d: %s", i, tc.name) + } + } + +} + +func TestVerifySupportedFeature(t *testing.T) { + nilFeatures := types.NewVersion(types.DefaultIBCVersionIdentifier, nil) + + testCases := []struct { + name string + version *types.Version + feature string + expPass bool + }{ + {"check ORDERED supported", ibctesting.ConnectionVersion, "ORDER_ORDERED", true}, + {"check UNORDERED supported", ibctesting.ConnectionVersion, "ORDER_UNORDERED", true}, + {"check DAG unsupported", ibctesting.ConnectionVersion, "ORDER_DAG", false}, + {"check empty feature set returns false", nilFeatures, "ORDER_ORDERED", false}, + } + + for i, tc := range testCases { + supported := 
types.VerifySupportedFeature(tc.version, tc.feature) + + require.Equal(t, tc.expPass, supported, "test case %d: %s", i, tc.name) + } +} diff --git a/core/04-channel/client/cli/cli.go b/core/04-channel/client/cli/cli.go new file mode 100644 index 0000000000..baf386feca --- /dev/null +++ b/core/04-channel/client/cli/cli.go @@ -0,0 +1,58 @@ +package cli + +import ( + "github.com/spf13/cobra" + + "github.com/cosmos/cosmos-sdk/client" + "github.com/cosmos/cosmos-sdk/x/ibc/core/04-channel/types" +) + +// GetQueryCmd returns the query commands for IBC channels +func GetQueryCmd() *cobra.Command { + queryCmd := &cobra.Command{ + Use: types.SubModuleName, + Short: "IBC channel query subcommands", + DisableFlagParsing: true, + SuggestionsMinimumDistance: 2, + RunE: client.ValidateCmd, + } + + queryCmd.AddCommand( + GetCmdQueryChannels(), + GetCmdQueryChannel(), + GetCmdQueryConnectionChannels(), + GetCmdQueryChannelClientState(), + GetCmdQueryPacketCommitment(), + GetCmdQueryPacketCommitments(), + GetCmdQueryPacketReceipt(), + GetCmdQueryPacketAcknowledgement(), + GetCmdQueryUnreceivedPackets(), + GetCmdQueryUnreceivedAcks(), + GetCmdQueryNextSequenceReceive(), + // TODO: next sequence Send ? + ) + + return queryCmd +} + +// NewTxCmd returns a CLI command handler for all x/ibc channel transaction commands. +func NewTxCmd() *cobra.Command { + txCmd := &cobra.Command{ + Use: types.SubModuleName, + Short: "IBC channel transaction subcommands", + DisableFlagParsing: true, + SuggestionsMinimumDistance: 2, + RunE: client.ValidateCmd, + } + + txCmd.AddCommand( + NewChannelOpenInitCmd(), + NewChannelOpenTryCmd(), + NewChannelOpenAckCmd(), + NewChannelOpenConfirmCmd(), + NewChannelCloseInitCmd(), + NewChannelCloseConfirmCmd(), + ) + + return txCmd +} diff --git a/core/04-channel/client/cli/query.go b/core/04-channel/client/cli/query.go new file mode 100644 index 0000000000..03df474f1e --- /dev/null +++ b/core/04-channel/client/cli/query.go @@ -0,0 +1,457 @@ +package cli + +import ( + "fmt" + "strconv" + + "github.com/spf13/cobra" + + "github.com/cosmos/cosmos-sdk/client" + "github.com/cosmos/cosmos-sdk/client/flags" + "github.com/cosmos/cosmos-sdk/version" + "github.com/cosmos/cosmos-sdk/x/ibc/core/04-channel/client/utils" + "github.com/cosmos/cosmos-sdk/x/ibc/core/04-channel/types" + host "github.com/cosmos/cosmos-sdk/x/ibc/core/24-host" +) + +const ( + flagSequences = "sequences" +) + +// GetCmdQueryChannels defines the command to query all the channels ends +// that this chain mantains. 
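Before the individual query commands that follow, a brief sketch of how the GetQueryCmd tree from cli.go above is typically mounted under an application's `query ibc` command; the parent command and binary name are assumptions, and in practice this wiring is done by the ibc module's AppModuleBasic rather than by hand:

package main

import (
	"github.com/spf13/cobra"

	channelcli "github.com/cosmos/cosmos-sdk/x/ibc/core/04-channel/client/cli"
)

func main() {
	// Hypothetical "query ibc" parent command.
	ibcQueryCmd := &cobra.Command{
		Use:   "ibc",
		Short: "Querying commands for the IBC module",
	}

	// GetQueryCmd (from cli.go above) registers the whole "channel" subcommand
	// tree: channels, end, packet-commitments, unreceived-packets, and so on.
	ibcQueryCmd.AddCommand(channelcli.GetQueryCmd())

	// Resulting CLI shape: <appd> query ibc channel channels
	_ = ibcQueryCmd.Execute()
}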
+func GetCmdQueryChannels() *cobra.Command { + cmd := &cobra.Command{ + Use: "channels", + Short: "Query all channels", + Long: "Query all channels from a chain", + Example: fmt.Sprintf("%s query %s %s channels", version.AppName, host.ModuleName, types.SubModuleName), + Args: cobra.NoArgs, + RunE: func(cmd *cobra.Command, _ []string) error { + clientCtx, err := client.GetClientQueryContext(cmd) + if err != nil { + return err + } + queryClient := types.NewQueryClient(clientCtx) + + pageReq, err := client.ReadPageRequest(cmd.Flags()) + if err != nil { + return err + } + + req := &types.QueryChannelsRequest{ + Pagination: pageReq, + } + + res, err := queryClient.Channels(cmd.Context(), req) + if err != nil { + return err + } + + return clientCtx.PrintProto(res) + }, + } + + flags.AddQueryFlagsToCmd(cmd) + flags.AddPaginationFlagsToCmd(cmd, "channels") + + return cmd +} + +// GetCmdQueryChannel defines the command to query a channel end +func GetCmdQueryChannel() *cobra.Command { + cmd := &cobra.Command{ + Use: "end [port-id] [channel-id]", + Short: "Query a channel end", + Long: "Query an IBC channel end from a port and channel identifiers", + Example: fmt.Sprintf( + "%s query %s %s end [port-id] [channel-id]", version.AppName, host.ModuleName, types.SubModuleName, + ), + Args: cobra.ExactArgs(2), + RunE: func(cmd *cobra.Command, args []string) error { + clientCtx, err := client.GetClientQueryContext(cmd) + if err != nil { + return err + } + portID := args[0] + channelID := args[1] + prove, _ := cmd.Flags().GetBool(flags.FlagProve) + + channelRes, err := utils.QueryChannel(clientCtx, portID, channelID, prove) + if err != nil { + return err + } + + return clientCtx.PrintProto(channelRes) + }, + } + + cmd.Flags().Bool(flags.FlagProve, true, "show proofs for the query results") + flags.AddQueryFlagsToCmd(cmd) + + return cmd +} + +// GetCmdQueryConnectionChannels defines the command to query all the channels associated with a +// connection +func GetCmdQueryConnectionChannels() *cobra.Command { + cmd := &cobra.Command{ + Use: "connections [connection-id]", + Short: "Query all channels associated with a connection", + Long: "Query all channels associated with a connection", + Example: fmt.Sprintf("%s query %s %s connections [connection-id]", version.AppName, host.ModuleName, types.SubModuleName), + Args: cobra.ExactArgs(1), + RunE: func(cmd *cobra.Command, args []string) error { + clientCtx, err := client.GetClientQueryContext(cmd) + if err != nil { + return err + } + queryClient := types.NewQueryClient(clientCtx) + pageReq, err := client.ReadPageRequest(cmd.Flags()) + if err != nil { + return err + } + + req := &types.QueryConnectionChannelsRequest{ + Connection: args[0], + Pagination: pageReq, + } + + res, err := queryClient.ConnectionChannels(cmd.Context(), req) + if err != nil { + return err + } + + return clientCtx.PrintProto(res) + }, + } + + flags.AddQueryFlagsToCmd(cmd) + flags.AddPaginationFlagsToCmd(cmd, "channels associated with a connection") + + return cmd +} + +// GetCmdQueryChannelClientState defines the command to query a client state from a channel +func GetCmdQueryChannelClientState() *cobra.Command { + cmd := &cobra.Command{ + Use: "client-state [port-id] [channel-id]", + Short: "Query the client state associated with a channel", + Long: "Query the client state associated with a channel, by providing its port and channel identifiers.", + Example: fmt.Sprintf("%s query ibc channel client-state [port-id] [channel-id]", version.AppName), + Args: cobra.ExactArgs(2), + RunE: func(cmd 
*cobra.Command, args []string) error { + clientCtx, err := client.GetClientQueryContext(cmd) + if err != nil { + return err + } + portID := args[0] + channelID := args[1] + + res, err := utils.QueryChannelClientState(clientCtx, portID, channelID, false) + if err != nil { + return err + } + + return clientCtx.PrintProto(res.IdentifiedClientState) + }, + } + + flags.AddQueryFlagsToCmd(cmd) + + return cmd +} + +// GetCmdQueryPacketCommitments defines the command to query all packet commitments associated with +// a channel +func GetCmdQueryPacketCommitments() *cobra.Command { + cmd := &cobra.Command{ + Use: "packet-commitments [port-id] [channel-id]", + Short: "Query all packet commitments associated with a channel", + Long: "Query all packet commitments associated with a channel", + Example: fmt.Sprintf("%s query %s %s packet-commitments [port-id] [channel-id]", version.AppName, host.ModuleName, types.SubModuleName), + Args: cobra.ExactArgs(2), + RunE: func(cmd *cobra.Command, args []string) error { + clientCtx, err := client.GetClientQueryContext(cmd) + if err != nil { + return err + } + queryClient := types.NewQueryClient(clientCtx) + pageReq, err := client.ReadPageRequest(cmd.Flags()) + if err != nil { + return err + } + + req := &types.QueryPacketCommitmentsRequest{ + PortId: args[0], + ChannelId: args[1], + Pagination: pageReq, + } + + res, err := queryClient.PacketCommitments(cmd.Context(), req) + if err != nil { + return err + } + + return clientCtx.PrintProto(res) + }, + } + + flags.AddQueryFlagsToCmd(cmd) + flags.AddPaginationFlagsToCmd(cmd, "packet commitments associated with a channel") + + return cmd +} + +// GetCmdQueryPacketCommitment defines the command to query a packet commitment +func GetCmdQueryPacketCommitment() *cobra.Command { + cmd := &cobra.Command{ + Use: "packet-commitment [port-id] [channel-id] [sequence]", + Short: "Query a packet commitment", + Long: "Query a packet commitment", + Example: fmt.Sprintf( + "%s query %s %s packet-commitment [port-id] [channel-id] [sequence]", version.AppName, host.ModuleName, types.SubModuleName, + ), + Args: cobra.ExactArgs(3), + RunE: func(cmd *cobra.Command, args []string) error { + clientCtx, err := client.GetClientQueryContext(cmd) + if err != nil { + return err + } + portID := args[0] + channelID := args[1] + prove, _ := cmd.Flags().GetBool(flags.FlagProve) + + seq, err := strconv.ParseUint(args[2], 10, 64) + if err != nil { + return err + } + + res, err := utils.QueryPacketCommitment(clientCtx, portID, channelID, seq, prove) + if err != nil { + return err + } + + return clientCtx.PrintProto(res) + }, + } + + cmd.Flags().Bool(flags.FlagProve, true, "show proofs for the query results") + flags.AddQueryFlagsToCmd(cmd) + + return cmd +} + +// GetCmdQueryPacketReceipt defines the command to query a packet receipt +func GetCmdQueryPacketReceipt() *cobra.Command { + cmd := &cobra.Command{ + Use: "packet-receipt [port-id] [channel-id] [sequence]", + Short: "Query a packet receipt", + Long: "Query a packet receipt", + Example: fmt.Sprintf( + "%s query %s %s packet-receipt [port-id] [channel-id] [sequence]", version.AppName, host.ModuleName, types.SubModuleName, + ), + Args: cobra.ExactArgs(3), + RunE: func(cmd *cobra.Command, args []string) error { + clientCtx, err := client.GetClientQueryContext(cmd) + if err != nil { + return err + } + portID := args[0] + channelID := args[1] + prove, _ := cmd.Flags().GetBool(flags.FlagProve) + + seq, err := strconv.ParseUint(args[2], 10, 64) + if err != nil { + return err + } + + res, err := 
utils.QueryPacketReceipt(clientCtx, portID, channelID, seq, prove) + if err != nil { + return err + } + + return clientCtx.PrintProto(res) + }, + } + + cmd.Flags().Bool(flags.FlagProve, true, "show proofs for the query results") + flags.AddQueryFlagsToCmd(cmd) + + return cmd +} + +// GetCmdQueryPacketAcknowledgement defines the command to query a packet acknowledgement +func GetCmdQueryPacketAcknowledgement() *cobra.Command { + cmd := &cobra.Command{ + Use: "packet-ack [port-id] [channel-id] [sequence]", + Short: "Query a packet acknowledgement", + Long: "Query a packet acknowledgement", + Example: fmt.Sprintf( + "%s query %s %s packet-ack [port-id] [channel-id] [sequence]", version.AppName, host.ModuleName, types.SubModuleName, + ), + Args: cobra.ExactArgs(3), + RunE: func(cmd *cobra.Command, args []string) error { + clientCtx, err := client.GetClientQueryContext(cmd) + if err != nil { + return err + } + portID := args[0] + channelID := args[1] + prove, _ := cmd.Flags().GetBool(flags.FlagProve) + + seq, err := strconv.ParseUint(args[2], 10, 64) + if err != nil { + return err + } + + res, err := utils.QueryPacketAcknowledgement(clientCtx, portID, channelID, seq, prove) + if err != nil { + return err + } + + return clientCtx.PrintProto(res) + }, + } + + cmd.Flags().Bool(flags.FlagProve, true, "show proofs for the query results") + flags.AddQueryFlagsToCmd(cmd) + + return cmd +} + +// GetCmdQueryUnreceivedPackets defines the command to query all the unreceived +// packets on the receiving chain +func GetCmdQueryUnreceivedPackets() *cobra.Command { + cmd := &cobra.Command{ + Use: "unreceived-packets [port-id] [channel-id]", + Short: "Query all the unreceived packets associated with a channel", + Long: `Determine if a packet, given a list of packet commitment sequences, is unreceived. + +The return value represents: +- Unreceived packet commitments: no acknowledgement exists on receiving chain for the given packet commitment sequence on sending chain. +`, + Example: fmt.Sprintf("%s query %s %s unreceived-packets [port-id] [channel-id] --sequences=1,2,3", version.AppName, host.ModuleName, types.SubModuleName), + Args: cobra.ExactArgs(2), + RunE: func(cmd *cobra.Command, args []string) error { + clientCtx, err := client.GetClientQueryContext(cmd) + if err != nil { + return err + } + queryClient := types.NewQueryClient(clientCtx) + + seqSlice, err := cmd.Flags().GetInt64Slice(flagSequences) + if err != nil { + return err + } + + seqs := make([]uint64, len(seqSlice)) + for i := range seqSlice { + seqs[i] = uint64(seqSlice[i]) + } + + req := &types.QueryUnreceivedPacketsRequest{ + PortId: args[0], + ChannelId: args[1], + PacketCommitmentSequences: seqs, + } + + res, err := queryClient.UnreceivedPackets(cmd.Context(), req) + if err != nil { + return err + } + + return clientCtx.PrintProto(res) + }, + } + + cmd.Flags().Int64Slice(flagSequences, []int64{}, "comma separated list of packet sequence numbers") + flags.AddQueryFlagsToCmd(cmd) + + return cmd +} + +// GetCmdQueryUnreceivedAcks defines the command to query all the unreceived acks on the original sending chain +func GetCmdQueryUnreceivedAcks() *cobra.Command { + cmd := &cobra.Command{ + Use: "unreceived-acks [port-id] [channel-id]", + Short: "Query all the unreceived acks associated with a channel", + Long: `Given a list of acknowledgement sequences from counterparty, determine if an ack on the counterparty chain has been received on the executing chain. 
+ +The return value represents: +- Unreceived packet acknowledgement: packet commitment exists on original sending (executing) chain and ack exists on receiving chain. +`, + Example: fmt.Sprintf("%s query %s %s unreceived-acks [port-id] [channel-id] --sequences=1,2,3", version.AppName, host.ModuleName, types.SubModuleName), + Args: cobra.ExactArgs(2), + RunE: func(cmd *cobra.Command, args []string) error { + clientCtx, err := client.GetClientQueryContext(cmd) + if err != nil { + return err + } + queryClient := types.NewQueryClient(clientCtx) + + seqSlice, err := cmd.Flags().GetInt64Slice(flagSequences) + if err != nil { + return err + } + + seqs := make([]uint64, len(seqSlice)) + for i := range seqSlice { + seqs[i] = uint64(seqSlice[i]) + } + + req := &types.QueryUnreceivedAcksRequest{ + PortId: args[0], + ChannelId: args[1], + PacketAckSequences: seqs, + } + + res, err := queryClient.UnreceivedAcks(cmd.Context(), req) + if err != nil { + return err + } + + return clientCtx.PrintProto(res) + }, + } + + cmd.Flags().Int64Slice(flagSequences, []int64{}, "comma separated list of packet sequence numbers") + flags.AddQueryFlagsToCmd(cmd) + + return cmd +} + +// GetCmdQueryNextSequenceReceive defines the command to query a next receive sequence for a given channel +func GetCmdQueryNextSequenceReceive() *cobra.Command { + cmd := &cobra.Command{ + Use: "next-sequence-receive [port-id] [channel-id]", + Short: "Query a next receive sequence", + Long: "Query the next receive sequence for a given channel", + Example: fmt.Sprintf( + "%s query %s %s next-sequence-receive [port-id] [channel-id]", version.AppName, host.ModuleName, types.SubModuleName, + ), + Args: cobra.ExactArgs(2), + RunE: func(cmd *cobra.Command, args []string) error { + clientCtx, err := client.GetClientQueryContext(cmd) + if err != nil { + return err + } + portID := args[0] + channelID := args[1] + prove, _ := cmd.Flags().GetBool(flags.FlagProve) + + sequenceRes, err := utils.QueryNextSequenceReceive(clientCtx, portID, channelID, prove) + if err != nil { + return err + } + + clientCtx = clientCtx.WithHeight(int64(sequenceRes.ProofHeight.RevisionHeight)) + return clientCtx.PrintProto(sequenceRes) + }, + } + + cmd.Flags().Bool(flags.FlagProve, true, "show proofs for the query results") + flags.AddQueryFlagsToCmd(cmd) + + return cmd +} diff --git a/core/04-channel/client/cli/tx.go b/core/04-channel/client/cli/tx.go new file mode 100644 index 0000000000..20afe62267 --- /dev/null +++ b/core/04-channel/client/cli/tx.go @@ -0,0 +1,288 @@ +package cli + +import ( + "strings" + + "github.com/spf13/cobra" + "github.com/spf13/pflag" + + "github.com/cosmos/cosmos-sdk/client" + "github.com/cosmos/cosmos-sdk/client/flags" + "github.com/cosmos/cosmos-sdk/client/tx" + "github.com/cosmos/cosmos-sdk/types/msgservice" + ibctransfertypes "github.com/cosmos/cosmos-sdk/x/ibc/applications/transfer/types" + clienttypes "github.com/cosmos/cosmos-sdk/x/ibc/core/02-client/types" + connectionutils "github.com/cosmos/cosmos-sdk/x/ibc/core/03-connection/client/utils" + "github.com/cosmos/cosmos-sdk/x/ibc/core/04-channel/types" +) + +// IBC Channel flags +const ( + FlagOrdered = "ordered" + FlagIBCVersion = "ibc-version" +) + +// NewChannelOpenInitCmd returns the command to create a MsgChannelOpenInit transaction +func NewChannelOpenInitCmd() *cobra.Command { + cmd := &cobra.Command{ + Use: "open-init [port-id] [counterparty-port-id] [connection-hops]", + Short: "Creates and sends a ChannelOpenInit message", + Args: cobra.ExactArgs(3), + RunE: func(cmd 
*cobra.Command, args []string) error { + clientCtx, err := client.GetClientTxContext(cmd) + if err != nil { + return err + } + portID := args[0] + counterpartyPortID := args[1] + hops := strings.Split(args[2], "/") + order := channelOrder(cmd.Flags()) + version, _ := cmd.Flags().GetString(FlagIBCVersion) + + msg := types.NewMsgChannelOpenInit( + portID, version, order, hops, + counterpartyPortID, clientCtx.GetFromAddress(), + ) + svcMsgClientConn := &msgservice.ServiceMsgClientConn{} + msgClient := types.NewMsgClient(svcMsgClientConn) + _, err = msgClient.ChannelOpenInit(cmd.Context(), msg) + if err != nil { + return err + } + + return tx.GenerateOrBroadcastTxCLI(clientCtx, cmd.Flags(), svcMsgClientConn.GetMsgs()...) + }, + } + + cmd.Flags().Bool(FlagOrdered, true, "Pass flag for opening ordered channels") + cmd.Flags().String(FlagIBCVersion, ibctransfertypes.Version, "IBC application version") + flags.AddTxFlagsToCmd(cmd) + + return cmd +} + +// NewChannelOpenTryCmd returns the command to create a MsgChannelOpenTry transaction +func NewChannelOpenTryCmd() *cobra.Command { + cmd := &cobra.Command{ + Use: "open-try [port-id] [channel-id] [counterparty-port-id] [counterparty-channel-id] [connection-hops] [/path/to/proof_init.json] [proof-height]", + Short: "Creates and sends a ChannelOpenTry message", + Args: cobra.ExactArgs(7), + RunE: func(cmd *cobra.Command, args []string) error { + clientCtx, err := client.GetClientTxContext(cmd) + if err != nil { + return err + } + portID := args[0] + channelID := args[1] + counterpartyPortID := args[2] + counterpartyChannelID := args[3] + hops := strings.Split(args[4], "/") + order := channelOrder(cmd.Flags()) + + // TODO: Differentiate between channel and counterparty versions. + version, _ := cmd.Flags().GetString(FlagIBCVersion) + + proofInit, err := connectionutils.ParseProof(clientCtx.LegacyAmino, args[5]) + if err != nil { + return err + } + + proofHeight, err := clienttypes.ParseHeight(args[6]) + if err != nil { + return err + } + + msg := types.NewMsgChannelOpenTry( + portID, channelID, version, order, hops, + counterpartyPortID, counterpartyChannelID, version, + proofInit, proofHeight, clientCtx.GetFromAddress(), + ) + svcMsgClientConn := &msgservice.ServiceMsgClientConn{} + msgClient := types.NewMsgClient(svcMsgClientConn) + _, err = msgClient.ChannelOpenTry(cmd.Context(), msg) + if err != nil { + return err + } + + return tx.GenerateOrBroadcastTxCLI(clientCtx, cmd.Flags(), svcMsgClientConn.GetMsgs()...) + }, + } + + cmd.Flags().Bool(FlagOrdered, true, "Pass flag for opening ordered channels") + cmd.Flags().String(FlagIBCVersion, ibctransfertypes.Version, "IBC application version") + flags.AddTxFlagsToCmd(cmd) + + return cmd +} + +// NewChannelOpenAckCmd returns the command to create a MsgChannelOpenAck transaction +func NewChannelOpenAckCmd() *cobra.Command { + cmd := &cobra.Command{ + Use: "open-ack [port-id] [channel-id] [counterparty-channel-id] [/path/to/proof_try.json] [proof-height]", + Short: "Creates and sends a ChannelOpenAck message", + Args: cobra.ExactArgs(5), + RunE: func(cmd *cobra.Command, args []string) error { + clientCtx, err := client.GetClientTxContext(cmd) + if err != nil { + return err + } + portID := args[0] + channelID := args[1] + counterpartyChannelID := args[2] + + // TODO: Differentiate between channel and counterparty versions. 
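// The single --ibc-version value read below (defaulting to the ICS-20 transfer
// version) is forwarded to NewMsgChannelOpenAck as the counterparty version; there
// is no separate flag yet for the channel's own version, which is what the TODO
// above refers to.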
+ version, _ := cmd.Flags().GetString(FlagIBCVersion) + + proofTry, err := connectionutils.ParseProof(clientCtx.LegacyAmino, args[3]) + if err != nil { + return err + } + + proofHeight, err := clienttypes.ParseHeight(args[4]) + if err != nil { + return err + } + + msg := types.NewMsgChannelOpenAck( + portID, channelID, counterpartyChannelID, version, proofTry, proofHeight, clientCtx.GetFromAddress(), + ) + svcMsgClientConn := &msgservice.ServiceMsgClientConn{} + msgClient := types.NewMsgClient(svcMsgClientConn) + _, err = msgClient.ChannelOpenAck(cmd.Context(), msg) + if err != nil { + return err + } + + return tx.GenerateOrBroadcastTxCLI(clientCtx, cmd.Flags(), svcMsgClientConn.GetMsgs()...) + }, + } + cmd.Flags().String(FlagIBCVersion, ibctransfertypes.Version, "IBC application version") + flags.AddTxFlagsToCmd(cmd) + + return cmd +} + +// NewChannelOpenConfirmCmd returns the command to create a MsgChannelOpenConfirm transaction +func NewChannelOpenConfirmCmd() *cobra.Command { + cmd := &cobra.Command{ + Use: "open-confirm [port-id] [channel-id] [/path/to/proof_ack.json] [proof-height]", + Short: "Creates and sends a ChannelOpenConfirm message", + Args: cobra.ExactArgs(4), + RunE: func(cmd *cobra.Command, args []string) error { + clientCtx, err := client.GetClientTxContext(cmd) + if err != nil { + return err + } + portID := args[0] + channelID := args[1] + + proofAck, err := connectionutils.ParseProof(clientCtx.LegacyAmino, args[2]) + if err != nil { + return err + } + + proofHeight, err := clienttypes.ParseHeight(args[3]) + if err != nil { + return err + } + + msg := types.NewMsgChannelOpenConfirm( + portID, channelID, proofAck, proofHeight, clientCtx.GetFromAddress(), + ) + svcMsgClientConn := &msgservice.ServiceMsgClientConn{} + msgClient := types.NewMsgClient(svcMsgClientConn) + _, err = msgClient.ChannelOpenConfirm(cmd.Context(), msg) + if err != nil { + return err + } + + return tx.GenerateOrBroadcastTxCLI(clientCtx, cmd.Flags(), svcMsgClientConn.GetMsgs()...) + }, + } + + flags.AddTxFlagsToCmd(cmd) + + return cmd +} + +// NewChannelCloseInitCmd returns the command to create a MsgChannelCloseInit transaction +func NewChannelCloseInitCmd() *cobra.Command { + cmd := &cobra.Command{ + Use: "close-init [port-id] [channel-id]", + Short: "Creates and sends a ChannelCloseInit message", + Args: cobra.ExactArgs(2), + RunE: func(cmd *cobra.Command, args []string) error { + clientCtx, err := client.GetClientTxContext(cmd) + if err != nil { + return err + } + portID := args[0] + channelID := args[1] + + msg := types.NewMsgChannelCloseInit(portID, channelID, clientCtx.GetFromAddress()) + svcMsgClientConn := &msgservice.ServiceMsgClientConn{} + msgClient := types.NewMsgClient(svcMsgClientConn) + _, err = msgClient.ChannelCloseInit(cmd.Context(), msg) + if err != nil { + return err + } + + return tx.GenerateOrBroadcastTxCLI(clientCtx, cmd.Flags(), svcMsgClientConn.GetMsgs()...) 
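// Every transaction command in this file follows the same pattern: the typed
// message is routed through a msgservice.ServiceMsgClientConn, an in-memory client
// connection that records service messages instead of transmitting them, and the
// recorded messages are then handed to tx.GenerateOrBroadcastTxCLI, which signs and
// broadcasts the transaction or renders it for offline generation, depending on the
// usual tx flags.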
+ }, + } + + flags.AddTxFlagsToCmd(cmd) + + return cmd +} + +// NewChannelCloseConfirmCmd returns the command to create a MsgChannelCloseConfirm transaction +func NewChannelCloseConfirmCmd() *cobra.Command { + cmd := &cobra.Command{ + Use: "close-confirm [port-id] [channel-id] [/path/to/proof_init.json] [proof-height]", + Short: "Creates and sends a ChannelCloseConfirm message", + Args: cobra.ExactArgs(4), + RunE: func(cmd *cobra.Command, args []string) error { + clientCtx, err := client.GetClientTxContext(cmd) + if err != nil { + return err + } + portID := args[0] + channelID := args[1] + + proofInit, err := connectionutils.ParseProof(clientCtx.LegacyAmino, args[2]) + if err != nil { + return err + } + + proofHeight, err := clienttypes.ParseHeight(args[3]) + if err != nil { + return err + } + + msg := types.NewMsgChannelCloseConfirm( + portID, channelID, proofInit, proofHeight, clientCtx.GetFromAddress(), + ) + svcMsgClientConn := &msgservice.ServiceMsgClientConn{} + msgClient := types.NewMsgClient(svcMsgClientConn) + _, err = msgClient.ChannelCloseConfirm(cmd.Context(), msg) + if err != nil { + return err + } + + return tx.GenerateOrBroadcastTxCLI(clientCtx, cmd.Flags(), svcMsgClientConn.GetMsgs()...) + }, + } + + flags.AddTxFlagsToCmd(cmd) + + return cmd +} + +func channelOrder(fs *pflag.FlagSet) types.Order { + if ordered, _ := fs.GetBool(FlagOrdered); ordered { + return types.ORDERED + } + + return types.UNORDERED +} diff --git a/core/04-channel/client/utils/utils.go b/core/04-channel/client/utils/utils.go new file mode 100644 index 0000000000..167e05d048 --- /dev/null +++ b/core/04-channel/client/utils/utils.go @@ -0,0 +1,301 @@ +package utils + +import ( + "context" + "encoding/binary" + + "github.com/cosmos/cosmos-sdk/client" + "github.com/cosmos/cosmos-sdk/codec" + sdkerrors "github.com/cosmos/cosmos-sdk/types/errors" + clientutils "github.com/cosmos/cosmos-sdk/x/ibc/core/02-client/client/utils" + clienttypes "github.com/cosmos/cosmos-sdk/x/ibc/core/02-client/types" + "github.com/cosmos/cosmos-sdk/x/ibc/core/04-channel/types" + host "github.com/cosmos/cosmos-sdk/x/ibc/core/24-host" + ibcclient "github.com/cosmos/cosmos-sdk/x/ibc/core/client" + "github.com/cosmos/cosmos-sdk/x/ibc/core/exported" +) + +// QueryChannel returns a channel end. +// If prove is true, it performs an ABCI store query in order to retrieve the merkle proof. Otherwise, +// it uses the gRPC query client. 
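+// A possible caller-side sketch (clientCtx, portID, channelID and prove are
+// assumed to come from the command being implemented; this package is
+// imported as utils):
+//
+//    res, err := utils.QueryChannel(clientCtx, portID, channelID, prove)
+//    if err != nil {
+//        return err
+//    }
+//    return clientCtx.PrintProto(res)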
+func QueryChannel( + clientCtx client.Context, portID, channelID string, prove bool, +) (*types.QueryChannelResponse, error) { + if prove { + return queryChannelABCI(clientCtx, portID, channelID) + } + + queryClient := types.NewQueryClient(clientCtx) + req := &types.QueryChannelRequest{ + PortId: portID, + ChannelId: channelID, + } + + return queryClient.Channel(context.Background(), req) +} + +func queryChannelABCI(clientCtx client.Context, portID, channelID string) (*types.QueryChannelResponse, error) { + key := host.ChannelKey(portID, channelID) + + value, proofBz, proofHeight, err := ibcclient.QueryTendermintProof(clientCtx, key) + if err != nil { + return nil, err + } + + // check if channel exists + if len(value) == 0 { + return nil, sdkerrors.Wrapf(types.ErrChannelNotFound, "portID (%s), channelID (%s)", portID, channelID) + } + + cdc := codec.NewProtoCodec(clientCtx.InterfaceRegistry) + + var channel types.Channel + if err := cdc.UnmarshalBinaryBare(value, &channel); err != nil { + return nil, err + } + + return types.NewQueryChannelResponse(channel, proofBz, proofHeight), nil +} + +// QueryChannelClientState returns the ClientState of a channel end. If +// prove is true, it performs an ABCI store query in order to retrieve the +// merkle proof. Otherwise, it uses the gRPC query client. +func QueryChannelClientState( + clientCtx client.Context, portID, channelID string, prove bool, +) (*types.QueryChannelClientStateResponse, error) { + + queryClient := types.NewQueryClient(clientCtx) + req := &types.QueryChannelClientStateRequest{ + PortId: portID, + ChannelId: channelID, + } + + res, err := queryClient.ChannelClientState(context.Background(), req) + if err != nil { + return nil, err + } + + if prove { + clientStateRes, err := clientutils.QueryClientStateABCI(clientCtx, res.IdentifiedClientState.ClientId) + if err != nil { + return nil, err + } + + // use client state returned from ABCI query in case query height differs + identifiedClientState := clienttypes.IdentifiedClientState{ + ClientId: res.IdentifiedClientState.ClientId, + ClientState: clientStateRes.ClientState, + } + res = types.NewQueryChannelClientStateResponse(identifiedClientState, clientStateRes.Proof, clientStateRes.ProofHeight) + } + + return res, nil +} + +// QueryChannelConsensusState returns the ConsensusState of a channel end. If +// prove is true, it performs an ABCI store query in order to retrieve the +// merkle proof. Otherwise, it uses the gRPC query client. +func QueryChannelConsensusState( + clientCtx client.Context, portID, channelID string, height clienttypes.Height, prove bool, +) (*types.QueryChannelConsensusStateResponse, error) { + + queryClient := types.NewQueryClient(clientCtx) + req := &types.QueryChannelConsensusStateRequest{ + PortId: portID, + ChannelId: channelID, + RevisionNumber: height.RevisionNumber, + RevisionHeight: height.RevisionHeight, + } + + res, err := queryClient.ChannelConsensusState(context.Background(), req) + if err != nil { + return nil, err + } + + if prove { + consensusStateRes, err := clientutils.QueryConsensusStateABCI(clientCtx, res.ClientId, height) + if err != nil { + return nil, err + } + + res = types.NewQueryChannelConsensusStateResponse(res.ClientId, consensusStateRes.ConsensusState, height, consensusStateRes.Proof, consensusStateRes.ProofHeight) + } + + return res, nil +} + +// QueryLatestConsensusState uses the channel Querier to return the +// latest ConsensusState given the source port ID and source channel ID. 
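+// It returns the consensus state, the latest height of the client associated
+// with the channel and the height at which the consensus state was queried.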
+func QueryLatestConsensusState( + clientCtx client.Context, portID, channelID string, +) (exported.ConsensusState, clienttypes.Height, clienttypes.Height, error) { + clientRes, err := QueryChannelClientState(clientCtx, portID, channelID, false) + if err != nil { + return nil, clienttypes.Height{}, clienttypes.Height{}, err + } + + var clientState exported.ClientState + if err := clientCtx.InterfaceRegistry.UnpackAny(clientRes.IdentifiedClientState.ClientState, &clientState); err != nil { + return nil, clienttypes.Height{}, clienttypes.Height{}, err + } + + clientHeight, ok := clientState.GetLatestHeight().(clienttypes.Height) + if !ok { + return nil, clienttypes.Height{}, clienttypes.Height{}, sdkerrors.Wrapf(sdkerrors.ErrInvalidHeight, "invalid height type. expected type: %T, got: %T", + clienttypes.Height{}, clientHeight) + } + res, err := QueryChannelConsensusState(clientCtx, portID, channelID, clientHeight, false) + if err != nil { + return nil, clienttypes.Height{}, clienttypes.Height{}, err + } + + var consensusState exported.ConsensusState + if err := clientCtx.InterfaceRegistry.UnpackAny(res.ConsensusState, &consensusState); err != nil { + return nil, clienttypes.Height{}, clienttypes.Height{}, err + } + + return consensusState, clientHeight, res.ProofHeight, nil +} + +// QueryNextSequenceReceive returns the next sequence receive. +// If prove is true, it performs an ABCI store query in order to retrieve the merkle proof. Otherwise, +// it uses the gRPC query client. +func QueryNextSequenceReceive( + clientCtx client.Context, portID, channelID string, prove bool, +) (*types.QueryNextSequenceReceiveResponse, error) { + if prove { + return queryNextSequenceRecvABCI(clientCtx, portID, channelID) + } + + queryClient := types.NewQueryClient(clientCtx) + req := &types.QueryNextSequenceReceiveRequest{ + PortId: portID, + ChannelId: channelID, + } + + return queryClient.NextSequenceReceive(context.Background(), req) +} + +func queryNextSequenceRecvABCI(clientCtx client.Context, portID, channelID string) (*types.QueryNextSequenceReceiveResponse, error) { + key := host.NextSequenceRecvKey(portID, channelID) + + value, proofBz, proofHeight, err := ibcclient.QueryTendermintProof(clientCtx, key) + if err != nil { + return nil, err + } + + // check if next sequence receive exists + if len(value) == 0 { + return nil, sdkerrors.Wrapf(types.ErrChannelNotFound, "portID (%s), channelID (%s)", portID, channelID) + } + + sequence := binary.BigEndian.Uint64(value) + + return types.NewQueryNextSequenceReceiveResponse(sequence, proofBz, proofHeight), nil +} + +// QueryPacketCommitment returns a packet commitment. +// If prove is true, it performs an ABCI store query in order to retrieve the merkle proof. Otherwise, +// it uses the gRPC query client. 
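+// A missing or empty commitment value results in a packet commitment not
+// found error on both the ABCI and the gRPC query paths.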
+func QueryPacketCommitment( + clientCtx client.Context, portID, channelID string, + sequence uint64, prove bool, +) (*types.QueryPacketCommitmentResponse, error) { + if prove { + return queryPacketCommitmentABCI(clientCtx, portID, channelID, sequence) + } + + queryClient := types.NewQueryClient(clientCtx) + req := &types.QueryPacketCommitmentRequest{ + PortId: portID, + ChannelId: channelID, + Sequence: sequence, + } + + return queryClient.PacketCommitment(context.Background(), req) +} + +func queryPacketCommitmentABCI( + clientCtx client.Context, portID, channelID string, sequence uint64, +) (*types.QueryPacketCommitmentResponse, error) { + key := host.PacketCommitmentKey(portID, channelID, sequence) + + value, proofBz, proofHeight, err := ibcclient.QueryTendermintProof(clientCtx, key) + if err != nil { + return nil, err + } + + // check if packet commitment exists + if len(value) == 0 { + return nil, sdkerrors.Wrapf(types.ErrPacketCommitmentNotFound, "portID (%s), channelID (%s), sequence (%d)", portID, channelID, sequence) + } + + return types.NewQueryPacketCommitmentResponse(value, proofBz, proofHeight), nil +} + +// QueryPacketReceipt returns data about a packet receipt. +// If prove is true, it performs an ABCI store query in order to retrieve the merkle proof. Otherwise, +// it uses the gRPC query client. +func QueryPacketReceipt( + clientCtx client.Context, portID, channelID string, + sequence uint64, prove bool, +) (*types.QueryPacketReceiptResponse, error) { + if prove { + return queryPacketReceiptABCI(clientCtx, portID, channelID, sequence) + } + + queryClient := types.NewQueryClient(clientCtx) + req := &types.QueryPacketReceiptRequest{ + PortId: portID, + ChannelId: channelID, + Sequence: sequence, + } + + return queryClient.PacketReceipt(context.Background(), req) +} + +func queryPacketReceiptABCI( + clientCtx client.Context, portID, channelID string, sequence uint64, +) (*types.QueryPacketReceiptResponse, error) { + key := host.PacketReceiptKey(portID, channelID, sequence) + + value, proofBz, proofHeight, err := ibcclient.QueryTendermintProof(clientCtx, key) + if err != nil { + return nil, err + } + + return types.NewQueryPacketReceiptResponse(value != nil, proofBz, proofHeight), nil +} + +// QueryPacketAcknowledgement returns the data about a packet acknowledgement. +// If prove is true, it performs an ABCI store query in order to retrieve the merkle proof. 
Otherwise, +// it uses the gRPC query client +func QueryPacketAcknowledgement(clientCtx client.Context, portID, channelID string, sequence uint64, prove bool) (*types.QueryPacketAcknowledgementResponse, error) { + if prove { + return queryPacketAcknowledgementABCI(clientCtx, portID, channelID, sequence) + } + + queryClient := types.NewQueryClient(clientCtx) + req := &types.QueryPacketAcknowledgementRequest{ + PortId: portID, + ChannelId: channelID, + Sequence: sequence, + } + + return queryClient.PacketAcknowledgement(context.Background(), req) +} + +func queryPacketAcknowledgementABCI(clientCtx client.Context, portID, channelID string, sequence uint64) (*types.QueryPacketAcknowledgementResponse, error) { + key := host.PacketAcknowledgementKey(portID, channelID, sequence) + + value, proofBz, proofHeight, err := ibcclient.QueryTendermintProof(clientCtx, key) + if err != nil { + return nil, err + } + + if len(value) == 0 { + return nil, sdkerrors.Wrapf(types.ErrInvalidAcknowledgement, "portID (%s), channelID (%s), sequence (%d)", portID, channelID, sequence) + } + + return types.NewQueryPacketAcknowledgementResponse(value, proofBz, proofHeight), nil +} diff --git a/core/04-channel/genesis.go b/core/04-channel/genesis.go new file mode 100644 index 0000000000..07fad47d77 --- /dev/null +++ b/core/04-channel/genesis.go @@ -0,0 +1,48 @@ +package channel + +import ( + sdk "github.com/cosmos/cosmos-sdk/types" + "github.com/cosmos/cosmos-sdk/x/ibc/core/04-channel/keeper" + "github.com/cosmos/cosmos-sdk/x/ibc/core/04-channel/types" +) + +// InitGenesis initializes the ibc channel submodule's state from a provided genesis +// state. +func InitGenesis(ctx sdk.Context, k keeper.Keeper, gs types.GenesisState) { + for _, channel := range gs.Channels { + ch := types.NewChannel(channel.State, channel.Ordering, channel.Counterparty, channel.ConnectionHops, channel.Version) + k.SetChannel(ctx, channel.PortId, channel.ChannelId, ch) + } + for _, ack := range gs.Acknowledgements { + k.SetPacketAcknowledgement(ctx, ack.PortId, ack.ChannelId, ack.Sequence, ack.Data) + } + for _, commitment := range gs.Commitments { + k.SetPacketCommitment(ctx, commitment.PortId, commitment.ChannelId, commitment.Sequence, commitment.Data) + } + for _, receipt := range gs.Receipts { + k.SetPacketReceipt(ctx, receipt.PortId, receipt.ChannelId, receipt.Sequence) + } + for _, ss := range gs.SendSequences { + k.SetNextSequenceSend(ctx, ss.PortId, ss.ChannelId, ss.Sequence) + } + for _, rs := range gs.RecvSequences { + k.SetNextSequenceRecv(ctx, rs.PortId, rs.ChannelId, rs.Sequence) + } + for _, as := range gs.AckSequences { + k.SetNextSequenceAck(ctx, as.PortId, as.ChannelId, as.Sequence) + } + k.SetNextChannelSequence(ctx, gs.NextChannelSequence) +} + +// ExportGenesis returns the ibc channel submodule's exported genesis. 
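+// Note that, unlike InitGenesis above, the next channel sequence is not part
+// of the exported state.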
+func ExportGenesis(ctx sdk.Context, k keeper.Keeper) types.GenesisState { + return types.GenesisState{ + Channels: k.GetAllChannels(ctx), + Acknowledgements: k.GetAllPacketAcks(ctx), + Commitments: k.GetAllPacketCommitments(ctx), + Receipts: k.GetAllPacketReceipts(ctx), + SendSequences: k.GetAllPacketSendSeqs(ctx), + RecvSequences: k.GetAllPacketRecvSeqs(ctx), + AckSequences: k.GetAllPacketAckSeqs(ctx), + } +} diff --git a/core/04-channel/handler.go b/core/04-channel/handler.go new file mode 100644 index 0000000000..375c35263e --- /dev/null +++ b/core/04-channel/handler.go @@ -0,0 +1,186 @@ +package channel + +import ( + sdk "github.com/cosmos/cosmos-sdk/types" + sdkerrors "github.com/cosmos/cosmos-sdk/types/errors" + capabilitytypes "github.com/cosmos/cosmos-sdk/x/capability/types" + "github.com/cosmos/cosmos-sdk/x/ibc/core/04-channel/keeper" + "github.com/cosmos/cosmos-sdk/x/ibc/core/04-channel/types" +) + +// HandleMsgChannelOpenInit defines the sdk.Handler for MsgChannelOpenInit +func HandleMsgChannelOpenInit(ctx sdk.Context, k keeper.Keeper, portCap *capabilitytypes.Capability, msg *types.MsgChannelOpenInit) (*sdk.Result, string, *capabilitytypes.Capability, error) { + channelID, capKey, err := k.ChanOpenInit( + ctx, msg.Channel.Ordering, msg.Channel.ConnectionHops, msg.PortId, + portCap, msg.Channel.Counterparty, msg.Channel.Version, + ) + if err != nil { + return nil, "", nil, sdkerrors.Wrap(err, "channel handshake open init failed") + } + + ctx.EventManager().EmitEvents(sdk.Events{ + sdk.NewEvent( + types.EventTypeChannelOpenInit, + sdk.NewAttribute(types.AttributeKeyPortID, msg.PortId), + sdk.NewAttribute(types.AttributeKeyChannelID, channelID), + sdk.NewAttribute(types.AttributeCounterpartyPortID, msg.Channel.Counterparty.PortId), + sdk.NewAttribute(types.AttributeCounterpartyChannelID, msg.Channel.Counterparty.ChannelId), + sdk.NewAttribute(types.AttributeKeyConnectionID, msg.Channel.ConnectionHops[0]), + ), + sdk.NewEvent( + sdk.EventTypeMessage, + sdk.NewAttribute(sdk.AttributeKeyModule, types.AttributeValueCategory), + ), + }) + + return &sdk.Result{ + Events: ctx.EventManager().Events().ToABCIEvents(), + }, channelID, capKey, nil +} + +// HandleMsgChannelOpenTry defines the sdk.Handler for MsgChannelOpenTry +func HandleMsgChannelOpenTry(ctx sdk.Context, k keeper.Keeper, portCap *capabilitytypes.Capability, msg *types.MsgChannelOpenTry) (*sdk.Result, string, *capabilitytypes.Capability, error) { + channelID, capKey, err := k.ChanOpenTry(ctx, msg.Channel.Ordering, msg.Channel.ConnectionHops, msg.PortId, msg.PreviousChannelId, + portCap, msg.Channel.Counterparty, msg.Channel.Version, msg.CounterpartyVersion, msg.ProofInit, msg.ProofHeight, + ) + if err != nil { + return nil, "", nil, sdkerrors.Wrap(err, "channel handshake open try failed") + } + + ctx.EventManager().EmitEvents(sdk.Events{ + sdk.NewEvent( + types.EventTypeChannelOpenTry, + sdk.NewAttribute(types.AttributeKeyPortID, msg.PortId), + sdk.NewAttribute(types.AttributeKeyChannelID, channelID), + sdk.NewAttribute(types.AttributeCounterpartyPortID, msg.Channel.Counterparty.PortId), + sdk.NewAttribute(types.AttributeCounterpartyChannelID, msg.Channel.Counterparty.ChannelId), + sdk.NewAttribute(types.AttributeKeyConnectionID, msg.Channel.ConnectionHops[0]), + ), + sdk.NewEvent( + sdk.EventTypeMessage, + sdk.NewAttribute(sdk.AttributeKeyModule, types.AttributeValueCategory), + ), + }) + + return &sdk.Result{ + Events: ctx.EventManager().Events().ToABCIEvents(), + }, channelID, capKey, nil +} + +// HandleMsgChannelOpenAck 
defines the sdk.Handler for MsgChannelOpenAck +func HandleMsgChannelOpenAck(ctx sdk.Context, k keeper.Keeper, channelCap *capabilitytypes.Capability, msg *types.MsgChannelOpenAck) (*sdk.Result, error) { + err := k.ChanOpenAck( + ctx, msg.PortId, msg.ChannelId, channelCap, msg.CounterpartyVersion, msg.CounterpartyChannelId, msg.ProofTry, msg.ProofHeight, + ) + if err != nil { + return nil, sdkerrors.Wrap(err, "channel handshake open ack failed") + } + + channel, _ := k.GetChannel(ctx, msg.PortId, msg.ChannelId) + + ctx.EventManager().EmitEvents(sdk.Events{ + sdk.NewEvent( + types.EventTypeChannelOpenAck, + sdk.NewAttribute(types.AttributeKeyPortID, msg.PortId), + sdk.NewAttribute(types.AttributeKeyChannelID, msg.ChannelId), + sdk.NewAttribute(types.AttributeCounterpartyPortID, channel.Counterparty.PortId), + sdk.NewAttribute(types.AttributeCounterpartyChannelID, channel.Counterparty.ChannelId), + sdk.NewAttribute(types.AttributeKeyConnectionID, channel.ConnectionHops[0]), + ), + sdk.NewEvent( + sdk.EventTypeMessage, + sdk.NewAttribute(sdk.AttributeKeyModule, types.AttributeValueCategory), + ), + }) + + return &sdk.Result{ + Events: ctx.EventManager().Events().ToABCIEvents(), + }, nil +} + +// HandleMsgChannelOpenConfirm defines the sdk.Handler for MsgChannelOpenConfirm +func HandleMsgChannelOpenConfirm(ctx sdk.Context, k keeper.Keeper, channelCap *capabilitytypes.Capability, msg *types.MsgChannelOpenConfirm) (*sdk.Result, error) { + err := k.ChanOpenConfirm(ctx, msg.PortId, msg.ChannelId, channelCap, msg.ProofAck, msg.ProofHeight) + if err != nil { + return nil, sdkerrors.Wrap(err, "channel handshake open confirm failed") + } + + channel, _ := k.GetChannel(ctx, msg.PortId, msg.ChannelId) + + ctx.EventManager().EmitEvents(sdk.Events{ + sdk.NewEvent( + types.EventTypeChannelOpenConfirm, + sdk.NewAttribute(types.AttributeKeyPortID, msg.PortId), + sdk.NewAttribute(types.AttributeKeyChannelID, msg.ChannelId), + sdk.NewAttribute(types.AttributeCounterpartyPortID, channel.Counterparty.PortId), + sdk.NewAttribute(types.AttributeCounterpartyChannelID, channel.Counterparty.ChannelId), + sdk.NewAttribute(types.AttributeKeyConnectionID, channel.ConnectionHops[0]), + ), + sdk.NewEvent( + sdk.EventTypeMessage, + sdk.NewAttribute(sdk.AttributeKeyModule, types.AttributeValueCategory), + ), + }) + + return &sdk.Result{ + Events: ctx.EventManager().Events().ToABCIEvents(), + }, nil +} + +// HandleMsgChannelCloseInit defines the sdk.Handler for MsgChannelCloseInit +func HandleMsgChannelCloseInit(ctx sdk.Context, k keeper.Keeper, channelCap *capabilitytypes.Capability, msg *types.MsgChannelCloseInit) (*sdk.Result, error) { + err := k.ChanCloseInit(ctx, msg.PortId, msg.ChannelId, channelCap) + if err != nil { + return nil, sdkerrors.Wrap(err, "channel handshake close init failed") + } + + channel, _ := k.GetChannel(ctx, msg.PortId, msg.ChannelId) + + ctx.EventManager().EmitEvents(sdk.Events{ + sdk.NewEvent( + types.EventTypeChannelCloseInit, + sdk.NewAttribute(types.AttributeKeyPortID, msg.PortId), + sdk.NewAttribute(types.AttributeKeyChannelID, msg.ChannelId), + sdk.NewAttribute(types.AttributeCounterpartyPortID, channel.Counterparty.PortId), + sdk.NewAttribute(types.AttributeCounterpartyChannelID, channel.Counterparty.ChannelId), + sdk.NewAttribute(types.AttributeKeyConnectionID, channel.ConnectionHops[0]), + ), + sdk.NewEvent( + sdk.EventTypeMessage, + sdk.NewAttribute(sdk.AttributeKeyModule, types.AttributeValueCategory), + ), + }) + + return &sdk.Result{ + Events: 
ctx.EventManager().Events().ToABCIEvents(), + }, nil +} + +// HandleMsgChannelCloseConfirm defines the sdk.Handler for MsgChannelCloseConfirm +func HandleMsgChannelCloseConfirm(ctx sdk.Context, k keeper.Keeper, channelCap *capabilitytypes.Capability, msg *types.MsgChannelCloseConfirm) (*sdk.Result, error) { + err := k.ChanCloseConfirm(ctx, msg.PortId, msg.ChannelId, channelCap, msg.ProofInit, msg.ProofHeight) + if err != nil { + return nil, sdkerrors.Wrap(err, "channel handshake close confirm failed") + } + + channel, _ := k.GetChannel(ctx, msg.PortId, msg.ChannelId) + + ctx.EventManager().EmitEvents(sdk.Events{ + sdk.NewEvent( + types.EventTypeChannelCloseConfirm, + sdk.NewAttribute(types.AttributeKeyPortID, msg.PortId), + sdk.NewAttribute(types.AttributeKeyChannelID, msg.ChannelId), + sdk.NewAttribute(types.AttributeCounterpartyPortID, channel.Counterparty.PortId), + sdk.NewAttribute(types.AttributeCounterpartyChannelID, channel.Counterparty.ChannelId), + sdk.NewAttribute(types.AttributeKeyConnectionID, channel.ConnectionHops[0]), + ), + sdk.NewEvent( + sdk.EventTypeMessage, + sdk.NewAttribute(sdk.AttributeKeyModule, types.AttributeValueCategory), + ), + }) + + return &sdk.Result{ + Events: ctx.EventManager().Events().ToABCIEvents(), + }, nil +} diff --git a/core/04-channel/keeper/grpc_query.go b/core/04-channel/keeper/grpc_query.go new file mode 100644 index 0000000000..30df0a33ac --- /dev/null +++ b/core/04-channel/keeper/grpc_query.go @@ -0,0 +1,486 @@ +package keeper + +import ( + "context" + "strconv" + "strings" + + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" + + "github.com/cosmos/cosmos-sdk/store/prefix" + sdk "github.com/cosmos/cosmos-sdk/types" + sdkerrors "github.com/cosmos/cosmos-sdk/types/errors" + "github.com/cosmos/cosmos-sdk/types/query" + clienttypes "github.com/cosmos/cosmos-sdk/x/ibc/core/02-client/types" + connectiontypes "github.com/cosmos/cosmos-sdk/x/ibc/core/03-connection/types" + "github.com/cosmos/cosmos-sdk/x/ibc/core/04-channel/types" + host "github.com/cosmos/cosmos-sdk/x/ibc/core/24-host" +) + +var _ types.QueryServer = (*Keeper)(nil) + +// Channel implements the Query/Channel gRPC method +func (q Keeper) Channel(c context.Context, req *types.QueryChannelRequest) (*types.QueryChannelResponse, error) { + if req == nil { + return nil, status.Error(codes.InvalidArgument, "empty request") + } + + if err := validategRPCRequest(req.PortId, req.ChannelId); err != nil { + return nil, err + } + + ctx := sdk.UnwrapSDKContext(c) + channel, found := q.GetChannel(ctx, req.PortId, req.ChannelId) + if !found { + return nil, status.Error( + codes.NotFound, + sdkerrors.Wrapf(types.ErrChannelNotFound, "port-id: %s, channel-id %s", req.PortId, req.ChannelId).Error(), + ) + } + + selfHeight := clienttypes.GetSelfHeight(ctx) + return types.NewQueryChannelResponse(channel, nil, selfHeight), nil +} + +// Channels implements the Query/Channels gRPC method +func (q Keeper) Channels(c context.Context, req *types.QueryChannelsRequest) (*types.QueryChannelsResponse, error) { + if req == nil { + return nil, status.Error(codes.InvalidArgument, "empty request") + } + + ctx := sdk.UnwrapSDKContext(c) + + channels := []*types.IdentifiedChannel{} + store := prefix.NewStore(ctx.KVStore(q.storeKey), []byte(host.KeyChannelEndPrefix)) + + pageRes, err := query.Paginate(store, req.Pagination, func(key, value []byte) error { + var result types.Channel + if err := q.cdc.UnmarshalBinaryBare(value, &result); err != nil { + return err + } + + portID, channelID, err := 
host.ParseChannelPath(string(key)) + if err != nil { + return err + } + + identifiedChannel := types.NewIdentifiedChannel(portID, channelID, result) + channels = append(channels, &identifiedChannel) + return nil + }) + + if err != nil { + return nil, err + } + + selfHeight := clienttypes.GetSelfHeight(ctx) + return &types.QueryChannelsResponse{ + Channels: channels, + Pagination: pageRes, + Height: selfHeight, + }, nil +} + +// ConnectionChannels implements the Query/ConnectionChannels gRPC method +func (q Keeper) ConnectionChannels(c context.Context, req *types.QueryConnectionChannelsRequest) (*types.QueryConnectionChannelsResponse, error) { + if req == nil { + return nil, status.Error(codes.InvalidArgument, "empty request") + } + + if err := host.ConnectionIdentifierValidator(req.Connection); err != nil { + return nil, status.Error(codes.InvalidArgument, err.Error()) + } + + ctx := sdk.UnwrapSDKContext(c) + + channels := []*types.IdentifiedChannel{} + store := prefix.NewStore(ctx.KVStore(q.storeKey), []byte(host.KeyChannelEndPrefix)) + + pageRes, err := query.Paginate(store, req.Pagination, func(key, value []byte) error { + var result types.Channel + if err := q.cdc.UnmarshalBinaryBare(value, &result); err != nil { + return err + } + + // ignore channel and continue to the next item if the connection is + // different than the requested one + if result.ConnectionHops[0] != req.Connection { + return nil + } + + portID, channelID, err := host.ParseChannelPath(string(key)) + if err != nil { + return err + } + + identifiedChannel := types.NewIdentifiedChannel(portID, channelID, result) + channels = append(channels, &identifiedChannel) + return nil + }) + + if err != nil { + return nil, err + } + + selfHeight := clienttypes.GetSelfHeight(ctx) + return &types.QueryConnectionChannelsResponse{ + Channels: channels, + Pagination: pageRes, + Height: selfHeight, + }, nil +} + +// ChannelClientState implements the Query/ChannelClientState gRPC method +func (q Keeper) ChannelClientState(c context.Context, req *types.QueryChannelClientStateRequest) (*types.QueryChannelClientStateResponse, error) { + if req == nil { + return nil, status.Error(codes.InvalidArgument, "empty request") + } + + if err := validategRPCRequest(req.PortId, req.ChannelId); err != nil { + return nil, err + } + + ctx := sdk.UnwrapSDKContext(c) + + clientID, clientState, err := q.GetChannelClientState(ctx, req.PortId, req.ChannelId) + if err != nil { + return nil, status.Error(codes.NotFound, err.Error()) + } + + identifiedClientState := clienttypes.NewIdentifiedClientState(clientID, clientState) + + selfHeight := clienttypes.GetSelfHeight(ctx) + return types.NewQueryChannelClientStateResponse(identifiedClientState, nil, selfHeight), nil +} + +// ChannelConsensusState implements the Query/ChannelConsensusState gRPC method +func (q Keeper) ChannelConsensusState(c context.Context, req *types.QueryChannelConsensusStateRequest) (*types.QueryChannelConsensusStateResponse, error) { + if req == nil { + return nil, status.Error(codes.InvalidArgument, "empty request") + } + + if err := validategRPCRequest(req.PortId, req.ChannelId); err != nil { + return nil, err + } + + ctx := sdk.UnwrapSDKContext(c) + + channel, found := q.GetChannel(ctx, req.PortId, req.ChannelId) + if !found { + return nil, status.Error( + codes.NotFound, + sdkerrors.Wrapf(types.ErrChannelNotFound, "port-id: %s, channel-id %s", req.PortId, req.ChannelId).Error(), + ) + } + + connection, found := q.connectionKeeper.GetConnection(ctx, channel.ConnectionHops[0]) + if 
!found { + return nil, status.Error( + codes.NotFound, + sdkerrors.Wrapf(connectiontypes.ErrConnectionNotFound, "connection-id: %s", channel.ConnectionHops[0]).Error(), + ) + } + + consHeight := clienttypes.NewHeight(req.RevisionNumber, req.RevisionHeight) + consensusState, found := q.clientKeeper.GetClientConsensusState(ctx, connection.ClientId, consHeight) + if !found { + return nil, status.Error( + codes.NotFound, + sdkerrors.Wrapf(clienttypes.ErrConsensusStateNotFound, "client-id: %s", connection.ClientId).Error(), + ) + } + + anyConsensusState, err := clienttypes.PackConsensusState(consensusState) + if err != nil { + return nil, status.Error(codes.Internal, err.Error()) + } + + selfHeight := clienttypes.GetSelfHeight(ctx) + return types.NewQueryChannelConsensusStateResponse(connection.ClientId, anyConsensusState, consHeight, nil, selfHeight), nil +} + +// PacketCommitment implements the Query/PacketCommitment gRPC method +func (q Keeper) PacketCommitment(c context.Context, req *types.QueryPacketCommitmentRequest) (*types.QueryPacketCommitmentResponse, error) { + if req == nil { + return nil, status.Error(codes.InvalidArgument, "empty request") + } + + if err := validategRPCRequest(req.PortId, req.ChannelId); err != nil { + return nil, err + } + + if req.Sequence == 0 { + return nil, status.Error(codes.InvalidArgument, "packet sequence cannot be 0") + } + + ctx := sdk.UnwrapSDKContext(c) + + commitmentBz := q.GetPacketCommitment(ctx, req.PortId, req.ChannelId, req.Sequence) + if len(commitmentBz) == 0 { + return nil, status.Error(codes.NotFound, "packet commitment hash not found") + } + + selfHeight := clienttypes.GetSelfHeight(ctx) + return types.NewQueryPacketCommitmentResponse(commitmentBz, nil, selfHeight), nil +} + +// PacketCommitments implements the Query/PacketCommitments gRPC method +func (q Keeper) PacketCommitments(c context.Context, req *types.QueryPacketCommitmentsRequest) (*types.QueryPacketCommitmentsResponse, error) { + if req == nil { + return nil, status.Error(codes.InvalidArgument, "empty request") + } + + if err := validategRPCRequest(req.PortId, req.ChannelId); err != nil { + return nil, err + } + + ctx := sdk.UnwrapSDKContext(c) + + commitments := []*types.PacketState{} + store := prefix.NewStore(ctx.KVStore(q.storeKey), []byte(host.PacketCommitmentPrefixPath(req.PortId, req.ChannelId))) + + pageRes, err := query.Paginate(store, req.Pagination, func(key, value []byte) error { + keySplit := strings.Split(string(key), "/") + + sequence, err := strconv.ParseUint(keySplit[len(keySplit)-1], 10, 64) + if err != nil { + return err + } + + commitment := types.NewPacketState(req.PortId, req.ChannelId, sequence, value) + commitments = append(commitments, &commitment) + return nil + }) + + if err != nil { + return nil, err + } + + selfHeight := clienttypes.GetSelfHeight(ctx) + return &types.QueryPacketCommitmentsResponse{ + Commitments: commitments, + Pagination: pageRes, + Height: selfHeight, + }, nil +} + +// PacketReceipt implements the Query/PacketReceipt gRPC method +func (q Keeper) PacketReceipt(c context.Context, req *types.QueryPacketReceiptRequest) (*types.QueryPacketReceiptResponse, error) { + if req == nil { + return nil, status.Error(codes.InvalidArgument, "empty request") + } + + if err := validategRPCRequest(req.PortId, req.ChannelId); err != nil { + return nil, err + } + + if req.Sequence == 0 { + return nil, status.Error(codes.InvalidArgument, "packet sequence cannot be 0") + } + + ctx := sdk.UnwrapSDKContext(c) + + _, recvd := q.GetPacketReceipt(ctx, 
req.PortId, req.ChannelId, req.Sequence) + + selfHeight := clienttypes.GetSelfHeight(ctx) + return types.NewQueryPacketReceiptResponse(recvd, nil, selfHeight), nil +} + +// PacketAcknowledgement implements the Query/PacketAcknowledgement gRPC method +func (q Keeper) PacketAcknowledgement(c context.Context, req *types.QueryPacketAcknowledgementRequest) (*types.QueryPacketAcknowledgementResponse, error) { + if req == nil { + return nil, status.Error(codes.InvalidArgument, "empty request") + } + + if err := validategRPCRequest(req.PortId, req.ChannelId); err != nil { + return nil, err + } + + if req.Sequence == 0 { + return nil, status.Error(codes.InvalidArgument, "packet sequence cannot be 0") + } + + ctx := sdk.UnwrapSDKContext(c) + + acknowledgementBz, found := q.GetPacketAcknowledgement(ctx, req.PortId, req.ChannelId, req.Sequence) + if !found || len(acknowledgementBz) == 0 { + return nil, status.Error(codes.NotFound, "packet acknowledgement hash not found") + } + + selfHeight := clienttypes.GetSelfHeight(ctx) + return types.NewQueryPacketAcknowledgementResponse(acknowledgementBz, nil, selfHeight), nil +} + +// PacketAcknowledgements implements the Query/PacketAcknowledgements gRPC method +func (q Keeper) PacketAcknowledgements(c context.Context, req *types.QueryPacketAcknowledgementsRequest) (*types.QueryPacketAcknowledgementsResponse, error) { + if req == nil { + return nil, status.Error(codes.InvalidArgument, "empty request") + } + + if err := validategRPCRequest(req.PortId, req.ChannelId); err != nil { + return nil, err + } + + ctx := sdk.UnwrapSDKContext(c) + + acks := []*types.PacketState{} + store := prefix.NewStore(ctx.KVStore(q.storeKey), []byte(host.PacketAcknowledgementPrefixPath(req.PortId, req.ChannelId))) + + pageRes, err := query.Paginate(store, req.Pagination, func(key, value []byte) error { + keySplit := strings.Split(string(key), "/") + + sequence, err := strconv.ParseUint(keySplit[len(keySplit)-1], 10, 64) + if err != nil { + return err + } + + ack := types.NewPacketState(req.PortId, req.ChannelId, sequence, value) + acks = append(acks, &ack) + return nil + }) + + if err != nil { + return nil, err + } + + selfHeight := clienttypes.GetSelfHeight(ctx) + return &types.QueryPacketAcknowledgementsResponse{ + Acknowledgements: acks, + Pagination: pageRes, + Height: selfHeight, + }, nil +} + +// UnreceivedPackets implements the Query/UnreceivedPackets gRPC method. Given +// a list of counterparty packet commitments, the querier checks if the packet +// has already been received by checking if a receipt exists on this +// chain for the packet sequence. All packets that haven't been received yet +// are returned in the response +// Usage: To use this method correctly, first query all packet commitments on +// the sending chain using the Query/PacketCommitments gRPC method. +// Then input the returned sequences into the QueryUnreceivedPacketsRequest +// and send the request to this Query/UnreceivedPackets on the **receiving** +// chain. This gRPC method will then return the list of packet sequences that +// are yet to be received on the receiving chain. +// +// NOTE: The querier makes the assumption that the provided list of packet +// commitments is correct and will not function properly if the list +// is not up to date. Ideally the query height should equal the latest height +// on the counterparty's client which represents this chain. 
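+//
+// A relayer-side sketch (queryClient, portID, channelID and ctx are
+// placeholders; commitmentsRes is assumed to be the counterparty's
+// Query/PacketCommitments response):
+//
+//    seqs := make([]uint64, len(commitmentsRes.Commitments))
+//    for i, c := range commitmentsRes.Commitments {
+//        seqs[i] = c.Sequence
+//    }
+//    res, err := queryClient.UnreceivedPackets(ctx, &types.QueryUnreceivedPacketsRequest{
+//        PortId:                    portID,
+//        ChannelId:                 channelID,
+//        PacketCommitmentSequences: seqs,
+//    })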
+func (q Keeper) UnreceivedPackets(c context.Context, req *types.QueryUnreceivedPacketsRequest) (*types.QueryUnreceivedPacketsResponse, error) { + if req == nil { + return nil, status.Error(codes.InvalidArgument, "empty request") + } + + if err := validategRPCRequest(req.PortId, req.ChannelId); err != nil { + return nil, err + } + + ctx := sdk.UnwrapSDKContext(c) + + var unreceivedSequences = []uint64{} + + for i, seq := range req.PacketCommitmentSequences { + if seq == 0 { + return nil, status.Errorf(codes.InvalidArgument, "packet sequence %d cannot be 0", i) + } + + // if packet receipt exists on the receiving chain, then packet has already been received + if _, found := q.GetPacketReceipt(ctx, req.PortId, req.ChannelId, seq); !found { + unreceivedSequences = append(unreceivedSequences, seq) + } + + } + + selfHeight := clienttypes.GetSelfHeight(ctx) + return &types.QueryUnreceivedPacketsResponse{ + Sequences: unreceivedSequences, + Height: selfHeight, + }, nil +} + +// UnreceivedAcks implements the Query/UnreceivedAcks gRPC method. Given +// a list of counterparty packet acknowledgements, the querier checks if the packet +// acknowledgement has already been received by checking if the packet commitment still exists on this +// chain (original sender) for the packet sequence. +// All acknowledgements that haven't been received yet are returned in the response. +// Usage: To use this method correctly, first query all packet acknowledgements on +// the original receiving chain (i.e. the chain that wrote the acks) using the Query/PacketAcknowledgements gRPC method. +// Then input the returned sequences into the QueryUnreceivedAcksRequest +// and send the request to this Query/UnreceivedAcks on the **original sending** +// chain. This gRPC method will then return the list of packet sequences whose +// acknowledgements are already written on the receiving chain but haven't yet +// been received back to the sending chain. +// +// NOTE: The querier makes the assumption that the provided list of packet +// acknowledgements is correct and will not function properly if the list +// is not up to date. Ideally the query height should equal the latest height +// on the counterparty's client which represents this chain.
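+//
+// A relayer-side sketch, mirroring the UnreceivedPackets example above
+// (acksRes is assumed to be the counterparty's Query/PacketAcknowledgements
+// response):
+//
+//    seqs := make([]uint64, len(acksRes.Acknowledgements))
+//    for i, a := range acksRes.Acknowledgements {
+//        seqs[i] = a.Sequence
+//    }
+//    res, err := queryClient.UnreceivedAcks(ctx, &types.QueryUnreceivedAcksRequest{
+//        PortId:             portID,
+//        ChannelId:          channelID,
+//        PacketAckSequences: seqs,
+//    })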
+func (q Keeper) UnreceivedAcks(c context.Context, req *types.QueryUnreceivedAcksRequest) (*types.QueryUnreceivedAcksResponse, error) { + if req == nil { + return nil, status.Error(codes.InvalidArgument, "empty request") + } + + if err := validategRPCRequest(req.PortId, req.ChannelId); err != nil { + return nil, err + } + + ctx := sdk.UnwrapSDKContext(c) + + var unreceivedSequences = []uint64{} + + for i, seq := range req.PacketAckSequences { + if seq == 0 { + return nil, status.Errorf(codes.InvalidArgument, "packet sequence %d cannot be 0", i) + } + + // if packet commitment still exists on the original sending chain, then packet ack has not been received + // since processing the ack will delete the packet commitment + if commitment := q.GetPacketCommitment(ctx, req.PortId, req.ChannelId, seq); len(commitment) != 0 { + unreceivedSequences = append(unreceivedSequences, seq) + } + + } + + selfHeight := clienttypes.GetSelfHeight(ctx) + return &types.QueryUnreceivedAcksResponse{ + Sequences: unreceivedSequences, + Height: selfHeight, + }, nil +} + +// NextSequenceReceive implements the Query/NextSequenceReceive gRPC method +func (q Keeper) NextSequenceReceive(c context.Context, req *types.QueryNextSequenceReceiveRequest) (*types.QueryNextSequenceReceiveResponse, error) { + if req == nil { + return nil, status.Error(codes.InvalidArgument, "empty request") + } + + if err := validategRPCRequest(req.PortId, req.ChannelId); err != nil { + return nil, err + } + + ctx := sdk.UnwrapSDKContext(c) + sequence, found := q.GetNextSequenceRecv(ctx, req.PortId, req.ChannelId) + if !found { + return nil, status.Error( + codes.NotFound, + sdkerrors.Wrapf(types.ErrSequenceReceiveNotFound, "port-id: %s, channel-id %s", req.PortId, req.ChannelId).Error(), + ) + } + + selfHeight := clienttypes.GetSelfHeight(ctx) + return types.NewQueryNextSequenceReceiveResponse(sequence, nil, selfHeight), nil +} + +func validategRPCRequest(portID, channelID string) error { + if err := host.PortIdentifierValidator(portID); err != nil { + return status.Error(codes.InvalidArgument, err.Error()) + } + + if err := host.ChannelIdentifierValidator(channelID); err != nil { + return status.Error(codes.InvalidArgument, err.Error()) + } + + return nil +} diff --git a/core/04-channel/keeper/grpc_query_test.go b/core/04-channel/keeper/grpc_query_test.go new file mode 100644 index 0000000000..689c241c7b --- /dev/null +++ b/core/04-channel/keeper/grpc_query_test.go @@ -0,0 +1,1376 @@ +package keeper_test + +import ( + "fmt" + + sdk "github.com/cosmos/cosmos-sdk/types" + "github.com/cosmos/cosmos-sdk/types/query" + clienttypes "github.com/cosmos/cosmos-sdk/x/ibc/core/02-client/types" + connectiontypes "github.com/cosmos/cosmos-sdk/x/ibc/core/03-connection/types" + "github.com/cosmos/cosmos-sdk/x/ibc/core/04-channel/types" + "github.com/cosmos/cosmos-sdk/x/ibc/core/exported" + ibctesting "github.com/cosmos/cosmos-sdk/x/ibc/testing" +) + +func (suite *KeeperTestSuite) TestQueryChannel() { + var ( + req *types.QueryChannelRequest + expChannel types.Channel + ) + + testCases := []struct { + msg string + malleate func() + expPass bool + }{ + { + "empty request", + func() { + req = nil + }, + false, + }, + { + "invalid port ID", + func() { + req = &types.QueryChannelRequest{ + PortId: "", + ChannelId: "test-channel-id", + } + }, + false, + }, + { + "invalid channel ID", + func() { + req = &types.QueryChannelRequest{ + PortId: "test-port-id", + ChannelId: "", + } + }, + false, + }, + {"channel not found", + func() { + req = 
&types.QueryChannelRequest{ + PortId: "test-port-id", + ChannelId: "test-channel-id", + } + }, + false, + }, + { + "success", + func() { + _, _, connA, connB := suite.coordinator.SetupClientConnections(suite.chainA, suite.chainB, exported.Tendermint) + // init channel + channelA, _, err := suite.coordinator.ChanOpenInit(suite.chainA, suite.chainB, connA, connB, ibctesting.MockPort, ibctesting.MockPort, types.ORDERED) + suite.Require().NoError(err) + + expChannel = suite.chainA.GetChannel(channelA) + + req = &types.QueryChannelRequest{ + PortId: channelA.PortID, + ChannelId: channelA.ID, + } + }, + true, + }, + } + + for _, tc := range testCases { + suite.Run(fmt.Sprintf("Case %s", tc.msg), func() { + suite.SetupTest() // reset + + tc.malleate() + ctx := sdk.WrapSDKContext(suite.chainA.GetContext()) + + res, err := suite.chainA.QueryServer.Channel(ctx, req) + + if tc.expPass { + suite.Require().NoError(err) + suite.Require().NotNil(res) + suite.Require().Equal(&expChannel, res.Channel) + } else { + suite.Require().Error(err) + } + }) + } +} + +func (suite *KeeperTestSuite) TestQueryChannels() { + var ( + req *types.QueryChannelsRequest + expChannels = []*types.IdentifiedChannel{} + ) + + testCases := []struct { + msg string + malleate func() + expPass bool + }{ + { + "empty request", + func() { + req = nil + }, + false, + }, + { + "empty pagination", + func() { + req = &types.QueryChannelsRequest{} + }, + true, + }, + { + "success", + func() { + _, _, connA0, connB0, testchannel0, _ := suite.coordinator.Setup(suite.chainA, suite.chainB, types.UNORDERED) + // channel0 on first connection on chainA + counterparty0 := types.Counterparty{ + PortId: connB0.Channels[0].PortID, + ChannelId: connB0.Channels[0].ID, + } + + // channel1 is second channel on first connection on chainA + testchannel1, _ := suite.coordinator.CreateMockChannels(suite.chainA, suite.chainB, connA0, connB0, types.ORDERED) + counterparty1 := types.Counterparty{ + PortId: connB0.Channels[1].PortID, + ChannelId: connB0.Channels[1].ID, + } + + channel0 := types.NewChannel( + types.OPEN, types.UNORDERED, + counterparty0, []string{connA0.ID}, testchannel0.Version, + ) + channel1 := types.NewChannel( + types.OPEN, types.ORDERED, + counterparty1, []string{connA0.ID}, testchannel1.Version, + ) + + idCh0 := types.NewIdentifiedChannel(testchannel0.PortID, testchannel0.ID, channel0) + idCh1 := types.NewIdentifiedChannel(testchannel1.PortID, testchannel1.ID, channel1) + + expChannels = []*types.IdentifiedChannel{&idCh0, &idCh1} + + req = &types.QueryChannelsRequest{ + Pagination: &query.PageRequest{ + Key: nil, + Limit: 2, + CountTotal: true, + }, + } + }, + true, + }, + } + + for _, tc := range testCases { + suite.Run(fmt.Sprintf("Case %s", tc.msg), func() { + suite.SetupTest() // reset + + tc.malleate() + ctx := sdk.WrapSDKContext(suite.chainA.GetContext()) + + res, err := suite.chainA.QueryServer.Channels(ctx, req) + + if tc.expPass { + suite.Require().NoError(err) + suite.Require().NotNil(res) + suite.Require().Equal(expChannels, res.Channels) + suite.Require().Equal(len(expChannels), int(res.Pagination.Total)) + } else { + suite.Require().Error(err) + } + }) + } +} + +func (suite *KeeperTestSuite) TestQueryConnectionChannels() { + var ( + req *types.QueryConnectionChannelsRequest + expChannels = []*types.IdentifiedChannel{} + ) + + testCases := []struct { + msg string + malleate func() + expPass bool + }{ + { + "empty request", + func() { + req = nil + }, + false, + }, + { + "invalid connection ID", + func() { + req = 
&types.QueryConnectionChannelsRequest{ + Connection: "", + } + }, + false, + }, + { + "success", + func() { + _, _, connA0, connB0, testchannel0, _ := suite.coordinator.Setup(suite.chainA, suite.chainB, types.UNORDERED) + // channel0 on first connection on chainA + counterparty0 := types.Counterparty{ + PortId: connB0.Channels[0].PortID, + ChannelId: connB0.Channels[0].ID, + } + + // channel1 is second channel on first connection on chainA + testchannel1, _ := suite.coordinator.CreateMockChannels(suite.chainA, suite.chainB, connA0, connB0, types.ORDERED) + counterparty1 := types.Counterparty{ + PortId: connB0.Channels[1].PortID, + ChannelId: connB0.Channels[1].ID, + } + + channel0 := types.NewChannel( + types.OPEN, types.UNORDERED, + counterparty0, []string{connA0.ID}, testchannel0.Version, + ) + channel1 := types.NewChannel( + types.OPEN, types.ORDERED, + counterparty1, []string{connA0.ID}, testchannel1.Version, + ) + + idCh0 := types.NewIdentifiedChannel(testchannel0.PortID, testchannel0.ID, channel0) + idCh1 := types.NewIdentifiedChannel(testchannel1.PortID, testchannel1.ID, channel1) + + expChannels = []*types.IdentifiedChannel{&idCh0, &idCh1} + + req = &types.QueryConnectionChannelsRequest{ + Connection: connA0.ID, + Pagination: &query.PageRequest{ + Key: nil, + Limit: 2, + CountTotal: true, + }, + } + }, + true, + }, + { + "success, empty response", + func() { + suite.coordinator.Setup(suite.chainA, suite.chainB, types.UNORDERED) + expChannels = []*types.IdentifiedChannel{} + req = &types.QueryConnectionChannelsRequest{ + Connection: "externalConnID", + Pagination: &query.PageRequest{ + Key: nil, + Limit: 2, + CountTotal: false, + }, + } + }, + true, + }, + } + + for _, tc := range testCases { + suite.Run(fmt.Sprintf("Case %s", tc.msg), func() { + suite.SetupTest() // reset + + tc.malleate() + ctx := sdk.WrapSDKContext(suite.chainA.GetContext()) + + res, err := suite.chainA.QueryServer.ConnectionChannels(ctx, req) + + if tc.expPass { + suite.Require().NoError(err) + suite.Require().NotNil(res) + suite.Require().Equal(expChannels, res.Channels) + } else { + suite.Require().Error(err) + } + }) + } +} + +func (suite *KeeperTestSuite) TestQueryChannelClientState() { + var ( + req *types.QueryChannelClientStateRequest + expIdentifiedClientState clienttypes.IdentifiedClientState + ) + + testCases := []struct { + msg string + malleate func() + expPass bool + }{ + { + "empty request", + func() { + req = nil + }, + false, + }, + { + "invalid port ID", + func() { + req = &types.QueryChannelClientStateRequest{ + PortId: "", + ChannelId: "test-channel-id", + } + }, + false, + }, + { + "invalid channel ID", + func() { + req = &types.QueryChannelClientStateRequest{ + PortId: "test-port-id", + ChannelId: "", + } + }, + false, + }, + { + "channel not found", + func() { + req = &types.QueryChannelClientStateRequest{ + PortId: "test-port-id", + ChannelId: "test-channel-id", + } + }, + false, + }, + { + "connection not found", + func() { + _, _, _, _, channelA, _ := suite.coordinator.Setup(suite.chainA, suite.chainB, types.UNORDERED) + + channel := suite.chainA.GetChannel(channelA) + // update channel to reference a connection that does not exist + channel.ConnectionHops[0] = "doesnotexist" + + // set connection hops to wrong connection ID + suite.chainA.App.IBCKeeper.ChannelKeeper.SetChannel(suite.chainA.GetContext(), channelA.PortID, channelA.ID, channel) + + req = &types.QueryChannelClientStateRequest{ + PortId: channelA.PortID, + ChannelId: channelA.ID, + } + }, false, + }, + { + "client state for 
channel's connection not found", + func() { + _, _, connA, _, channelA, _ := suite.coordinator.Setup(suite.chainA, suite.chainB, types.UNORDERED) + + // set connection to empty so clientID is empty + suite.chainA.App.IBCKeeper.ConnectionKeeper.SetConnection(suite.chainA.GetContext(), connA.ID, connectiontypes.ConnectionEnd{}) + + req = &types.QueryChannelClientStateRequest{ + PortId: channelA.PortID, + ChannelId: channelA.ID, + } + }, false, + }, + { + "success", + func() { + clientA, _, connA, connB := suite.coordinator.SetupClientConnections(suite.chainA, suite.chainB, exported.Tendermint) + // init channel + channelA, _, err := suite.coordinator.ChanOpenInit(suite.chainA, suite.chainB, connA, connB, ibctesting.MockPort, ibctesting.MockPort, types.ORDERED) + suite.Require().NoError(err) + + expClientState := suite.chainA.GetClientState(clientA) + expIdentifiedClientState = clienttypes.NewIdentifiedClientState(clientA, expClientState) + + req = &types.QueryChannelClientStateRequest{ + PortId: channelA.PortID, + ChannelId: channelA.ID, + } + }, + true, + }, + } + + for _, tc := range testCases { + suite.Run(fmt.Sprintf("Case %s", tc.msg), func() { + suite.SetupTest() // reset + + tc.malleate() + ctx := sdk.WrapSDKContext(suite.chainA.GetContext()) + + res, err := suite.chainA.QueryServer.ChannelClientState(ctx, req) + + if tc.expPass { + suite.Require().NoError(err) + suite.Require().NotNil(res) + suite.Require().Equal(&expIdentifiedClientState, res.IdentifiedClientState) + + // ensure UnpackInterfaces is defined + cachedValue := res.IdentifiedClientState.ClientState.GetCachedValue() + suite.Require().NotNil(cachedValue) + } else { + suite.Require().Error(err) + } + }) + } +} + +func (suite *KeeperTestSuite) TestQueryChannelConsensusState() { + var ( + req *types.QueryChannelConsensusStateRequest + expConsensusState exported.ConsensusState + expClientID string + ) + + testCases := []struct { + msg string + malleate func() + expPass bool + }{ + { + "empty request", + func() { + req = nil + }, + false, + }, + { + "invalid port ID", + func() { + req = &types.QueryChannelConsensusStateRequest{ + PortId: "", + ChannelId: "test-channel-id", + RevisionNumber: 0, + RevisionHeight: 1, + } + }, + false, + }, + { + "invalid channel ID", + func() { + req = &types.QueryChannelConsensusStateRequest{ + PortId: "test-port-id", + ChannelId: "", + RevisionNumber: 0, + RevisionHeight: 1, + } + }, + false, + }, + { + "channel not found", + func() { + req = &types.QueryChannelConsensusStateRequest{ + PortId: "test-port-id", + ChannelId: "test-channel-id", + RevisionNumber: 0, + RevisionHeight: 1, + } + }, + false, + }, + { + "connection not found", + func() { + _, _, _, _, channelA, _ := suite.coordinator.Setup(suite.chainA, suite.chainB, types.UNORDERED) + + channel := suite.chainA.GetChannel(channelA) + // update channel to reference a connection that does not exist + channel.ConnectionHops[0] = "doesnotexist" + + // set connection hops to wrong connection ID + suite.chainA.App.IBCKeeper.ChannelKeeper.SetChannel(suite.chainA.GetContext(), channelA.PortID, channelA.ID, channel) + + req = &types.QueryChannelConsensusStateRequest{ + PortId: channelA.PortID, + ChannelId: channelA.ID, + RevisionNumber: 0, + RevisionHeight: 1, + } + }, false, + }, + { + "consensus state for channel's connection not found", + func() { + _, _, _, _, channelA, _ := suite.coordinator.Setup(suite.chainA, suite.chainB, types.UNORDERED) + + req = &types.QueryChannelConsensusStateRequest{ + PortId: channelA.PortID, + ChannelId: 
channelA.ID, + RevisionNumber: 0, + RevisionHeight: uint64(suite.chainA.GetContext().BlockHeight()), // use current height + } + }, false, + }, + { + "success", + func() { + clientA, _, connA, connB := suite.coordinator.SetupClientConnections(suite.chainA, suite.chainB, exported.Tendermint) + // init channel + channelA, _, err := suite.coordinator.ChanOpenInit(suite.chainA, suite.chainB, connA, connB, ibctesting.MockPort, ibctesting.MockPort, types.ORDERED) + suite.Require().NoError(err) + + clientState := suite.chainA.GetClientState(clientA) + expConsensusState, _ = suite.chainA.GetConsensusState(clientA, clientState.GetLatestHeight()) + suite.Require().NotNil(expConsensusState) + expClientID = clientA + + req = &types.QueryChannelConsensusStateRequest{ + PortId: channelA.PortID, + ChannelId: channelA.ID, + RevisionNumber: clientState.GetLatestHeight().GetRevisionNumber(), + RevisionHeight: clientState.GetLatestHeight().GetRevisionHeight(), + } + }, + true, + }, + } + + for _, tc := range testCases { + suite.Run(fmt.Sprintf("Case %s", tc.msg), func() { + suite.SetupTest() // reset + + tc.malleate() + ctx := sdk.WrapSDKContext(suite.chainA.GetContext()) + + res, err := suite.chainA.QueryServer.ChannelConsensusState(ctx, req) + + if tc.expPass { + suite.Require().NoError(err) + suite.Require().NotNil(res) + consensusState, err := clienttypes.UnpackConsensusState(res.ConsensusState) + suite.Require().NoError(err) + suite.Require().Equal(expConsensusState, consensusState) + suite.Require().Equal(expClientID, res.ClientId) + + // ensure UnpackInterfaces is defined + cachedValue := res.ConsensusState.GetCachedValue() + suite.Require().NotNil(cachedValue) + } else { + suite.Require().Error(err) + } + }) + } +} + +func (suite *KeeperTestSuite) TestQueryPacketCommitment() { + var ( + req *types.QueryPacketCommitmentRequest + expCommitment []byte + ) + + testCases := []struct { + msg string + malleate func() + expPass bool + }{ + { + "empty request", + func() { + req = nil + }, + false, + }, + { + "invalid port ID", + func() { + req = &types.QueryPacketCommitmentRequest{ + PortId: "", + ChannelId: "test-channel-id", + Sequence: 0, + } + }, + false, + }, + { + "invalid channel ID", + func() { + req = &types.QueryPacketCommitmentRequest{ + PortId: "test-port-id", + ChannelId: "", + Sequence: 0, + } + }, + false, + }, + {"invalid sequence", + func() { + req = &types.QueryPacketCommitmentRequest{ + PortId: "test-port-id", + ChannelId: "test-channel-id", + Sequence: 0, + } + }, + false, + }, + {"channel not found", + func() { + req = &types.QueryPacketCommitmentRequest{ + PortId: "test-port-id", + ChannelId: "test-channel-id", + Sequence: 1, + } + }, + false, + }, + { + "success", + func() { + _, _, _, _, channelA, _ := suite.coordinator.Setup(suite.chainA, suite.chainB, types.UNORDERED) + expCommitment = []byte("hash") + suite.chainA.App.IBCKeeper.ChannelKeeper.SetPacketCommitment(suite.chainA.GetContext(), channelA.PortID, channelA.ID, 1, expCommitment) + + req = &types.QueryPacketCommitmentRequest{ + PortId: channelA.PortID, + ChannelId: channelA.ID, + Sequence: 1, + } + }, + true, + }, + } + + for _, tc := range testCases { + suite.Run(fmt.Sprintf("Case %s", tc.msg), func() { + suite.SetupTest() // reset + + tc.malleate() + ctx := sdk.WrapSDKContext(suite.chainA.GetContext()) + + res, err := suite.chainA.QueryServer.PacketCommitment(ctx, req) + + if tc.expPass { + suite.Require().NoError(err) + suite.Require().NotNil(res) + suite.Require().Equal(expCommitment, res.Commitment) + } else { + 
suite.Require().Error(err) + } + }) + } +} + +func (suite *KeeperTestSuite) TestQueryPacketCommitments() { + var ( + req *types.QueryPacketCommitmentsRequest + expCommitments = []*types.PacketState{} + ) + + testCases := []struct { + msg string + malleate func() + expPass bool + }{ + { + "empty request", + func() { + req = nil + }, + false, + }, + { + "invalid ID", + func() { + req = &types.QueryPacketCommitmentsRequest{ + PortId: "", + ChannelId: "test-channel-id", + } + }, + false, + }, + { + "success, empty res", + func() { + expCommitments = []*types.PacketState{} + + req = &types.QueryPacketCommitmentsRequest{ + PortId: "test-port-id", + ChannelId: "test-channel-id", + Pagination: &query.PageRequest{ + Key: nil, + Limit: 2, + CountTotal: true, + }, + } + }, + true, + }, + { + "success", + func() { + _, _, _, _, channelA, _ := suite.coordinator.Setup(suite.chainA, suite.chainB, types.UNORDERED) + + expCommitments = make([]*types.PacketState, 9) + + for i := uint64(0); i < 9; i++ { + commitment := types.NewPacketState(channelA.PortID, channelA.ID, i, []byte(fmt.Sprintf("hash_%d", i))) + suite.chainA.App.IBCKeeper.ChannelKeeper.SetPacketCommitment(suite.chainA.GetContext(), commitment.PortId, commitment.ChannelId, commitment.Sequence, commitment.Data) + expCommitments[i] = &commitment + } + + req = &types.QueryPacketCommitmentsRequest{ + PortId: channelA.PortID, + ChannelId: channelA.ID, + Pagination: &query.PageRequest{ + Key: nil, + Limit: 11, + CountTotal: true, + }, + } + }, + true, + }, + } + + for _, tc := range testCases { + suite.Run(fmt.Sprintf("Case %s", tc.msg), func() { + suite.SetupTest() // reset + + tc.malleate() + ctx := sdk.WrapSDKContext(suite.chainA.GetContext()) + + res, err := suite.chainA.QueryServer.PacketCommitments(ctx, req) + + if tc.expPass { + suite.Require().NoError(err) + suite.Require().NotNil(res) + suite.Require().Equal(expCommitments, res.Commitments) + } else { + suite.Require().Error(err) + } + }) + } +} + +func (suite *KeeperTestSuite) TestQueryPacketReceipt() { + var ( + req *types.QueryPacketReceiptRequest + expReceived bool + ) + + testCases := []struct { + msg string + malleate func() + expPass bool + }{ + { + "empty request", + func() { + req = nil + }, + false, + }, + { + "invalid port ID", + func() { + req = &types.QueryPacketReceiptRequest{ + PortId: "", + ChannelId: "test-channel-id", + Sequence: 1, + } + }, + false, + }, + { + "invalid channel ID", + func() { + req = &types.QueryPacketReceiptRequest{ + PortId: "test-port-id", + ChannelId: "", + Sequence: 1, + } + }, + false, + }, + {"invalid sequence", + func() { + req = &types.QueryPacketReceiptRequest{ + PortId: "test-port-id", + ChannelId: "test-channel-id", + Sequence: 0, + } + }, + false, + }, + { + "success: receipt not found", + func() { + _, _, _, _, channelA, _ := suite.coordinator.Setup(suite.chainA, suite.chainB, types.UNORDERED) + suite.chainA.App.IBCKeeper.ChannelKeeper.SetPacketReceipt(suite.chainA.GetContext(), channelA.PortID, channelA.ID, 1) + + req = &types.QueryPacketReceiptRequest{ + PortId: channelA.PortID, + ChannelId: channelA.ID, + Sequence: 3, + } + expReceived = false + }, + true, + }, + { + "success: receipt found", + func() { + _, _, _, _, channelA, _ := suite.coordinator.Setup(suite.chainA, suite.chainB, types.UNORDERED) + suite.chainA.App.IBCKeeper.ChannelKeeper.SetPacketReceipt(suite.chainA.GetContext(), channelA.PortID, channelA.ID, 1) + + req = &types.QueryPacketReceiptRequest{ + PortId: channelA.PortID, + ChannelId: channelA.ID, + Sequence: 1, + } + 
expReceived = true + }, + true, + }, + } + + for _, tc := range testCases { + suite.Run(fmt.Sprintf("Case %s", tc.msg), func() { + suite.SetupTest() // reset + + tc.malleate() + ctx := sdk.WrapSDKContext(suite.chainA.GetContext()) + + res, err := suite.chainA.QueryServer.PacketReceipt(ctx, req) + + if tc.expPass { + suite.Require().NoError(err) + suite.Require().NotNil(res) + suite.Require().Equal(expReceived, res.Received) + } else { + suite.Require().Error(err) + } + }) + } +} + +func (suite *KeeperTestSuite) TestQueryPacketAcknowledgement() { + var ( + req *types.QueryPacketAcknowledgementRequest + expAck []byte + ) + + testCases := []struct { + msg string + malleate func() + expPass bool + }{ + { + "empty request", + func() { + req = nil + }, + false, + }, + { + "invalid port ID", + func() { + req = &types.QueryPacketAcknowledgementRequest{ + PortId: "", + ChannelId: "test-channel-id", + Sequence: 0, + } + }, + false, + }, + { + "invalid channel ID", + func() { + req = &types.QueryPacketAcknowledgementRequest{ + PortId: "test-port-id", + ChannelId: "", + Sequence: 0, + } + }, + false, + }, + {"invalid sequence", + func() { + req = &types.QueryPacketAcknowledgementRequest{ + PortId: "test-port-id", + ChannelId: "test-channel-id", + Sequence: 0, + } + }, + false, + }, + {"channel not found", + func() { + req = &types.QueryPacketAcknowledgementRequest{ + PortId: "test-port-id", + ChannelId: "test-channel-id", + Sequence: 1, + } + }, + false, + }, + { + "success", + func() { + _, _, _, _, channelA, _ := suite.coordinator.Setup(suite.chainA, suite.chainB, types.UNORDERED) + expAck = []byte("hash") + suite.chainA.App.IBCKeeper.ChannelKeeper.SetPacketAcknowledgement(suite.chainA.GetContext(), channelA.PortID, channelA.ID, 1, expAck) + + req = &types.QueryPacketAcknowledgementRequest{ + PortId: channelA.PortID, + ChannelId: channelA.ID, + Sequence: 1, + } + }, + true, + }, + } + + for _, tc := range testCases { + suite.Run(fmt.Sprintf("Case %s", tc.msg), func() { + suite.SetupTest() // reset + + tc.malleate() + ctx := sdk.WrapSDKContext(suite.chainA.GetContext()) + + res, err := suite.chainA.QueryServer.PacketAcknowledgement(ctx, req) + + if tc.expPass { + suite.Require().NoError(err) + suite.Require().NotNil(res) + suite.Require().Equal(expAck, res.Acknowledgement) + } else { + suite.Require().Error(err) + } + }) + } +} + +func (suite *KeeperTestSuite) TestQueryPacketAcknowledgements() { + var ( + req *types.QueryPacketAcknowledgementsRequest + expAcknowledgements = []*types.PacketState{} + ) + + testCases := []struct { + msg string + malleate func() + expPass bool + }{ + { + "empty request", + func() { + req = nil + }, + false, + }, + { + "invalid ID", + func() { + req = &types.QueryPacketAcknowledgementsRequest{ + PortId: "", + ChannelId: "test-channel-id", + } + }, + false, + }, + { + "success, empty res", + func() { + expAcknowledgements = []*types.PacketState{} + + req = &types.QueryPacketAcknowledgementsRequest{ + PortId: "test-port-id", + ChannelId: "test-channel-id", + Pagination: &query.PageRequest{ + Key: nil, + Limit: 2, + CountTotal: true, + }, + } + }, + true, + }, + { + "success", + func() { + _, _, _, _, channelA, _ := suite.coordinator.Setup(suite.chainA, suite.chainB, types.UNORDERED) + + expAcknowledgements = make([]*types.PacketState, 9) + + for i := uint64(0); i < 9; i++ { + ack := types.NewPacketState(channelA.PortID, channelA.ID, i, []byte(fmt.Sprintf("hash_%d", i))) + suite.chainA.App.IBCKeeper.ChannelKeeper.SetPacketAcknowledgement(suite.chainA.GetContext(), 
ack.PortId, ack.ChannelId, ack.Sequence, ack.Data) + expAcknowledgements[i] = &ack + } + + req = &types.QueryPacketAcknowledgementsRequest{ + PortId: channelA.PortID, + ChannelId: channelA.ID, + Pagination: &query.PageRequest{ + Key: nil, + Limit: 11, + CountTotal: true, + }, + } + }, + true, + }, + } + + for _, tc := range testCases { + suite.Run(fmt.Sprintf("Case %s", tc.msg), func() { + suite.SetupTest() // reset + + tc.malleate() + ctx := sdk.WrapSDKContext(suite.chainA.GetContext()) + + res, err := suite.chainA.QueryServer.PacketAcknowledgements(ctx, req) + + if tc.expPass { + suite.Require().NoError(err) + suite.Require().NotNil(res) + suite.Require().Equal(expAcknowledgements, res.Acknowledgements) + } else { + suite.Require().Error(err) + } + }) + } +} + +func (suite *KeeperTestSuite) TestQueryUnreceivedPackets() { + var ( + req *types.QueryUnreceivedPacketsRequest + expSeq = []uint64{} + ) + + testCases := []struct { + msg string + malleate func() + expPass bool + }{ + { + "empty request", + func() { + req = nil + }, + false, + }, + { + "invalid port ID", + func() { + req = &types.QueryUnreceivedPacketsRequest{ + PortId: "", + ChannelId: "test-channel-id", + } + }, + false, + }, + { + "invalid channel ID", + func() { + req = &types.QueryUnreceivedPacketsRequest{ + PortId: "test-port-id", + ChannelId: "", + } + }, + false, + }, + { + "invalid seq", + func() { + req = &types.QueryUnreceivedPacketsRequest{ + PortId: "test-port-id", + ChannelId: "test-channel-id", + PacketCommitmentSequences: []uint64{0}, + } + }, + false, + }, + { + "basic success unreceived packet commitments", + func() { + _, _, _, _, channelA, _ := suite.coordinator.Setup(suite.chainA, suite.chainB, types.UNORDERED) + + // no ack exists + + expSeq = []uint64{1} + req = &types.QueryUnreceivedPacketsRequest{ + PortId: channelA.PortID, + ChannelId: channelA.ID, + PacketCommitmentSequences: []uint64{1}, + } + }, + true, + }, + { + "basic success unreceived packet commitments, nothing to relay", + func() { + _, _, _, _, channelA, _ := suite.coordinator.Setup(suite.chainA, suite.chainB, types.UNORDERED) + + suite.chainA.App.IBCKeeper.ChannelKeeper.SetPacketReceipt(suite.chainA.GetContext(), channelA.PortID, channelA.ID, 1) + + expSeq = []uint64{} + req = &types.QueryUnreceivedPacketsRequest{ + PortId: channelA.PortID, + ChannelId: channelA.ID, + PacketCommitmentSequences: []uint64{1}, + } + }, + true, + }, + { + "success multiple unreceived packet commitments", + func() { + _, _, _, _, channelA, _ := suite.coordinator.Setup(suite.chainA, suite.chainB, types.UNORDERED) + expSeq = []uint64{} // reset + packetCommitments := []uint64{} + + // set packet receipt for every other sequence + for seq := uint64(1); seq < 10; seq++ { + packetCommitments = append(packetCommitments, seq) + + if seq%2 == 0 { + suite.chainA.App.IBCKeeper.ChannelKeeper.SetPacketReceipt(suite.chainA.GetContext(), channelA.PortID, channelA.ID, seq) + } else { + expSeq = append(expSeq, seq) + } + } + + req = &types.QueryUnreceivedPacketsRequest{ + PortId: channelA.PortID, + ChannelId: channelA.ID, + PacketCommitmentSequences: packetCommitments, + } + }, + true, + }, + } + + for _, tc := range testCases { + suite.Run(fmt.Sprintf("Case %s", tc.msg), func() { + suite.SetupTest() // reset + + tc.malleate() + ctx := sdk.WrapSDKContext(suite.chainA.GetContext()) + + res, err := suite.chainA.QueryServer.UnreceivedPackets(ctx, req) + + if tc.expPass { + suite.Require().NoError(err) + suite.Require().NotNil(res) + suite.Require().Equal(expSeq, res.Sequences) + } 
else { + suite.Require().Error(err) + } + }) + } +} + +func (suite *KeeperTestSuite) TestQueryUnreceivedAcks() { + var ( + req *types.QueryUnreceivedAcksRequest + expSeq = []uint64{} + ) + + testCases := []struct { + msg string + malleate func() + expPass bool + }{ + { + "empty request", + func() { + req = nil + }, + false, + }, + { + "invalid port ID", + func() { + req = &types.QueryUnreceivedAcksRequest{ + PortId: "", + ChannelId: "test-channel-id", + } + }, + false, + }, + { + "invalid channel ID", + func() { + req = &types.QueryUnreceivedAcksRequest{ + PortId: "test-port-id", + ChannelId: "", + } + }, + false, + }, + { + "invalid seq", + func() { + req = &types.QueryUnreceivedAcksRequest{ + PortId: "test-port-id", + ChannelId: "test-channel-id", + PacketAckSequences: []uint64{0}, + } + }, + false, + }, + { + "basic success unreceived packet acks", + func() { + _, _, _, _, channelA, _ := suite.coordinator.Setup(suite.chainA, suite.chainB, types.UNORDERED) + + suite.chainA.App.IBCKeeper.ChannelKeeper.SetPacketCommitment(suite.chainA.GetContext(), channelA.PortID, channelA.ID, 1, []byte("commitment")) + + expSeq = []uint64{1} + req = &types.QueryUnreceivedAcksRequest{ + PortId: channelA.PortID, + ChannelId: channelA.ID, + PacketAckSequences: []uint64{1}, + } + }, + true, + }, + { + "basic success unreceived packet acknowledgements, nothing to relay", + func() { + _, _, _, _, channelA, _ := suite.coordinator.Setup(suite.chainA, suite.chainB, types.UNORDERED) + + expSeq = []uint64{} + req = &types.QueryUnreceivedAcksRequest{ + PortId: channelA.PortID, + ChannelId: channelA.ID, + PacketAckSequences: []uint64{1}, + } + }, + true, + }, + { + "success multiple unreceived packet acknowledgements", + func() { + _, _, _, _, channelA, _ := suite.coordinator.Setup(suite.chainA, suite.chainB, types.UNORDERED) + expSeq = []uint64{} // reset + packetAcks := []uint64{} + + // set packet commitment for every other sequence + for seq := uint64(1); seq < 10; seq++ { + packetAcks = append(packetAcks, seq) + + if seq%2 == 0 { + suite.chainA.App.IBCKeeper.ChannelKeeper.SetPacketCommitment(suite.chainA.GetContext(), channelA.PortID, channelA.ID, seq, []byte("commitement")) + expSeq = append(expSeq, seq) + } + } + + req = &types.QueryUnreceivedAcksRequest{ + PortId: channelA.PortID, + ChannelId: channelA.ID, + PacketAckSequences: packetAcks, + } + }, + true, + }, + } + + for _, tc := range testCases { + suite.Run(fmt.Sprintf("Case %s", tc.msg), func() { + suite.SetupTest() // reset + + tc.malleate() + ctx := sdk.WrapSDKContext(suite.chainA.GetContext()) + + res, err := suite.chainA.QueryServer.UnreceivedAcks(ctx, req) + + if tc.expPass { + suite.Require().NoError(err) + suite.Require().NotNil(res) + suite.Require().Equal(expSeq, res.Sequences) + } else { + suite.Require().Error(err) + } + }) + } +} + +func (suite *KeeperTestSuite) TestQueryNextSequenceReceive() { + var ( + req *types.QueryNextSequenceReceiveRequest + expSeq uint64 + ) + + testCases := []struct { + msg string + malleate func() + expPass bool + }{ + { + "empty request", + func() { + req = nil + }, + false, + }, + { + "invalid port ID", + func() { + req = &types.QueryNextSequenceReceiveRequest{ + PortId: "", + ChannelId: "test-channel-id", + } + }, + false, + }, + { + "invalid channel ID", + func() { + req = &types.QueryNextSequenceReceiveRequest{ + PortId: "test-port-id", + ChannelId: "", + } + }, + false, + }, + {"channel not found", + func() { + req = &types.QueryNextSequenceReceiveRequest{ + PortId: "test-port-id", + ChannelId: 
"test-channel-id", + } + }, + false, + }, + { + "success", + func() { + _, _, _, _, channelA, _ := suite.coordinator.Setup(suite.chainA, suite.chainB, types.UNORDERED) + expSeq = 1 + suite.chainA.App.IBCKeeper.ChannelKeeper.SetNextSequenceRecv(suite.chainA.GetContext(), channelA.PortID, channelA.ID, expSeq) + + req = &types.QueryNextSequenceReceiveRequest{ + PortId: channelA.PortID, + ChannelId: channelA.ID, + } + }, + true, + }, + } + + for _, tc := range testCases { + suite.Run(fmt.Sprintf("Case %s", tc.msg), func() { + suite.SetupTest() // reset + + tc.malleate() + ctx := sdk.WrapSDKContext(suite.chainA.GetContext()) + + res, err := suite.chainA.QueryServer.NextSequenceReceive(ctx, req) + + if tc.expPass { + suite.Require().NoError(err) + suite.Require().NotNil(res) + suite.Require().Equal(expSeq, res.NextSequenceReceive) + } else { + suite.Require().Error(err) + } + }) + } +} diff --git a/core/04-channel/keeper/handshake.go b/core/04-channel/keeper/handshake.go new file mode 100644 index 0000000000..b7cff480c9 --- /dev/null +++ b/core/04-channel/keeper/handshake.go @@ -0,0 +1,496 @@ +package keeper + +import ( + "github.com/cosmos/cosmos-sdk/telemetry" + sdk "github.com/cosmos/cosmos-sdk/types" + sdkerrors "github.com/cosmos/cosmos-sdk/types/errors" + capabilitytypes "github.com/cosmos/cosmos-sdk/x/capability/types" + connectiontypes "github.com/cosmos/cosmos-sdk/x/ibc/core/03-connection/types" + "github.com/cosmos/cosmos-sdk/x/ibc/core/04-channel/types" + porttypes "github.com/cosmos/cosmos-sdk/x/ibc/core/05-port/types" + host "github.com/cosmos/cosmos-sdk/x/ibc/core/24-host" + "github.com/cosmos/cosmos-sdk/x/ibc/core/exported" +) + +// CounterpartyHops returns the connection hops of the counterparty channel. +// The counterparty hops are stored in the inverse order as the channel's. +// NOTE: Since connectionHops only supports single connection channels for now, +// this function requires that connection hops only contain a single connection id +func (k Keeper) CounterpartyHops(ctx sdk.Context, ch types.Channel) ([]string, bool) { + // Return empty array if connection hops is more than one + // ConnectionHops length should be verified earlier + if len(ch.ConnectionHops) != 1 { + return []string{}, false + } + counterpartyHops := make([]string, 1) + hop := ch.ConnectionHops[0] + conn, found := k.connectionKeeper.GetConnection(ctx, hop) + if !found { + return []string{}, false + } + + counterpartyHops[0] = conn.GetCounterparty().GetConnectionID() + return counterpartyHops, true +} + +// ChanOpenInit is called by a module to initiate a channel opening handshake with +// a module on another chain. The counterparty channel identifier is validated to be +// empty in msg validation. 
+func (k Keeper) ChanOpenInit( + ctx sdk.Context, + order types.Order, + connectionHops []string, + portID string, + portCap *capabilitytypes.Capability, + counterparty types.Counterparty, + version string, +) (string, *capabilitytypes.Capability, error) { + // connection hop length checked on msg.ValidateBasic() + connectionEnd, found := k.connectionKeeper.GetConnection(ctx, connectionHops[0]) + if !found { + return "", nil, sdkerrors.Wrap(connectiontypes.ErrConnectionNotFound, connectionHops[0]) + } + + getVersions := connectionEnd.GetVersions() + if len(getVersions) != 1 { + return "", nil, sdkerrors.Wrapf( + connectiontypes.ErrInvalidVersion, + "single version must be negotiated on connection before opening channel, got: %v", + getVersions, + ) + } + + if !connectiontypes.VerifySupportedFeature(getVersions[0], order.String()) { + return "", nil, sdkerrors.Wrapf( + connectiontypes.ErrInvalidVersion, + "connection version %s does not support channel ordering: %s", + getVersions[0], order.String(), + ) + } + + if !k.portKeeper.Authenticate(ctx, portCap, portID) { + return "", nil, sdkerrors.Wrapf(porttypes.ErrInvalidPort, "caller does not own port capability for port ID %s", portID) + } + + channelID := k.GenerateChannelIdentifier(ctx) + channel := types.NewChannel(types.INIT, order, counterparty, connectionHops, version) + k.SetChannel(ctx, portID, channelID, channel) + + capKey, err := k.scopedKeeper.NewCapability(ctx, host.ChannelCapabilityPath(portID, channelID)) + if err != nil { + return "", nil, sdkerrors.Wrapf(err, "could not create channel capability for port ID %s and channel ID %s", portID, channelID) + } + + k.SetNextSequenceSend(ctx, portID, channelID, 1) + k.SetNextSequenceRecv(ctx, portID, channelID, 1) + k.SetNextSequenceAck(ctx, portID, channelID, 1) + + k.Logger(ctx).Info("channel state updated", "port-id", portID, "channel-id", channelID, "previous-state", "NONE", "new-state", "INIT") + + defer func() { + telemetry.IncrCounter(1, "ibc", "channel", "open-init") + }() + + return channelID, capKey, nil +} + +// ChanOpenTry is called by a module to accept the first step of a channel opening +// handshake initiated by a module on another chain. 
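As with INIT, a caller-side sketch may be useful before the implementation below. It is modeled on the TestChanOpenTry run loop in handshake_test.go later in this patch and is illustrative only (not part of the patch); QueryProof, TestConnection and TestChannel come from the ibctesting package.

// openTrySketch is an illustrative test-style helper, not part of this patch. The
// proof of chain A's INIT channel end is queried from chain A's store and then
// verified on chain B by ChanOpenTry. An empty previousChannelID requests a fresh
// identifier; a non-empty one continues a crossing-hello handshake.
func (suite *KeeperTestSuite) openTrySketch(
	connB *ibctesting.TestConnection,
	channelA, channelB ibctesting.TestChannel,
	portCap *capabilitytypes.Capability,
) (string, *capabilitytypes.Capability, error) {
	counterparty := types.NewCounterparty(channelA.PortID, channelA.ID)

	channelKey := host.ChannelKey(counterparty.PortId, counterparty.ChannelId)
	proofInit, proofHeight := suite.chainA.QueryProof(channelKey)

	return suite.chainB.App.IBCKeeper.ChannelKeeper.ChanOpenTry(
		suite.chainB.GetContext(), types.ORDERED, []string{connB.ID},
		channelB.PortID, "", portCap, counterparty, channelB.Version, channelA.Version,
		proofInit, proofHeight,
	)
}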
+func (k Keeper) ChanOpenTry( + ctx sdk.Context, + order types.Order, + connectionHops []string, + portID, + previousChannelID string, + portCap *capabilitytypes.Capability, + counterparty types.Counterparty, + version, + counterpartyVersion string, + proofInit []byte, + proofHeight exported.Height, +) (string, *capabilitytypes.Capability, error) { + var ( + previousChannel types.Channel + previousChannelFound bool + ) + + channelID := previousChannelID + + // a non-empty previous channel identifier indicates a continuation of a previous channel handshake (crossing hellos) + if previousChannelID != "" { + // channel identifier and connection hop length checked on msg.ValidateBasic() + // ensure that the previous channel exists + previousChannel, previousChannelFound = k.GetChannel(ctx, portID, previousChannelID) + if !previousChannelFound { + return "", nil, sdkerrors.Wrapf(types.ErrInvalidChannel, "previous channel does not exist for supplied previous channelID %s", previousChannelID) + } + // previous channel must use the same fields + if !(previousChannel.Ordering == order && + previousChannel.Counterparty.PortId == counterparty.PortId && + previousChannel.Counterparty.ChannelId == "" && + previousChannel.ConnectionHops[0] == connectionHops[0] && + previousChannel.Version == version) { + return "", nil, sdkerrors.Wrap(types.ErrInvalidChannel, "channel fields mismatch previous channel fields") + } + + if previousChannel.State != types.INIT { + return "", nil, sdkerrors.Wrapf(types.ErrInvalidChannelState, "previous channel state is in %s, expected INIT", previousChannel.State) + } + + } else { + // generate a new channel + channelID = k.GenerateChannelIdentifier(ctx) + } + + if !k.portKeeper.Authenticate(ctx, portCap, portID) { + return "", nil, sdkerrors.Wrapf(porttypes.ErrInvalidPort, "caller does not own port capability for port ID %s", portID) + } + + connectionEnd, found := k.connectionKeeper.GetConnection(ctx, connectionHops[0]) + if !found { + return "", nil, sdkerrors.Wrap(connectiontypes.ErrConnectionNotFound, connectionHops[0]) + } + + if connectionEnd.GetState() != int32(connectiontypes.OPEN) { + return "", nil, sdkerrors.Wrapf( + connectiontypes.ErrInvalidConnectionState, + "connection state is not OPEN (got %s)", connectiontypes.State(connectionEnd.GetState()).String(), + ) + } + + getVersions := connectionEnd.GetVersions() + if len(getVersions) != 1 { + return "", nil, sdkerrors.Wrapf( + connectiontypes.ErrInvalidVersion, + "single version must be negotiated on connection before opening channel, got: %v", + getVersions, + ) + } + + if !connectiontypes.VerifySupportedFeature(getVersions[0], order.String()) { + return "", nil, sdkerrors.Wrapf( + connectiontypes.ErrInvalidVersion, + "connection version %s does not support channel ordering: %s", + getVersions[0], order.String(), + ) + } + + // NOTE: this step has been switched with the one below to reverse the connection + // hops + channel := types.NewChannel(types.TRYOPEN, order, counterparty, connectionHops, version) + + counterpartyHops, found := k.CounterpartyHops(ctx, channel) + if !found { + // should not reach here, connectionEnd was able to be retrieved above + panic("cannot find connection") + } + + // expectedCounterparty is the counterparty of the counterparty's channel end + // (i.e. self) + expectedCounterparty := types.NewCounterparty(portID, "") + expectedChannel := types.NewChannel( + types.INIT, channel.Ordering, expectedCounterparty, + counterpartyHops, counterpartyVersion, + ) + + if err := k.connectionKeeper.VerifyChannelState( + ctx, connectionEnd,
proofHeight, proofInit, + counterparty.PortId, counterparty.ChannelId, expectedChannel, + ); err != nil { + return "", nil, err + } + + var ( + capKey *capabilitytypes.Capability + err error + ) + + if !previousChannelFound { + capKey, err = k.scopedKeeper.NewCapability(ctx, host.ChannelCapabilityPath(portID, channelID)) + if err != nil { + return "", nil, sdkerrors.Wrapf(err, "could not create channel capability for port ID %s and channel ID %s", portID, channelID) + } + + k.SetNextSequenceSend(ctx, portID, channelID, 1) + k.SetNextSequenceRecv(ctx, portID, channelID, 1) + k.SetNextSequenceAck(ctx, portID, channelID, 1) + } else { + // capability initialized in ChanOpenInit + capKey, found = k.scopedKeeper.GetCapability(ctx, host.ChannelCapabilityPath(portID, channelID)) + if !found { + return "", nil, sdkerrors.Wrapf(types.ErrChannelCapabilityNotFound, + "capability not found for existing channel, portID (%s) channelID (%s)", portID, channelID, + ) + } + } + + k.SetChannel(ctx, portID, channelID, channel) + + k.Logger(ctx).Info("channel state updated", "port-id", portID, "channel-id", channelID, "previous-state", previousChannel.State.String(), "new-state", "TRYOPEN") + + defer func() { + telemetry.IncrCounter(1, "ibc", "channel", "open-try") + }() + + return channelID, capKey, nil +} + +// ChanOpenAck is called by the handshake-originating module to acknowledge the +// acceptance of the initial request by the counterparty module on the other chain. +func (k Keeper) ChanOpenAck( + ctx sdk.Context, + portID, + channelID string, + chanCap *capabilitytypes.Capability, + counterpartyVersion, + counterpartyChannelID string, + proofTry []byte, + proofHeight exported.Height, +) error { + channel, found := k.GetChannel(ctx, portID, channelID) + if !found { + return sdkerrors.Wrapf(types.ErrChannelNotFound, "port ID (%s) channel ID (%s)", portID, channelID) + } + + if !(channel.State == types.INIT || channel.State == types.TRYOPEN) { + return sdkerrors.Wrapf( + types.ErrInvalidChannelState, + "channel state should be INIT or TRYOPEN (got %s)", channel.State.String(), + ) + } + + if !k.scopedKeeper.AuthenticateCapability(ctx, chanCap, host.ChannelCapabilityPath(portID, channelID)) { + return sdkerrors.Wrapf(types.ErrChannelCapabilityNotFound, "caller does not own capability for channel, port ID (%s) channel ID (%s)", portID, channelID) + } + + connectionEnd, found := k.connectionKeeper.GetConnection(ctx, channel.ConnectionHops[0]) + if !found { + return sdkerrors.Wrap(connectiontypes.ErrConnectionNotFound, channel.ConnectionHops[0]) + } + + if connectionEnd.GetState() != int32(connectiontypes.OPEN) { + return sdkerrors.Wrapf( + connectiontypes.ErrInvalidConnectionState, + "connection state is not OPEN (got %s)", connectiontypes.State(connectionEnd.GetState()).String(), + ) + } + + counterpartyHops, found := k.CounterpartyHops(ctx, channel) + if !found { + // should not reach here, connectionEnd was able to be retrieved above + panic("cannot find connection") + } + + // counterparty of the counterparty channel end (i.e self) + expectedCounterparty := types.NewCounterparty(portID, channelID) + expectedChannel := types.NewChannel( + types.TRYOPEN, channel.Ordering, expectedCounterparty, + counterpartyHops, counterpartyVersion, + ) + + if err := k.connectionKeeper.VerifyChannelState( + ctx, connectionEnd, proofHeight, proofTry, + channel.Counterparty.PortId, counterpartyChannelID, + expectedChannel, + ); err != nil { + return err + } + + k.Logger(ctx).Info("channel state updated", "port-id", portID, 
"channel-id", channelID, "previous-state", channel.State.String(), "new-state", "OPEN") + + defer func() { + telemetry.IncrCounter(1, "ibc", "channel", "open-ack") + }() + + channel.State = types.OPEN + channel.Version = counterpartyVersion + channel.Counterparty.ChannelId = counterpartyChannelID + k.SetChannel(ctx, portID, channelID, channel) + + return nil +} + +// ChanOpenConfirm is called by the counterparty module to close their end of the +// channel, since the other end has been closed. +func (k Keeper) ChanOpenConfirm( + ctx sdk.Context, + portID, + channelID string, + chanCap *capabilitytypes.Capability, + proofAck []byte, + proofHeight exported.Height, +) error { + channel, found := k.GetChannel(ctx, portID, channelID) + if !found { + return sdkerrors.Wrapf(types.ErrChannelNotFound, "port ID (%s) channel ID (%s)", portID, channelID) + } + + if channel.State != types.TRYOPEN { + return sdkerrors.Wrapf( + types.ErrInvalidChannelState, + "channel state is not TRYOPEN (got %s)", channel.State.String(), + ) + } + + if !k.scopedKeeper.AuthenticateCapability(ctx, chanCap, host.ChannelCapabilityPath(portID, channelID)) { + return sdkerrors.Wrapf(types.ErrChannelCapabilityNotFound, "caller does not own capability for channel, port ID (%s) channel ID (%s)", portID, channelID) + } + + connectionEnd, found := k.connectionKeeper.GetConnection(ctx, channel.ConnectionHops[0]) + if !found { + return sdkerrors.Wrap(connectiontypes.ErrConnectionNotFound, channel.ConnectionHops[0]) + } + + if connectionEnd.GetState() != int32(connectiontypes.OPEN) { + return sdkerrors.Wrapf( + connectiontypes.ErrInvalidConnectionState, + "connection state is not OPEN (got %s)", connectiontypes.State(connectionEnd.GetState()).String(), + ) + } + + counterpartyHops, found := k.CounterpartyHops(ctx, channel) + if !found { + // Should not reach here, connectionEnd was able to be retrieved above + panic("cannot find connection") + } + + counterparty := types.NewCounterparty(portID, channelID) + expectedChannel := types.NewChannel( + types.OPEN, channel.Ordering, counterparty, + counterpartyHops, channel.Version, + ) + + if err := k.connectionKeeper.VerifyChannelState( + ctx, connectionEnd, proofHeight, proofAck, + channel.Counterparty.PortId, channel.Counterparty.ChannelId, + expectedChannel, + ); err != nil { + return err + } + + channel.State = types.OPEN + k.SetChannel(ctx, portID, channelID, channel) + k.Logger(ctx).Info("channel state updated", "port-id", portID, "channel-id", channelID, "previous-state", "TRYOPEN", "new-state", "OPEN") + + defer func() { + telemetry.IncrCounter(1, "ibc", "channel", "open-confirm") + }() + return nil +} + +// Closing Handshake +// +// This section defines the set of functions required to close a channel handshake +// as defined in https://github.com/cosmos/ics/tree/master/spec/ics-004-channel-and-packet-semantics#closing-handshake +// +// ChanCloseInit is called by either module to close their end of the channel. Once +// closed, channels cannot be reopened. 
+func (k Keeper) ChanCloseInit( + ctx sdk.Context, + portID, + channelID string, + chanCap *capabilitytypes.Capability, +) error { + if !k.scopedKeeper.AuthenticateCapability(ctx, chanCap, host.ChannelCapabilityPath(portID, channelID)) { + return sdkerrors.Wrapf(types.ErrChannelCapabilityNotFound, "caller does not own capability for channel, port ID (%s) channel ID (%s)", portID, channelID) + } + + channel, found := k.GetChannel(ctx, portID, channelID) + if !found { + return sdkerrors.Wrapf(types.ErrChannelNotFound, "port ID (%s) channel ID (%s)", portID, channelID) + } + + if channel.State == types.CLOSED { + return sdkerrors.Wrap(types.ErrInvalidChannelState, "channel is already CLOSED") + } + + connectionEnd, found := k.connectionKeeper.GetConnection(ctx, channel.ConnectionHops[0]) + if !found { + return sdkerrors.Wrap(connectiontypes.ErrConnectionNotFound, channel.ConnectionHops[0]) + } + + if connectionEnd.GetState() != int32(connectiontypes.OPEN) { + return sdkerrors.Wrapf( + connectiontypes.ErrInvalidConnectionState, + "connection state is not OPEN (got %s)", connectiontypes.State(connectionEnd.GetState()).String(), + ) + } + + k.Logger(ctx).Info("channel state updated", "port-id", portID, "channel-id", channelID, "previous-state", channel.State.String(), "new-state", "CLOSED") + + defer func() { + telemetry.IncrCounter(1, "ibc", "channel", "close-init") + }() + + channel.State = types.CLOSED + k.SetChannel(ctx, portID, channelID, channel) + + return nil +} + +// ChanCloseConfirm is called by the counterparty module to close their end of the +// channel, since the other end has been closed. +func (k Keeper) ChanCloseConfirm( + ctx sdk.Context, + portID, + channelID string, + chanCap *capabilitytypes.Capability, + proofInit []byte, + proofHeight exported.Height, +) error { + if !k.scopedKeeper.AuthenticateCapability(ctx, chanCap, host.ChannelCapabilityPath(portID, channelID)) { + return sdkerrors.Wrap(types.ErrChannelCapabilityNotFound, "caller does not own capability for channel, port ID (%s) channel ID (%s)") + } + + channel, found := k.GetChannel(ctx, portID, channelID) + if !found { + return sdkerrors.Wrapf(types.ErrChannelNotFound, "port ID (%s) channel ID (%s)", portID, channelID) + } + + if channel.State == types.CLOSED { + return sdkerrors.Wrap(types.ErrInvalidChannelState, "channel is already CLOSED") + } + + connectionEnd, found := k.connectionKeeper.GetConnection(ctx, channel.ConnectionHops[0]) + if !found { + return sdkerrors.Wrap(connectiontypes.ErrConnectionNotFound, channel.ConnectionHops[0]) + } + + if connectionEnd.GetState() != int32(connectiontypes.OPEN) { + return sdkerrors.Wrapf( + connectiontypes.ErrInvalidConnectionState, + "connection state is not OPEN (got %s)", connectiontypes.State(connectionEnd.GetState()).String(), + ) + } + + counterpartyHops, found := k.CounterpartyHops(ctx, channel) + if !found { + // Should not reach here, connectionEnd was able to be retrieved above + panic("cannot find connection") + } + + counterparty := types.NewCounterparty(portID, channelID) + expectedChannel := types.NewChannel( + types.CLOSED, channel.Ordering, counterparty, + counterpartyHops, channel.Version, + ) + + if err := k.connectionKeeper.VerifyChannelState( + ctx, connectionEnd, proofHeight, proofInit, + channel.Counterparty.PortId, channel.Counterparty.ChannelId, + expectedChannel, + ); err != nil { + return err + } + + k.Logger(ctx).Info("channel state updated", "port-id", portID, "channel-id", channelID, "previous-state", channel.State.String(), "new-state", 
"CLOSED") + + defer func() { + telemetry.IncrCounter(1, "ibc", "channel", "close-confirm") + }() + + channel.State = types.CLOSED + k.SetChannel(ctx, portID, channelID, channel) + + return nil +} diff --git a/core/04-channel/keeper/handshake_test.go b/core/04-channel/keeper/handshake_test.go new file mode 100644 index 0000000000..120e1f8fe2 --- /dev/null +++ b/core/04-channel/keeper/handshake_test.go @@ -0,0 +1,773 @@ +package keeper_test + +import ( + "fmt" + + capabilitytypes "github.com/cosmos/cosmos-sdk/x/capability/types" + clienttypes "github.com/cosmos/cosmos-sdk/x/ibc/core/02-client/types" + connectiontypes "github.com/cosmos/cosmos-sdk/x/ibc/core/03-connection/types" + "github.com/cosmos/cosmos-sdk/x/ibc/core/04-channel/types" + host "github.com/cosmos/cosmos-sdk/x/ibc/core/24-host" + "github.com/cosmos/cosmos-sdk/x/ibc/core/exported" + ibctesting "github.com/cosmos/cosmos-sdk/x/ibc/testing" +) + +type testCase = struct { + msg string + malleate func() + expPass bool +} + +// TestChanOpenInit tests the OpenInit handshake call for channels. It uses message passing +// to enter into the appropriate state and then calls ChanOpenInit directly. The channel is +// being created on chainA. The port capability must be created on chainA before ChanOpenInit +// can succeed. +func (suite *KeeperTestSuite) TestChanOpenInit() { + var ( + connA *ibctesting.TestConnection + connB *ibctesting.TestConnection + features []string + portCap *capabilitytypes.Capability + ) + + testCases := []testCase{ + {"success", func() { + _, _, connA, connB = suite.coordinator.SetupClientConnections(suite.chainA, suite.chainB, exported.Tendermint) + features = []string{"ORDER_ORDERED", "ORDER_UNORDERED"} + suite.chainA.CreatePortCapability(suite.chainA.NextTestChannel(connA, ibctesting.MockPort).PortID) + portCap = suite.chainA.GetPortCapability(suite.chainA.NextTestChannel(connA, ibctesting.MockPort).PortID) + }, true}, + {"channel already exists", func() { + _, _, connA, connB, _, _ = suite.coordinator.Setup(suite.chainA, suite.chainB, types.UNORDERED) + }, false}, + {"connection doesn't exist", func() { + // any non-nil values of connA and connB are acceptable + suite.Require().NotNil(connA) + suite.Require().NotNil(connB) + }, false}, + {"capability is incorrect", func() { + _, _, connA, connB = suite.coordinator.SetupClientConnections(suite.chainA, suite.chainB, exported.Tendermint) + features = []string{"ORDER_ORDERED", "ORDER_UNORDERED"} + portCap = capabilitytypes.NewCapability(3) + }, false}, + {"connection version not negotiated", func() { + _, _, connA, connB = suite.coordinator.SetupClientConnections(suite.chainA, suite.chainB, exported.Tendermint) + + // modify connA versions + conn := suite.chainA.GetConnection(connA) + + version := connectiontypes.NewVersion("2", []string{"ORDER_ORDERED", "ORDER_UNORDERED"}) + conn.Versions = append(conn.Versions, version) + + suite.chainA.App.IBCKeeper.ConnectionKeeper.SetConnection( + suite.chainA.GetContext(), + connA.ID, conn, + ) + features = []string{"ORDER_ORDERED", "ORDER_UNORDERED"} + suite.chainA.CreatePortCapability(suite.chainA.NextTestChannel(connA, ibctesting.MockPort).PortID) + portCap = suite.chainA.GetPortCapability(suite.chainA.NextTestChannel(connA, ibctesting.MockPort).PortID) + }, false}, + {"connection does not support ORDERED channels", func() { + _, _, connA, connB = suite.coordinator.SetupClientConnections(suite.chainA, suite.chainB, exported.Tendermint) + + // modify connA versions to only support UNORDERED channels + conn := 
suite.chainA.GetConnection(connA) + + version := connectiontypes.NewVersion("1", []string{"ORDER_UNORDERED"}) + conn.Versions = []*connectiontypes.Version{version} + + suite.chainA.App.IBCKeeper.ConnectionKeeper.SetConnection( + suite.chainA.GetContext(), + connA.ID, conn, + ) + // NOTE: Opening UNORDERED channels is still expected to pass but ORDERED channels should fail + features = []string{"ORDER_UNORDERED"} + suite.chainA.CreatePortCapability(suite.chainA.NextTestChannel(connA, ibctesting.MockPort).PortID) + portCap = suite.chainA.GetPortCapability(suite.chainA.NextTestChannel(connA, ibctesting.MockPort).PortID) + }, true}, + } + + for _, tc := range testCases { + tc := tc + suite.Run(fmt.Sprintf("Case %s", tc.msg), func() { + // run test for all types of ordering + for _, order := range []types.Order{types.UNORDERED, types.ORDERED} { + suite.SetupTest() // reset + tc.malleate() + + counterparty := types.NewCounterparty(connB.FirstOrNextTestChannel(ibctesting.MockPort).PortID, connB.FirstOrNextTestChannel(ibctesting.MockPort).ID) + channelA := connA.FirstOrNextTestChannel(ibctesting.MockPort) + + channelID, cap, err := suite.chainA.App.IBCKeeper.ChannelKeeper.ChanOpenInit( + suite.chainA.GetContext(), order, []string{connA.ID}, + channelA.PortID, portCap, counterparty, channelA.Version, + ) + + // check if order is supported by channel to determine expected behaviour + orderSupported := false + for _, f := range features { + if f == order.String() { + orderSupported = true + } + } + + // Testcase must have expectedPass = true AND channel order supported before + // asserting the channel handshake initiation succeeded + if tc.expPass && orderSupported { + suite.Require().NoError(err) + suite.Require().NotNil(cap) + suite.Require().Equal(types.FormatChannelIdentifier(0), channelID) + + chanCap, ok := suite.chainA.App.ScopedIBCKeeper.GetCapability( + suite.chainA.GetContext(), + host.ChannelCapabilityPath(channelA.PortID, channelA.ID), + ) + suite.Require().True(ok, "could not retrieve channel capability after successful ChanOpenInit") + suite.Require().Equal(chanCap.String(), cap.String(), "channel capability is not correct") + } else { + suite.Require().Error(err) + suite.Require().Nil(cap) + suite.Require().Equal("", channelID) + } + } + }) + } +} + +// TestChanOpenTry tests the OpenTry handshake call for channels. It uses message passing +// to enter into the appropriate state and then calls ChanOpenTry directly. The channel +// is being created on chainB. The port capability must be created on chainB before +// ChanOpenTry can succeed. 
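TestChanOpenTry below, like every handshake test in this file, follows the table-driven msg/malleate/expPass pattern declared at the top of the file. A condensed sketch of that shared loop, illustrative only (not part of the patch):

// runHandshakeCases is an illustrative sketch, not part of this patch, of the loop
// each test in this file repeats: reset the suite, let the case mutate state, run
// the handshake call under test, then branch the assertions on expPass.
func (suite *KeeperTestSuite) runHandshakeCases(testCases []testCase, call func() error) {
	for _, tc := range testCases {
		tc := tc
		suite.Run(fmt.Sprintf("Case %s", tc.msg), func() {
			suite.SetupTest() // reset chains, clients and keepers
			tc.malleate()     // drive the suite into the state under test

			err := call()

			if tc.expPass {
				suite.Require().NoError(err)
			} else {
				suite.Require().Error(err)
			}
		})
	}
}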
+func (suite *KeeperTestSuite) TestChanOpenTry() { + var ( + connA *ibctesting.TestConnection + connB *ibctesting.TestConnection + previousChannelID string + portCap *capabilitytypes.Capability + heightDiff uint64 + ) + + testCases := []testCase{ + {"success", func() { + _, _, connA, connB = suite.coordinator.SetupClientConnections(suite.chainA, suite.chainB, exported.Tendermint) + suite.coordinator.ChanOpenInit(suite.chainA, suite.chainB, connA, connB, ibctesting.MockPort, ibctesting.MockPort, types.ORDERED) + + suite.chainB.CreatePortCapability(suite.chainB.NextTestChannel(connB, ibctesting.MockPort).PortID) + portCap = suite.chainB.GetPortCapability(suite.chainB.NextTestChannel(connB, ibctesting.MockPort).PortID) + }, true}, + {"success with crossing hello", func() { + _, _, connA, connB = suite.coordinator.SetupClientConnections(suite.chainA, suite.chainB, exported.Tendermint) + _, channelB, err := suite.coordinator.ChanOpenInitOnBothChains(suite.chainA, suite.chainB, connA, connB, ibctesting.MockPort, ibctesting.MockPort, types.ORDERED) + suite.Require().NoError(err) + + previousChannelID = channelB.ID + portCap = suite.chainB.GetPortCapability(suite.chainB.NextTestChannel(connB, ibctesting.MockPort).PortID) + }, true}, + {"previous channel with invalid state", func() { + _, _, connA, connB = suite.coordinator.SetupClientConnections(suite.chainA, suite.chainB, exported.Tendermint) + + // make previous channel have wrong ordering + suite.coordinator.ChanOpenInit(suite.chainB, suite.chainA, connB, connA, ibctesting.MockPort, ibctesting.MockPort, types.UNORDERED) + }, false}, + {"connection doesn't exist", func() { + // any non-nil values of connA and connB are acceptable + suite.Require().NotNil(connA) + suite.Require().NotNil(connB) + + // pass capability check + suite.chainB.CreatePortCapability(connB.FirstOrNextTestChannel(ibctesting.MockPort).PortID) + portCap = suite.chainB.GetPortCapability(connB.FirstOrNextTestChannel(ibctesting.MockPort).PortID) + }, false}, + {"connection is not OPEN", func() { + clientA, clientB := suite.coordinator.SetupClients(suite.chainA, suite.chainB, exported.Tendermint) + // pass capability check + suite.chainB.CreatePortCapability(connB.FirstOrNextTestChannel(ibctesting.MockPort).PortID) + portCap = suite.chainB.GetPortCapability(connB.FirstOrNextTestChannel(ibctesting.MockPort).PortID) + + var err error + connB, connA, err = suite.coordinator.ConnOpenInit(suite.chainB, suite.chainA, clientB, clientA) + suite.Require().NoError(err) + }, false}, + {"consensus state not found", func() { + _, _, connA, connB = suite.coordinator.SetupClientConnections(suite.chainA, suite.chainB, exported.Tendermint) + suite.coordinator.ChanOpenInit(suite.chainA, suite.chainB, connA, connB, ibctesting.MockPort, ibctesting.MockPort, types.ORDERED) + + suite.chainB.CreatePortCapability(suite.chainB.NextTestChannel(connB, ibctesting.MockPort).PortID) + portCap = suite.chainB.GetPortCapability(suite.chainB.NextTestChannel(connB, ibctesting.MockPort).PortID) + + heightDiff = 3 // consensus state doesn't exist at this height + }, false}, + {"channel verification failed", func() { + // not creating a channel on chainA will result in an invalid proof of existence + _, _, connA, connB = suite.coordinator.SetupClientConnections(suite.chainA, suite.chainB, exported.Tendermint) + portCap = suite.chainB.GetPortCapability(suite.chainB.NextTestChannel(connB, ibctesting.MockPort).PortID) + }, false}, + {"port capability not found", func() { + _, _, connA, connB = 
suite.coordinator.SetupClientConnections(suite.chainA, suite.chainB, exported.Tendermint) + suite.coordinator.ChanOpenInit(suite.chainA, suite.chainB, connA, connB, ibctesting.MockPort, ibctesting.MockPort, types.ORDERED) + + portCap = capabilitytypes.NewCapability(3) + }, false}, + {"connection version not negotiated", func() { + _, _, connA, connB = suite.coordinator.SetupClientConnections(suite.chainA, suite.chainB, exported.Tendermint) + suite.coordinator.ChanOpenInit(suite.chainA, suite.chainB, connA, connB, ibctesting.MockPort, ibctesting.MockPort, types.ORDERED) + + // modify connB versions + conn := suite.chainB.GetConnection(connB) + + version := connectiontypes.NewVersion("2", []string{"ORDER_ORDERED", "ORDER_UNORDERED"}) + conn.Versions = append(conn.Versions, version) + + suite.chainB.App.IBCKeeper.ConnectionKeeper.SetConnection( + suite.chainB.GetContext(), + connB.ID, conn, + ) + suite.chainB.CreatePortCapability(suite.chainB.NextTestChannel(connB, ibctesting.MockPort).PortID) + portCap = suite.chainB.GetPortCapability(suite.chainB.NextTestChannel(connB, ibctesting.MockPort).PortID) + }, false}, + {"connection does not support ORDERED channels", func() { + _, _, connA, connB = suite.coordinator.SetupClientConnections(suite.chainA, suite.chainB, exported.Tendermint) + suite.coordinator.ChanOpenInit(suite.chainA, suite.chainB, connA, connB, ibctesting.MockPort, ibctesting.MockPort, types.ORDERED) + + // modify connA versions to only support UNORDERED channels + conn := suite.chainA.GetConnection(connA) + + version := connectiontypes.NewVersion("1", []string{"ORDER_UNORDERED"}) + conn.Versions = []*connectiontypes.Version{version} + + suite.chainA.App.IBCKeeper.ConnectionKeeper.SetConnection( + suite.chainA.GetContext(), + connA.ID, conn, + ) + suite.chainA.CreatePortCapability(suite.chainA.NextTestChannel(connA, ibctesting.MockPort).PortID) + portCap = suite.chainA.GetPortCapability(suite.chainA.NextTestChannel(connA, ibctesting.MockPort).PortID) + }, false}, + } + + for _, tc := range testCases { + tc := tc + suite.Run(fmt.Sprintf("Case %s", tc.msg), func() { + suite.SetupTest() // reset + heightDiff = 0 // must be explicitly changed in malleate + previousChannelID = "" + + tc.malleate() + channelA := connA.FirstOrNextTestChannel(ibctesting.MockPort) + channelB := connB.FirstOrNextTestChannel(ibctesting.MockPort) + counterparty := types.NewCounterparty(channelA.PortID, channelA.ID) + + channelKey := host.ChannelKey(counterparty.PortId, counterparty.ChannelId) + proof, proofHeight := suite.chainA.QueryProof(channelKey) + + channelID, cap, err := suite.chainB.App.IBCKeeper.ChannelKeeper.ChanOpenTry( + suite.chainB.GetContext(), types.ORDERED, []string{connB.ID}, + channelB.PortID, previousChannelID, portCap, counterparty, channelB.Version, connA.FirstOrNextTestChannel(ibctesting.MockPort).Version, + proof, malleateHeight(proofHeight, heightDiff), + ) + + if tc.expPass { + suite.Require().NoError(err) + suite.Require().NotNil(cap) + + chanCap, ok := suite.chainB.App.ScopedIBCKeeper.GetCapability( + suite.chainB.GetContext(), + host.ChannelCapabilityPath(channelB.PortID, channelID), + ) + suite.Require().True(ok, "could not retrieve channel capability after successful ChanOpenTry") + suite.Require().Equal(chanCap.String(), cap.String(), "channel capability is not correct") + } else { + suite.Require().Error(err) + } + }) + } +} + +// TestChanOpenAck tests the OpenAck handshake call for channels.
It uses message passing +// to enter into the appropriate state and then calls ChanOpenAck directly. The handshake +// call is occurring on chainA. +func (suite *KeeperTestSuite) TestChanOpenAck() { + var ( + connA *ibctesting.TestConnection + connB *ibctesting.TestConnection + counterpartyChannelID string + channelCap *capabilitytypes.Capability + heightDiff uint64 + ) + + testCases := []testCase{ + {"success", func() { + _, _, connA, connB = suite.coordinator.SetupClientConnections(suite.chainA, suite.chainB, exported.Tendermint) + channelA, channelB, err := suite.coordinator.ChanOpenInit(suite.chainA, suite.chainB, connA, connB, ibctesting.MockPort, ibctesting.MockPort, types.ORDERED) + suite.Require().NoError(err) + + err = suite.coordinator.ChanOpenTry(suite.chainB, suite.chainA, channelB, channelA, connB, types.ORDERED) + suite.Require().NoError(err) + + channelCap = suite.chainA.GetChannelCapability(channelA.PortID, channelA.ID) + }, true}, + {"success with empty stored counterparty channel ID", func() { + _, _, connA, connB = suite.coordinator.SetupClientConnections(suite.chainA, suite.chainB, exported.Tendermint) + channelA, channelB, err := suite.coordinator.ChanOpenInit(suite.chainA, suite.chainB, connA, connB, ibctesting.MockPort, ibctesting.MockPort, types.ORDERED) + suite.Require().NoError(err) + + err = suite.coordinator.ChanOpenTry(suite.chainB, suite.chainA, channelB, channelA, connB, types.ORDERED) + suite.Require().NoError(err) + + // set the channel's counterparty channel identifier to empty string + channel := suite.chainA.GetChannel(channelA) + channel.Counterparty.ChannelId = "" + + // use a different channel identifier + counterpartyChannelID = channelB.ID + + suite.chainA.App.IBCKeeper.ChannelKeeper.SetChannel(suite.chainA.GetContext(), channelA.PortID, channelA.ID, channel) + + channelCap = suite.chainA.GetChannelCapability(channelA.PortID, channelA.ID) + }, true}, + {"channel doesn't exist", func() {}, false}, + {"channel state is not INIT or TRYOPEN", func() { + // create fully open channels on both chains + _, _, connA, connB, _, _ = suite.coordinator.Setup(suite.chainA, suite.chainB, types.UNORDERED) + channelA := connA.Channels[0] + channelCap = suite.chainA.GetChannelCapability(channelA.PortID, channelA.ID) + }, false}, + {"connection not found", func() { + _, _, connA, connB = suite.coordinator.SetupClientConnections(suite.chainA, suite.chainB, exported.Tendermint) + channelA, channelB, err := suite.coordinator.ChanOpenInit(suite.chainA, suite.chainB, connA, connB, ibctesting.MockPort, ibctesting.MockPort, types.ORDERED) + suite.Require().NoError(err) + + err = suite.coordinator.ChanOpenTry(suite.chainB, suite.chainA, channelB, channelA, connB, types.ORDERED) + suite.Require().NoError(err) + + channelCap = suite.chainA.GetChannelCapability(channelA.PortID, channelA.ID) + + // set the channel's connection hops to wrong connection ID + channel := suite.chainA.GetChannel(channelA) + channel.ConnectionHops[0] = "doesnotexist" + suite.chainA.App.IBCKeeper.ChannelKeeper.SetChannel(suite.chainA.GetContext(), channelA.PortID, channelA.ID, channel) + }, false}, + {"connection is not OPEN", func() { + clientA, clientB := suite.coordinator.SetupClients(suite.chainA, suite.chainB, exported.Tendermint) + + var err error + connA, connB, err = suite.coordinator.ConnOpenInit(suite.chainA, suite.chainB, clientA, clientB) + suite.Require().NoError(err) + + // create channel in init + channelA, _, err := suite.coordinator.ChanOpenInit(suite.chainA, suite.chainB, connA, connB, 
ibctesting.MockPort, ibctesting.MockPort, types.ORDERED) + suite.Require().NoError(err) + + suite.chainA.CreateChannelCapability(channelA.PortID, channelA.ID) + channelCap = suite.chainA.GetChannelCapability(channelA.PortID, channelA.ID) + }, false}, + {"consensus state not found", func() { + _, _, connA, connB = suite.coordinator.SetupClientConnections(suite.chainA, suite.chainB, exported.Tendermint) + channelA, channelB, err := suite.coordinator.ChanOpenInit(suite.chainA, suite.chainB, connA, connB, ibctesting.MockPort, ibctesting.MockPort, types.ORDERED) + suite.Require().NoError(err) + + err = suite.coordinator.ChanOpenTry(suite.chainB, suite.chainA, channelB, channelA, connB, types.ORDERED) + suite.Require().NoError(err) + + channelCap = suite.chainA.GetChannelCapability(channelA.PortID, channelA.ID) + + heightDiff = 3 // consensus state doesn't exist at this height + }, false}, + {"invalid counterparty channel identifier", func() { + _, _, connA, connB = suite.coordinator.SetupClientConnections(suite.chainA, suite.chainB, exported.Tendermint) + channelA, channelB, err := suite.coordinator.ChanOpenInit(suite.chainA, suite.chainB, connA, connB, ibctesting.MockPort, ibctesting.MockPort, types.ORDERED) + suite.Require().NoError(err) + + err = suite.coordinator.ChanOpenTry(suite.chainB, suite.chainA, channelB, channelA, connB, types.ORDERED) + suite.Require().NoError(err) + + counterpartyChannelID = "otheridentifier" + + channelCap = suite.chainA.GetChannelCapability(channelA.PortID, channelA.ID) + }, false}, + {"channel verification failed", func() { + // chainB is INIT, chainA in TRYOPEN + _, _, connA, connB = suite.coordinator.SetupClientConnections(suite.chainA, suite.chainB, exported.Tendermint) + channelB, channelA, err := suite.coordinator.ChanOpenInit(suite.chainB, suite.chainA, connB, connA, ibctesting.MockPort, ibctesting.MockPort, types.ORDERED) + suite.Require().NoError(err) + + err = suite.coordinator.ChanOpenTry(suite.chainA, suite.chainB, channelA, channelB, connA, types.ORDERED) + suite.Require().NoError(err) + + channelCap = suite.chainA.GetChannelCapability(channelA.PortID, channelA.ID) + }, false}, + {"channel capability not found", func() { + _, _, connA, connB = suite.coordinator.SetupClientConnections(suite.chainA, suite.chainB, exported.Tendermint) + channelA, channelB, err := suite.coordinator.ChanOpenInit(suite.chainA, suite.chainB, connA, connB, ibctesting.MockPort, ibctesting.MockPort, types.ORDERED) + suite.Require().NoError(err) + + suite.coordinator.ChanOpenTry(suite.chainB, suite.chainA, channelB, channelA, connB, types.ORDERED) + + channelCap = capabilitytypes.NewCapability(6) + }, false}, + } + + for _, tc := range testCases { + tc := tc + suite.Run(fmt.Sprintf("Case %s", tc.msg), func() { + suite.SetupTest() // reset + counterpartyChannelID = "" // must be explicitly changed in malleate + heightDiff = 0 // must be explicitly changed + + tc.malleate() + + channelA := connA.FirstOrNextTestChannel(ibctesting.MockPort) + channelB := connB.FirstOrNextTestChannel(ibctesting.MockPort) + + if counterpartyChannelID == "" { + counterpartyChannelID = channelB.ID + } + + channelKey := host.ChannelKey(channelB.PortID, channelB.ID) + proof, proofHeight := suite.chainB.QueryProof(channelKey) + + err := suite.chainA.App.IBCKeeper.ChannelKeeper.ChanOpenAck( + suite.chainA.GetContext(), channelA.PortID, channelA.ID, channelCap, channelB.Version, counterpartyChannelID, + proof, malleateHeight(proofHeight, heightDiff), + ) + + if tc.expPass { + suite.Require().NoError(err) + 
} else { + suite.Require().Error(err) + } + }) + } +} + +// TestChanOpenConfirm tests the OpenConfirm handshake call for channels. It uses message passing +// to enter into the appropriate state and then calls ChanOpenConfirm directly. The handshake +// call is occurring on chainB. +func (suite *KeeperTestSuite) TestChanOpenConfirm() { + var ( + connA *ibctesting.TestConnection + connB *ibctesting.TestConnection + channelCap *capabilitytypes.Capability + heightDiff uint64 + ) + testCases := []testCase{ + {"success", func() { + _, _, connA, connB = suite.coordinator.SetupClientConnections(suite.chainA, suite.chainB, exported.Tendermint) + channelA, channelB, err := suite.coordinator.ChanOpenInit(suite.chainA, suite.chainB, connA, connB, ibctesting.MockPort, ibctesting.MockPort, types.ORDERED) + suite.Require().NoError(err) + + err = suite.coordinator.ChanOpenTry(suite.chainB, suite.chainA, channelB, channelA, connB, types.ORDERED) + suite.Require().NoError(err) + + err = suite.coordinator.ChanOpenAck(suite.chainA, suite.chainB, channelA, channelB) + suite.Require().NoError(err) + + channelCap = suite.chainB.GetChannelCapability(channelB.PortID, channelB.ID) + }, true}, + {"channel doesn't exist", func() {}, false}, + {"channel state is not TRYOPEN", func() { + // create fully open channels on both chains + _, _, connA, connB, _, _ = suite.coordinator.Setup(suite.chainA, suite.chainB, types.UNORDERED) + channelB := connB.Channels[0] + channelCap = suite.chainB.GetChannelCapability(channelB.PortID, channelB.ID) + }, false}, + {"connection not found", func() { + _, _, connA, connB = suite.coordinator.SetupClientConnections(suite.chainA, suite.chainB, exported.Tendermint) + channelA, channelB, err := suite.coordinator.ChanOpenInit(suite.chainA, suite.chainB, connA, connB, ibctesting.MockPort, ibctesting.MockPort, types.ORDERED) + suite.Require().NoError(err) + + err = suite.coordinator.ChanOpenTry(suite.chainB, suite.chainA, channelB, channelA, connB, types.ORDERED) + suite.Require().NoError(err) + + err = suite.coordinator.ChanOpenAck(suite.chainA, suite.chainB, channelA, channelB) + suite.Require().NoError(err) + + channelCap = suite.chainB.GetChannelCapability(channelB.PortID, channelB.ID) + + // set the channel's connection hops to wrong connection ID + channel := suite.chainB.GetChannel(channelB) + channel.ConnectionHops[0] = "doesnotexist" + suite.chainB.App.IBCKeeper.ChannelKeeper.SetChannel(suite.chainB.GetContext(), channelB.PortID, channelB.ID, channel) + }, false}, + {"connection is not OPEN", func() { + clientA, clientB := suite.coordinator.SetupClients(suite.chainA, suite.chainB, exported.Tendermint) + + var err error + connA, connB, err = suite.coordinator.ConnOpenInit(suite.chainB, suite.chainA, clientB, clientA) + suite.Require().NoError(err) + channelB := connB.FirstOrNextTestChannel(ibctesting.MockPort) + suite.chainB.CreateChannelCapability(channelB.PortID, channelB.ID) + channelCap = suite.chainB.GetChannelCapability(channelB.PortID, channelB.ID) + }, false}, + {"consensus state not found", func() { + _, _, connA, connB = suite.coordinator.SetupClientConnections(suite.chainA, suite.chainB, exported.Tendermint) + channelA, channelB, err := suite.coordinator.ChanOpenInit(suite.chainA, suite.chainB, connA, connB, ibctesting.MockPort, ibctesting.MockPort, types.ORDERED) + suite.Require().NoError(err) + + err = suite.coordinator.ChanOpenTry(suite.chainB, suite.chainA, channelB, channelA, connB, types.ORDERED) + suite.Require().NoError(err) + + err =
suite.coordinator.ChanOpenAck(suite.chainA, suite.chainB, channelA, channelB) + suite.Require().NoError(err) + + channelCap = suite.chainB.GetChannelCapability(channelB.PortID, channelB.ID) + + heightDiff = 3 + }, false}, + {"channel verification failed", func() { + // chainA is INIT, chainB in TRYOPEN + _, _, connA, connB = suite.coordinator.SetupClientConnections(suite.chainA, suite.chainB, exported.Tendermint) + channelA, channelB, err := suite.coordinator.ChanOpenInit(suite.chainA, suite.chainB, connA, connB, ibctesting.MockPort, ibctesting.MockPort, types.ORDERED) + suite.Require().NoError(err) + + err = suite.coordinator.ChanOpenTry(suite.chainB, suite.chainA, channelB, channelA, connB, types.ORDERED) + suite.Require().NoError(err) + + channelCap = suite.chainB.GetChannelCapability(channelB.PortID, channelB.ID) + }, false}, + {"channel capability not found", func() { + _, _, connA, connB = suite.coordinator.SetupClientConnections(suite.chainA, suite.chainB, exported.Tendermint) + channelA, channelB, err := suite.coordinator.ChanOpenInit(suite.chainA, suite.chainB, connA, connB, ibctesting.MockPort, ibctesting.MockPort, types.ORDERED) + suite.Require().NoError(err) + + err = suite.coordinator.ChanOpenTry(suite.chainB, suite.chainA, channelB, channelA, connB, types.ORDERED) + suite.Require().NoError(err) + + err = suite.coordinator.ChanOpenAck(suite.chainA, suite.chainB, channelA, channelB) + suite.Require().NoError(err) + + channelCap = capabilitytypes.NewCapability(6) + }, false}, + } + + for _, tc := range testCases { + tc := tc + suite.Run(fmt.Sprintf("Case %s", tc.msg), func() { + suite.SetupTest() // reset + heightDiff = 0 // must be explicitly changed + + tc.malleate() + + channelA := connA.FirstOrNextTestChannel(ibctesting.MockPort) + channelB := connB.FirstOrNextTestChannel(ibctesting.MockPort) + + channelKey := host.ChannelKey(channelA.PortID, channelA.ID) + proof, proofHeight := suite.chainA.QueryProof(channelKey) + + err := suite.chainB.App.IBCKeeper.ChannelKeeper.ChanOpenConfirm( + suite.chainB.GetContext(), channelB.PortID, channelB.ID, + channelCap, proof, malleateHeight(proofHeight, heightDiff), + ) + + if tc.expPass { + suite.Require().NoError(err) + } else { + suite.Require().Error(err) + } + }) + } +} + +// TestChanCloseInit tests the initial closing of a handshake on chainA by calling +// ChanCloseInit. Both chains will use message passing to setup OPEN channels. 
+func (suite *KeeperTestSuite) TestChanCloseInit() { + var ( + connA *ibctesting.TestConnection + connB *ibctesting.TestConnection + channelCap *capabilitytypes.Capability + ) + + testCases := []testCase{ + {"success", func() { + _, _, connA, connB, _, _ = suite.coordinator.Setup(suite.chainA, suite.chainB, types.UNORDERED) + channelA := connA.Channels[0] + channelCap = suite.chainA.GetChannelCapability(channelA.PortID, channelA.ID) + }, true}, + {"channel doesn't exist", func() { + // any non-nil values work for connections + suite.Require().NotNil(connA) + suite.Require().NotNil(connB) + channelA := connA.FirstOrNextTestChannel(ibctesting.MockPort) + + // ensure channel capability check passes + suite.chainA.CreateChannelCapability(channelA.PortID, channelA.ID) + channelCap = suite.chainA.GetChannelCapability(channelA.PortID, channelA.ID) + }, false}, + {"channel state is CLOSED", func() { + _, _, connA, connB, _, _ = suite.coordinator.Setup(suite.chainA, suite.chainB, types.UNORDERED) + channelA := connA.Channels[0] + channelCap = suite.chainA.GetChannelCapability(channelA.PortID, channelA.ID) + + // close channel + err := suite.coordinator.SetChannelClosed(suite.chainA, suite.chainB, channelA) + suite.Require().NoError(err) + }, false}, + {"connection not found", func() { + _, _, connA, connB, _, _ = suite.coordinator.Setup(suite.chainA, suite.chainB, types.UNORDERED) + channelA := connA.Channels[0] + channelCap = suite.chainA.GetChannelCapability(channelA.PortID, channelA.ID) + + // set the channel's connection hops to wrong connection ID + channel := suite.chainA.GetChannel(channelA) + channel.ConnectionHops[0] = "doesnotexist" + suite.chainA.App.IBCKeeper.ChannelKeeper.SetChannel(suite.chainA.GetContext(), channelA.PortID, channelA.ID, channel) + }, false}, + {"connection is not OPEN", func() { + clientA, clientB := suite.coordinator.SetupClients(suite.chainA, suite.chainB, exported.Tendermint) + + var err error + connA, connB, err = suite.coordinator.ConnOpenInit(suite.chainA, suite.chainB, clientA, clientB) + suite.Require().NoError(err) + + // create channel in init + channelA, _, err := suite.coordinator.ChanOpenInit(suite.chainA, suite.chainB, connA, connB, ibctesting.MockPort, ibctesting.MockPort, types.ORDERED) + + // ensure channel capability check passes + suite.chainA.CreateChannelCapability(channelA.PortID, channelA.ID) + channelCap = suite.chainA.GetChannelCapability(channelA.PortID, channelA.ID) + }, false}, + {"channel capability not found", func() { + _, _, connA, connB, _, _ = suite.coordinator.Setup(suite.chainA, suite.chainB, types.UNORDERED) + channelCap = capabilitytypes.NewCapability(3) + }, false}, + } + + for _, tc := range testCases { + tc := tc + suite.Run(fmt.Sprintf("Case %s", tc.msg), func() { + suite.SetupTest() // reset + + tc.malleate() + + channelA := connA.FirstOrNextTestChannel(ibctesting.MockPort) + + err := suite.chainA.App.IBCKeeper.ChannelKeeper.ChanCloseInit( + suite.chainA.GetContext(), channelA.PortID, channelA.ID, channelCap, + ) + + if tc.expPass { + suite.Require().NoError(err) + } else { + suite.Require().Error(err) + } + }) + } +} + +// TestChanCloseConfirm tests the confirming closing channel ends by calling ChanCloseConfirm +// on chainB. Both chains will use message passing to setup OPEN channels. ChanCloseInit is +// bypassed on chainA by setting the channel state in the ChannelKeeper. 
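The "bypass" mentioned above amounts to flipping the stored channel end on chainA to CLOSED instead of running ChanCloseInit there; the tests do this through the coordinator helper SetChannelClosed, which presumably also commits the change and updates the counterparty client so that the new state can be proven. A hand-rolled sketch of just the state flip, illustrative only (not part of the patch):

// setChannelClosedSketch is an illustrative sketch, not part of this patch: it shows
// the state change that gives chain B's ChanCloseConfirm a CLOSED counterparty
// channel end to verify against. Committing the state and updating clients, which
// the real SetChannelClosed helper is assumed to handle, is omitted here.
func (suite *KeeperTestSuite) setChannelClosedSketch(channelA ibctesting.TestChannel) {
	channel := suite.chainA.GetChannel(channelA)
	channel.State = types.CLOSED
	suite.chainA.App.IBCKeeper.ChannelKeeper.SetChannel(suite.chainA.GetContext(), channelA.PortID, channelA.ID, channel)
}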
+func (suite *KeeperTestSuite) TestChanCloseConfirm() { + var ( + connA *ibctesting.TestConnection + connB *ibctesting.TestConnection + channelA ibctesting.TestChannel + channelB ibctesting.TestChannel + channelCap *capabilitytypes.Capability + heightDiff uint64 + ) + + testCases := []testCase{ + {"success", func() { + _, _, connA, connB, channelA, channelB = suite.coordinator.Setup(suite.chainA, suite.chainB, types.UNORDERED) + channelCap = suite.chainB.GetChannelCapability(channelB.PortID, channelB.ID) + + err := suite.coordinator.SetChannelClosed(suite.chainA, suite.chainB, channelA) + suite.Require().NoError(err) + }, true}, + {"channel doesn't exist", func() { + // any non-nil values work for connections + suite.Require().NotNil(connA) + suite.Require().NotNil(connB) + channelB = connB.FirstOrNextTestChannel(ibctesting.MockPort) + + // ensure channel capability check passes + suite.chainB.CreateChannelCapability(channelB.PortID, channelB.ID) + channelCap = suite.chainB.GetChannelCapability(channelB.PortID, channelB.ID) + }, false}, + {"channel state is CLOSED", func() { + _, _, connA, connB, _, channelB = suite.coordinator.Setup(suite.chainA, suite.chainB, types.UNORDERED) + channelCap = suite.chainB.GetChannelCapability(channelB.PortID, channelB.ID) + + err := suite.coordinator.SetChannelClosed(suite.chainB, suite.chainA, channelB) + suite.Require().NoError(err) + }, false}, + {"connection not found", func() { + _, _, connA, connB, _, channelB = suite.coordinator.Setup(suite.chainA, suite.chainB, types.UNORDERED) + channelCap = suite.chainB.GetChannelCapability(channelB.PortID, channelB.ID) + + // set the channel's connection hops to wrong connection ID + channel := suite.chainB.GetChannel(channelB) + channel.ConnectionHops[0] = "doesnotexist" + suite.chainB.App.IBCKeeper.ChannelKeeper.SetChannel(suite.chainB.GetContext(), channelB.PortID, channelB.ID, channel) + }, false}, + {"connection is not OPEN", func() { + clientA, clientB := suite.coordinator.SetupClients(suite.chainA, suite.chainB, exported.Tendermint) + + var err error + connB, connA, err = suite.coordinator.ConnOpenInit(suite.chainB, suite.chainA, clientB, clientA) + suite.Require().NoError(err) + + // create channel in init + channelB, _, err := suite.coordinator.ChanOpenInit(suite.chainB, suite.chainA, connB, connA, ibctesting.MockPort, ibctesting.MockPort, types.ORDERED) + suite.Require().NoError(err) + + // ensure channel capability check passes + suite.chainB.CreateChannelCapability(channelB.PortID, channelB.ID) + channelCap = suite.chainB.GetChannelCapability(channelB.PortID, channelB.ID) + }, false}, + {"consensus state not found", func() { + _, _, connA, connB, channelA, channelB = suite.coordinator.Setup(suite.chainA, suite.chainB, types.UNORDERED) + channelCap = suite.chainB.GetChannelCapability(channelB.PortID, channelB.ID) + + err := suite.coordinator.SetChannelClosed(suite.chainA, suite.chainB, channelA) + suite.Require().NoError(err) + + heightDiff = 3 + }, false}, + {"channel verification failed", func() { + // channel not closed + _, _, connA, connB, _, channelB = suite.coordinator.Setup(suite.chainA, suite.chainB, types.UNORDERED) + channelCap = suite.chainB.GetChannelCapability(channelB.PortID, channelB.ID) + }, false}, + {"channel capability not found", func() { + _, _, connA, connB, channelA, channelB = suite.coordinator.Setup(suite.chainA, suite.chainB, types.UNORDERED) + channelCap = suite.chainB.GetChannelCapability(channelB.PortID, channelB.ID) + + err := 
suite.coordinator.SetChannelClosed(suite.chainA, suite.chainB, channelA) + suite.Require().NoError(err) + + channelCap = capabilitytypes.NewCapability(3) + }, false}, + } + + for _, tc := range testCases { + tc := tc + suite.Run(fmt.Sprintf("Case %s", tc.msg), func() { + suite.SetupTest() // reset + heightDiff = 0 // must explicitly be changed + + tc.malleate() + + channelA = connA.FirstOrNextTestChannel(ibctesting.MockPort) + channelB = connB.FirstOrNextTestChannel(ibctesting.MockPort) + + channelKey := host.ChannelKey(channelA.PortID, channelA.ID) + proof, proofHeight := suite.chainA.QueryProof(channelKey) + + err := suite.chainB.App.IBCKeeper.ChannelKeeper.ChanCloseConfirm( + suite.chainB.GetContext(), channelB.PortID, channelB.ID, channelCap, + proof, malleateHeight(proofHeight, heightDiff), + ) + + if tc.expPass { + suite.Require().NoError(err) + } else { + suite.Require().Error(err) + } + }) + } +} + +func malleateHeight(height exported.Height, diff uint64) exported.Height { + return clienttypes.NewHeight(height.GetRevisionNumber(), height.GetRevisionHeight()+diff) +} diff --git a/core/04-channel/keeper/keeper.go b/core/04-channel/keeper/keeper.go new file mode 100644 index 0000000000..60452f315b --- /dev/null +++ b/core/04-channel/keeper/keeper.go @@ -0,0 +1,432 @@ +package keeper + +import ( + "strconv" + "strings" + + "github.com/tendermint/tendermint/libs/log" + db "github.com/tendermint/tm-db" + + "github.com/cosmos/cosmos-sdk/codec" + sdk "github.com/cosmos/cosmos-sdk/types" + sdkerrors "github.com/cosmos/cosmos-sdk/types/errors" + capabilitykeeper "github.com/cosmos/cosmos-sdk/x/capability/keeper" + capabilitytypes "github.com/cosmos/cosmos-sdk/x/capability/types" + clienttypes "github.com/cosmos/cosmos-sdk/x/ibc/core/02-client/types" + connectiontypes "github.com/cosmos/cosmos-sdk/x/ibc/core/03-connection/types" + "github.com/cosmos/cosmos-sdk/x/ibc/core/04-channel/types" + porttypes "github.com/cosmos/cosmos-sdk/x/ibc/core/05-port/types" + host "github.com/cosmos/cosmos-sdk/x/ibc/core/24-host" + "github.com/cosmos/cosmos-sdk/x/ibc/core/exported" +) + +// Keeper defines the IBC channel keeper +type Keeper struct { + // implements gRPC QueryServer interface + types.QueryServer + + storeKey sdk.StoreKey + cdc codec.BinaryMarshaler + clientKeeper types.ClientKeeper + connectionKeeper types.ConnectionKeeper + portKeeper types.PortKeeper + scopedKeeper capabilitykeeper.ScopedKeeper +} + +// NewKeeper creates a new IBC channel Keeper instance +func NewKeeper( + cdc codec.BinaryMarshaler, key sdk.StoreKey, + clientKeeper types.ClientKeeper, connectionKeeper types.ConnectionKeeper, + portKeeper types.PortKeeper, scopedKeeper capabilitykeeper.ScopedKeeper, +) Keeper { + return Keeper{ + storeKey: key, + cdc: cdc, + clientKeeper: clientKeeper, + connectionKeeper: connectionKeeper, + portKeeper: portKeeper, + scopedKeeper: scopedKeeper, + } +} + +// Logger returns a module-specific logger. +func (k Keeper) Logger(ctx sdk.Context) log.Logger { + return ctx.Logger().With("module", "x/"+host.ModuleName+"/"+types.SubModuleName) +} + +// GenerateChannelIdentifier returns the next channel identifier. 
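+//
+// Illustrative usage (a minimal sketch, assuming FormatChannelIdentifier yields
+// sequence-based identifiers such as "channel-0", "channel-1", ...):
+//
+//   channelID := k.GenerateChannelIdentifier(ctx)
+//   // the identifier is then used when writing the new channel end to state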
+func (k Keeper) GenerateChannelIdentifier(ctx sdk.Context) string { + nextChannelSeq := k.GetNextChannelSequence(ctx) + channelID := types.FormatChannelIdentifier(nextChannelSeq) + + nextChannelSeq++ + k.SetNextChannelSequence(ctx, nextChannelSeq) + return channelID +} + +// GetChannel returns a channel with a particular identifier binded to a specific port +func (k Keeper) GetChannel(ctx sdk.Context, portID, channelID string) (types.Channel, bool) { + store := ctx.KVStore(k.storeKey) + bz := store.Get(host.ChannelKey(portID, channelID)) + if bz == nil { + return types.Channel{}, false + } + + var channel types.Channel + k.cdc.MustUnmarshalBinaryBare(bz, &channel) + return channel, true +} + +// SetChannel sets a channel to the store +func (k Keeper) SetChannel(ctx sdk.Context, portID, channelID string, channel types.Channel) { + store := ctx.KVStore(k.storeKey) + bz := k.cdc.MustMarshalBinaryBare(&channel) + store.Set(host.ChannelKey(portID, channelID), bz) +} + +// GetNextChannelSequence gets the next channel sequence from the store. +func (k Keeper) GetNextChannelSequence(ctx sdk.Context) uint64 { + store := ctx.KVStore(k.storeKey) + bz := store.Get([]byte(types.KeyNextChannelSequence)) + if bz == nil { + panic("next channel sequence is nil") + } + + return sdk.BigEndianToUint64(bz) +} + +// SetNextChannelSequence sets the next channel sequence to the store. +func (k Keeper) SetNextChannelSequence(ctx sdk.Context, sequence uint64) { + store := ctx.KVStore(k.storeKey) + bz := sdk.Uint64ToBigEndian(sequence) + store.Set([]byte(types.KeyNextChannelSequence), bz) +} + +// GetNextSequenceSend gets a channel's next send sequence from the store +func (k Keeper) GetNextSequenceSend(ctx sdk.Context, portID, channelID string) (uint64, bool) { + store := ctx.KVStore(k.storeKey) + bz := store.Get(host.NextSequenceSendKey(portID, channelID)) + if bz == nil { + return 0, false + } + + return sdk.BigEndianToUint64(bz), true +} + +// SetNextSequenceSend sets a channel's next send sequence to the store +func (k Keeper) SetNextSequenceSend(ctx sdk.Context, portID, channelID string, sequence uint64) { + store := ctx.KVStore(k.storeKey) + bz := sdk.Uint64ToBigEndian(sequence) + store.Set(host.NextSequenceSendKey(portID, channelID), bz) +} + +// GetNextSequenceRecv gets a channel's next receive sequence from the store +func (k Keeper) GetNextSequenceRecv(ctx sdk.Context, portID, channelID string) (uint64, bool) { + store := ctx.KVStore(k.storeKey) + bz := store.Get(host.NextSequenceRecvKey(portID, channelID)) + if bz == nil { + return 0, false + } + + return sdk.BigEndianToUint64(bz), true +} + +// SetNextSequenceRecv sets a channel's next receive sequence to the store +func (k Keeper) SetNextSequenceRecv(ctx sdk.Context, portID, channelID string, sequence uint64) { + store := ctx.KVStore(k.storeKey) + bz := sdk.Uint64ToBigEndian(sequence) + store.Set(host.NextSequenceRecvKey(portID, channelID), bz) +} + +// GetNextSequenceAck gets a channel's next ack sequence from the store +func (k Keeper) GetNextSequenceAck(ctx sdk.Context, portID, channelID string) (uint64, bool) { + store := ctx.KVStore(k.storeKey) + bz := store.Get(host.NextSequenceAckKey(portID, channelID)) + if bz == nil { + return 0, false + } + + return sdk.BigEndianToUint64(bz), true +} + +// SetNextSequenceAck sets a channel's next ack sequence to the store +func (k Keeper) SetNextSequenceAck(ctx sdk.Context, portID, channelID string, sequence uint64) { + store := ctx.KVStore(k.storeKey) + bz := sdk.Uint64ToBigEndian(sequence) + 
store.Set(host.NextSequenceAckKey(portID, channelID), bz) +} + +// GetPacketReceipt gets a packet receipt from the store +func (k Keeper) GetPacketReceipt(ctx sdk.Context, portID, channelID string, sequence uint64) (string, bool) { + store := ctx.KVStore(k.storeKey) + bz := store.Get(host.PacketReceiptKey(portID, channelID, sequence)) + if bz == nil { + return "", false + } + + return string(bz), true +} + +// SetPacketReceipt sets an empty packet receipt to the store +func (k Keeper) SetPacketReceipt(ctx sdk.Context, portID, channelID string, sequence uint64) { + store := ctx.KVStore(k.storeKey) + store.Set(host.PacketReceiptKey(portID, channelID, sequence), []byte{byte(1)}) +} + +// GetPacketCommitment gets the packet commitment hash from the store +func (k Keeper) GetPacketCommitment(ctx sdk.Context, portID, channelID string, sequence uint64) []byte { + store := ctx.KVStore(k.storeKey) + bz := store.Get(host.PacketCommitmentKey(portID, channelID, sequence)) + return bz +} + +// HasPacketCommitment returns true if the packet commitment exists +func (k Keeper) HasPacketCommitment(ctx sdk.Context, portID, channelID string, sequence uint64) bool { + store := ctx.KVStore(k.storeKey) + return store.Has(host.PacketCommitmentKey(portID, channelID, sequence)) +} + +// SetPacketCommitment sets the packet commitment hash to the store +func (k Keeper) SetPacketCommitment(ctx sdk.Context, portID, channelID string, sequence uint64, commitmentHash []byte) { + store := ctx.KVStore(k.storeKey) + store.Set(host.PacketCommitmentKey(portID, channelID, sequence), commitmentHash) +} + +func (k Keeper) deletePacketCommitment(ctx sdk.Context, portID, channelID string, sequence uint64) { + store := ctx.KVStore(k.storeKey) + store.Delete(host.PacketCommitmentKey(portID, channelID, sequence)) +} + +// SetPacketAcknowledgement sets the packet ack hash to the store +func (k Keeper) SetPacketAcknowledgement(ctx sdk.Context, portID, channelID string, sequence uint64, ackHash []byte) { + store := ctx.KVStore(k.storeKey) + store.Set(host.PacketAcknowledgementKey(portID, channelID, sequence), ackHash) +} + +// GetPacketAcknowledgement gets the packet ack hash from the store +func (k Keeper) GetPacketAcknowledgement(ctx sdk.Context, portID, channelID string, sequence uint64) ([]byte, bool) { + store := ctx.KVStore(k.storeKey) + bz := store.Get(host.PacketAcknowledgementKey(portID, channelID, sequence)) + if bz == nil { + return nil, false + } + return bz, true +} + +// HasPacketAcknowledgement check if the packet ack hash is already on the store +func (k Keeper) HasPacketAcknowledgement(ctx sdk.Context, portID, channelID string, sequence uint64) bool { + store := ctx.KVStore(k.storeKey) + return store.Has(host.PacketAcknowledgementKey(portID, channelID, sequence)) +} + +// IteratePacketSequence provides an iterator over all send, receive or ack sequences. +// For each sequence, cb will be called. If the cb returns true, the iterator +// will close and stop. +func (k Keeper) IteratePacketSequence(ctx sdk.Context, iterator db.Iterator, cb func(portID, channelID string, sequence uint64) bool) { + defer iterator.Close() + for ; iterator.Valid(); iterator.Next() { + portID, channelID, err := host.ParseChannelPath(string(iterator.Key())) + if err != nil { + // return if the key is not a channel key + return + } + + sequence := sdk.BigEndianToUint64(iterator.Value()) + + if cb(portID, channelID, sequence) { + break + } + } +} + +// GetAllPacketSendSeqs returns all stored next send sequences. 
+func (k Keeper) GetAllPacketSendSeqs(ctx sdk.Context) (seqs []types.PacketSequence) { + store := ctx.KVStore(k.storeKey) + iterator := sdk.KVStorePrefixIterator(store, []byte(host.KeyNextSeqSendPrefix)) + k.IteratePacketSequence(ctx, iterator, func(portID, channelID string, nextSendSeq uint64) bool { + ps := types.NewPacketSequence(portID, channelID, nextSendSeq) + seqs = append(seqs, ps) + return false + }) + return seqs +} + +// GetAllPacketRecvSeqs returns all stored next recv sequences. +func (k Keeper) GetAllPacketRecvSeqs(ctx sdk.Context) (seqs []types.PacketSequence) { + store := ctx.KVStore(k.storeKey) + iterator := sdk.KVStorePrefixIterator(store, []byte(host.KeyNextSeqRecvPrefix)) + k.IteratePacketSequence(ctx, iterator, func(portID, channelID string, nextRecvSeq uint64) bool { + ps := types.NewPacketSequence(portID, channelID, nextRecvSeq) + seqs = append(seqs, ps) + return false + }) + return seqs +} + +// GetAllPacketAckSeqs returns all stored next acknowledgements sequences. +func (k Keeper) GetAllPacketAckSeqs(ctx sdk.Context) (seqs []types.PacketSequence) { + store := ctx.KVStore(k.storeKey) + iterator := sdk.KVStorePrefixIterator(store, []byte(host.KeyNextSeqAckPrefix)) + k.IteratePacketSequence(ctx, iterator, func(portID, channelID string, nextAckSeq uint64) bool { + ps := types.NewPacketSequence(portID, channelID, nextAckSeq) + seqs = append(seqs, ps) + return false + }) + return seqs +} + +// IteratePacketCommitment provides an iterator over all PacketCommitment objects. For each +// packet commitment, cb will be called. If the cb returns true, the iterator will close +// and stop. +func (k Keeper) IteratePacketCommitment(ctx sdk.Context, cb func(portID, channelID string, sequence uint64, hash []byte) bool) { + store := ctx.KVStore(k.storeKey) + iterator := sdk.KVStorePrefixIterator(store, []byte(host.KeyPacketCommitmentPrefix)) + k.iterateHashes(ctx, iterator, cb) +} + +// GetAllPacketCommitments returns all stored PacketCommitments objects. +func (k Keeper) GetAllPacketCommitments(ctx sdk.Context) (commitments []types.PacketState) { + k.IteratePacketCommitment(ctx, func(portID, channelID string, sequence uint64, hash []byte) bool { + pc := types.NewPacketState(portID, channelID, sequence, hash) + commitments = append(commitments, pc) + return false + }) + return commitments +} + +// IteratePacketCommitmentAtChannel provides an iterator over all PacketCommmitment objects +// at a specified channel. For each packet commitment, cb will be called. If the cb returns +// true, the iterator will close and stop. +func (k Keeper) IteratePacketCommitmentAtChannel(ctx sdk.Context, portID, channelID string, cb func(_, _ string, sequence uint64, hash []byte) bool) { + store := ctx.KVStore(k.storeKey) + iterator := sdk.KVStorePrefixIterator(store, []byte(host.PacketCommitmentPrefixPath(portID, channelID))) + k.iterateHashes(ctx, iterator, cb) +} + +// GetAllPacketCommitmentsAtChannel returns all stored PacketCommitments objects for a specified +// port ID and channel ID. +func (k Keeper) GetAllPacketCommitmentsAtChannel(ctx sdk.Context, portID, channelID string) (commitments []types.PacketState) { + k.IteratePacketCommitmentAtChannel(ctx, portID, channelID, func(_, _ string, sequence uint64, hash []byte) bool { + pc := types.NewPacketState(portID, channelID, sequence, hash) + commitments = append(commitments, pc) + return false + }) + return commitments +} + +// IteratePacketReceipt provides an iterator over all PacketReceipt objects. For each +// receipt, cb will be called. 
If the cb returns true, the iterator will close +// and stop. +func (k Keeper) IteratePacketReceipt(ctx sdk.Context, cb func(portID, channelID string, sequence uint64, receipt []byte) bool) { + store := ctx.KVStore(k.storeKey) + iterator := sdk.KVStorePrefixIterator(store, []byte(host.KeyPacketReceiptPrefix)) + k.iterateHashes(ctx, iterator, cb) +} + +// GetAllPacketReceipts returns all stored PacketReceipt objects. +func (k Keeper) GetAllPacketReceipts(ctx sdk.Context) (receipts []types.PacketState) { + k.IteratePacketReceipt(ctx, func(portID, channelID string, sequence uint64, receipt []byte) bool { + packetReceipt := types.NewPacketState(portID, channelID, sequence, receipt) + receipts = append(receipts, packetReceipt) + return false + }) + return receipts +} + +// IteratePacketAcknowledgement provides an iterator over all PacketAcknowledgement objects. For each +// acknowledgement, cb will be called. If the cb returns true, the iterator will close +// and stop. +func (k Keeper) IteratePacketAcknowledgement(ctx sdk.Context, cb func(portID, channelID string, sequence uint64, hash []byte) bool) { + store := ctx.KVStore(k.storeKey) + iterator := sdk.KVStorePrefixIterator(store, []byte(host.KeyPacketAckPrefix)) + k.iterateHashes(ctx, iterator, cb) +} + +// GetAllPacketAcks returns all stored PacketAcknowledgement objects. +func (k Keeper) GetAllPacketAcks(ctx sdk.Context) (acks []types.PacketState) { + k.IteratePacketAcknowledgement(ctx, func(portID, channelID string, sequence uint64, ack []byte) bool { + packetAck := types.NewPacketState(portID, channelID, sequence, ack) + acks = append(acks, packetAck) + return false + }) + return acks +} + +// IterateChannels provides an iterator over all Channel objects. For each +// Channel, cb will be called. If the cb returns true, the iterator will close +// and stop. +func (k Keeper) IterateChannels(ctx sdk.Context, cb func(types.IdentifiedChannel) bool) { + store := ctx.KVStore(k.storeKey) + iterator := sdk.KVStorePrefixIterator(store, []byte(host.KeyChannelEndPrefix)) + + defer iterator.Close() + for ; iterator.Valid(); iterator.Next() { + var channel types.Channel + k.cdc.MustUnmarshalBinaryBare(iterator.Value(), &channel) + + portID, channelID := host.MustParseChannelPath(string(iterator.Key())) + identifiedChannel := types.NewIdentifiedChannel(portID, channelID, channel) + if cb(identifiedChannel) { + break + } + } +} + +// GetAllChannels returns all stored Channel objects. +func (k Keeper) GetAllChannels(ctx sdk.Context) (channels []types.IdentifiedChannel) { + k.IterateChannels(ctx, func(channel types.IdentifiedChannel) bool { + channels = append(channels, channel) + return false + }) + return channels +} + +// GetChannelClientState returns the associated client state with its ID, from a port and channel identifier.
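+//
+// Illustrative usage (a minimal sketch): callers that need to inspect the light
+// client backing a channel can resolve it in a single call, e.g.
+//
+//   clientID, clientState, err := k.GetChannelClientState(ctx, portID, channelID)
+//   if err != nil {
+//       return err
+//   }
+//   latestHeight := clientState.GetLatestHeight()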
+func (k Keeper) GetChannelClientState(ctx sdk.Context, portID, channelID string) (string, exported.ClientState, error) { + channel, found := k.GetChannel(ctx, portID, channelID) + if !found { + return "", nil, sdkerrors.Wrapf(types.ErrChannelNotFound, "port-id: %s, channel-id: %s", portID, channelID) + } + + connection, found := k.connectionKeeper.GetConnection(ctx, channel.ConnectionHops[0]) + if !found { + return "", nil, sdkerrors.Wrapf(connectiontypes.ErrConnectionNotFound, "connection-id: %s", channel.ConnectionHops[0]) + } + + clientState, found := k.clientKeeper.GetClientState(ctx, connection.ClientId) + if !found { + return "", nil, sdkerrors.Wrapf(clienttypes.ErrClientNotFound, "client-id: %s", connection.ClientId) + } + + return connection.ClientId, clientState, nil +} + +// LookupModuleByChannel will return the IBCModule along with the capability associated with a given channel defined by its portID and channelID +func (k Keeper) LookupModuleByChannel(ctx sdk.Context, portID, channelID string) (string, *capabilitytypes.Capability, error) { + modules, cap, err := k.scopedKeeper.LookupModules(ctx, host.ChannelCapabilityPath(portID, channelID)) + if err != nil { + return "", nil, err + } + + return porttypes.GetModuleOwner(modules), cap, nil +} + +// common functionality for IteratePacketCommitment and IteratePacketAcknowledgement +func (k Keeper) iterateHashes(_ sdk.Context, iterator db.Iterator, cb func(portID, channelID string, sequence uint64, hash []byte) bool) { + defer iterator.Close() + + for ; iterator.Valid(); iterator.Next() { + keySplit := strings.Split(string(iterator.Key()), "/") + portID := keySplit[2] + channelID := keySplit[4] + + sequence, err := strconv.ParseUint(keySplit[len(keySplit)-1], 10, 64) + if err != nil { + panic(err) + } + + if cb(portID, channelID, sequence, iterator.Value()) { + break + } + } +} diff --git a/core/04-channel/keeper/keeper_test.go b/core/04-channel/keeper/keeper_test.go new file mode 100644 index 0000000000..a9b7dd6cf1 --- /dev/null +++ b/core/04-channel/keeper/keeper_test.go @@ -0,0 +1,329 @@ +package keeper_test + +import ( + "testing" + + "github.com/stretchr/testify/suite" + + "github.com/cosmos/cosmos-sdk/x/ibc/core/04-channel/types" + "github.com/cosmos/cosmos-sdk/x/ibc/core/exported" + ibctesting "github.com/cosmos/cosmos-sdk/x/ibc/testing" +) + +// KeeperTestSuite is a testing suite to test keeper functions. +type KeeperTestSuite struct { + suite.Suite + + coordinator *ibctesting.Coordinator + + // testing chains used for convenience and readability + chainA *ibctesting.TestChain + chainB *ibctesting.TestChain +} + +// TestKeeperTestSuite runs all the tests within this package. +func TestKeeperTestSuite(t *testing.T) { + suite.Run(t, new(KeeperTestSuite)) +} + +// SetupTest creates a coordinator with 2 test chains. +func (suite *KeeperTestSuite) SetupTest() { + suite.coordinator = ibctesting.NewCoordinator(suite.T(), 2) + suite.chainA = suite.coordinator.GetChain(ibctesting.GetChainID(0)) + suite.chainB = suite.coordinator.GetChain(ibctesting.GetChainID(1)) + // commit some blocks so that QueryProof returns valid proof (cannot return valid query if height <= 1) + suite.coordinator.CommitNBlocks(suite.chainA, 2) + suite.coordinator.CommitNBlocks(suite.chainB, 2) +} + +// TestSetChannel create clients and connections on both chains. It tests for the non-existence +// and existence of a channel in INIT on chainA. 
+func (suite *KeeperTestSuite) TestSetChannel() { + // create client and connections on both chains + _, _, connA, connB := suite.coordinator.SetupClientConnections(suite.chainA, suite.chainB, exported.Tendermint) + + // check for channel to be created on chainA + channelA := suite.chainA.NextTestChannel(connA, ibctesting.MockPort) + _, found := suite.chainA.App.IBCKeeper.ChannelKeeper.GetChannel(suite.chainA.GetContext(), channelA.PortID, channelA.ID) + suite.False(found) + + // init channel + channelA, channelB, err := suite.coordinator.ChanOpenInit(suite.chainA, suite.chainB, connA, connB, ibctesting.MockPort, ibctesting.MockPort, types.ORDERED) + suite.NoError(err) + + storedChannel, found := suite.chainA.App.IBCKeeper.ChannelKeeper.GetChannel(suite.chainA.GetContext(), channelA.PortID, channelA.ID) + // counterparty channel id is empty after open init + expectedCounterparty := types.NewCounterparty(channelB.PortID, "") + + suite.True(found) + suite.Equal(types.INIT, storedChannel.State) + suite.Equal(types.ORDERED, storedChannel.Ordering) + suite.Equal(expectedCounterparty, storedChannel.Counterparty) +} + +// TestGetAllChannels creates multiple channels on chain A through various connections +// and tests their retrieval. 2 channels are on connA0 and 1 channel is on connA1 +func (suite KeeperTestSuite) TestGetAllChannels() { + clientA, clientB, connA0, connB0, testchannel0, _ := suite.coordinator.Setup(suite.chainA, suite.chainB, types.UNORDERED) + // channel0 on first connection on chainA + counterparty0 := types.Counterparty{ + PortId: connB0.Channels[0].PortID, + ChannelId: connB0.Channels[0].ID, + } + + // channel1 is second channel on first connection on chainA + testchannel1, _ := suite.coordinator.CreateMockChannels(suite.chainA, suite.chainB, connA0, connB0, types.ORDERED) + counterparty1 := types.Counterparty{ + PortId: connB0.Channels[1].PortID, + ChannelId: connB0.Channels[1].ID, + } + + connA1, connB1 := suite.coordinator.CreateConnection(suite.chainA, suite.chainB, clientA, clientB) + + // channel2 is on a second connection on chainA + testchannel2, _, err := suite.coordinator.ChanOpenInit(suite.chainA, suite.chainB, connA1, connB1, ibctesting.MockPort, ibctesting.MockPort, types.UNORDERED) + suite.Require().NoError(err) + + // counterparty channel id is empty after open init + counterparty2 := types.Counterparty{ + PortId: connB1.Channels[0].PortID, + ChannelId: "", + } + + channel0 := types.NewChannel( + types.OPEN, types.UNORDERED, + counterparty0, []string{connA0.ID}, testchannel0.Version, + ) + channel1 := types.NewChannel( + types.OPEN, types.ORDERED, + counterparty1, []string{connA0.ID}, testchannel1.Version, + ) + channel2 := types.NewChannel( + types.INIT, types.UNORDERED, + counterparty2, []string{connA1.ID}, testchannel2.Version, + ) + + expChannels := []types.IdentifiedChannel{ + types.NewIdentifiedChannel(testchannel0.PortID, testchannel0.ID, channel0), + types.NewIdentifiedChannel(testchannel1.PortID, testchannel1.ID, channel1), + types.NewIdentifiedChannel(testchannel2.PortID, testchannel2.ID, channel2), + } + + ctxA := suite.chainA.GetContext() + + channels := suite.chainA.App.IBCKeeper.ChannelKeeper.GetAllChannels(ctxA) + suite.Require().Len(channels, len(expChannels)) + suite.Require().Equal(expChannels, channels) +} + +// TestGetAllSequences sets all packet sequences for two different channels on chain A and +// tests their retrieval. 
+func (suite KeeperTestSuite) TestGetAllSequences() { + _, _, connA, connB, channelA0, _ := suite.coordinator.Setup(suite.chainA, suite.chainB, types.UNORDERED) + channelA1, _ := suite.coordinator.CreateMockChannels(suite.chainA, suite.chainB, connA, connB, types.UNORDERED) + + seq1 := types.NewPacketSequence(channelA0.PortID, channelA0.ID, 1) + seq2 := types.NewPacketSequence(channelA0.PortID, channelA0.ID, 2) + seq3 := types.NewPacketSequence(channelA1.PortID, channelA1.ID, 3) + + // seq1 should be overwritten by seq2 + expSeqs := []types.PacketSequence{seq2, seq3} + + ctxA := suite.chainA.GetContext() + + for _, seq := range []types.PacketSequence{seq1, seq2, seq3} { + suite.chainA.App.IBCKeeper.ChannelKeeper.SetNextSequenceSend(ctxA, seq.PortId, seq.ChannelId, seq.Sequence) + suite.chainA.App.IBCKeeper.ChannelKeeper.SetNextSequenceRecv(ctxA, seq.PortId, seq.ChannelId, seq.Sequence) + suite.chainA.App.IBCKeeper.ChannelKeeper.SetNextSequenceAck(ctxA, seq.PortId, seq.ChannelId, seq.Sequence) + } + + sendSeqs := suite.chainA.App.IBCKeeper.ChannelKeeper.GetAllPacketSendSeqs(ctxA) + recvSeqs := suite.chainA.App.IBCKeeper.ChannelKeeper.GetAllPacketRecvSeqs(ctxA) + ackSeqs := suite.chainA.App.IBCKeeper.ChannelKeeper.GetAllPacketAckSeqs(ctxA) + suite.Len(sendSeqs, 2) + suite.Len(recvSeqs, 2) + suite.Len(ackSeqs, 2) + + suite.Equal(expSeqs, sendSeqs) + suite.Equal(expSeqs, recvSeqs) + suite.Equal(expSeqs, ackSeqs) +} + +// TestGetAllPacketState creates a set of acks, packet commitments, and receipts on two different +// channels on chain A and tests their retrieval. +func (suite KeeperTestSuite) TestGetAllPacketState() { + _, _, connA, connB, channelA0, _ := suite.coordinator.Setup(suite.chainA, suite.chainB, types.UNORDERED) + channelA1, _ := suite.coordinator.CreateMockChannels(suite.chainA, suite.chainB, connA, connB, types.UNORDERED) + + // channel 0 acks + ack1 := types.NewPacketState(channelA0.PortID, channelA0.ID, 1, []byte("ack")) + ack2 := types.NewPacketState(channelA0.PortID, channelA0.ID, 2, []byte("ack")) + + // duplicate ack + ack2dup := types.NewPacketState(channelA0.PortID, channelA0.ID, 2, []byte("ack")) + + // channel 1 acks + ack3 := types.NewPacketState(channelA1.PortID, channelA1.ID, 1, []byte("ack")) + + // create channel 0 receipts + receipt := string([]byte{byte(1)}) + rec1 := types.NewPacketState(channelA0.PortID, channelA0.ID, 1, []byte(receipt)) + rec2 := types.NewPacketState(channelA0.PortID, channelA0.ID, 2, []byte(receipt)) + + // channel 1 receipts + rec3 := types.NewPacketState(channelA1.PortID, channelA1.ID, 1, []byte(receipt)) + rec4 := types.NewPacketState(channelA1.PortID, channelA1.ID, 2, []byte(receipt)) + + // channel 0 packet commitments + comm1 := types.NewPacketState(channelA0.PortID, channelA0.ID, 1, []byte("hash")) + comm2 := types.NewPacketState(channelA0.PortID, channelA0.ID, 2, []byte("hash")) + + // channel 1 packet commitments + comm3 := types.NewPacketState(channelA1.PortID, channelA1.ID, 1, []byte("hash")) + comm4 := types.NewPacketState(channelA1.PortID, channelA1.ID, 2, []byte("hash")) + + expAcks := []types.PacketState{ack1, ack2, ack3} + expReceipts := []types.PacketState{rec1, rec2, rec3, rec4} + expCommitments := []types.PacketState{comm1, comm2, comm3, comm4} + + ctxA := suite.chainA.GetContext() + + // set acknowledgements + for _, ack := range []types.PacketState{ack1, ack2, ack2dup, ack3} { + suite.chainA.App.IBCKeeper.ChannelKeeper.SetPacketAcknowledgement(ctxA, ack.PortId, ack.ChannelId, ack.Sequence, ack.Data) + } + + // set packet 
receipts + for _, rec := range expReceipts { + suite.chainA.App.IBCKeeper.ChannelKeeper.SetPacketReceipt(ctxA, rec.PortId, rec.ChannelId, rec.Sequence) + } + + // set packet commitments + for _, comm := range expCommitments { + suite.chainA.App.IBCKeeper.ChannelKeeper.SetPacketCommitment(ctxA, comm.PortId, comm.ChannelId, comm.Sequence, comm.Data) + } + + acks := suite.chainA.App.IBCKeeper.ChannelKeeper.GetAllPacketAcks(ctxA) + receipts := suite.chainA.App.IBCKeeper.ChannelKeeper.GetAllPacketReceipts(ctxA) + commitments := suite.chainA.App.IBCKeeper.ChannelKeeper.GetAllPacketCommitments(ctxA) + + suite.Require().Len(acks, len(expAcks)) + suite.Require().Len(commitments, len(expCommitments)) + suite.Require().Len(receipts, len(expReceipts)) + + suite.Require().Equal(expAcks, acks) + suite.Require().Equal(expReceipts, receipts) + suite.Require().Equal(expCommitments, commitments) +} + +// TestSetSequence verifies that the keeper correctly sets the sequence counters. +func (suite *KeeperTestSuite) TestSetSequence() { + _, _, _, _, channelA, _ := suite.coordinator.Setup(suite.chainA, suite.chainB, types.UNORDERED) + + ctxA := suite.chainA.GetContext() + one := uint64(1) + + // initialized channel has next send seq of 1 + seq, found := suite.chainA.App.IBCKeeper.ChannelKeeper.GetNextSequenceSend(ctxA, channelA.PortID, channelA.ID) + suite.True(found) + suite.Equal(one, seq) + + // initialized channel has next seq recv of 1 + seq, found = suite.chainA.App.IBCKeeper.ChannelKeeper.GetNextSequenceRecv(ctxA, channelA.PortID, channelA.ID) + suite.True(found) + suite.Equal(one, seq) + + // initialized channel has next seq ack of 1 + seq, found = suite.chainA.App.IBCKeeper.ChannelKeeper.GetNextSequenceAck(ctxA, channelA.PortID, channelA.ID) + suite.True(found) + suite.Equal(one, seq) + + nextSeqSend, nextSeqRecv, nextSeqAck := uint64(10), uint64(10), uint64(10) + suite.chainA.App.IBCKeeper.ChannelKeeper.SetNextSequenceSend(ctxA, channelA.PortID, channelA.ID, nextSeqSend) + suite.chainA.App.IBCKeeper.ChannelKeeper.SetNextSequenceRecv(ctxA, channelA.PortID, channelA.ID, nextSeqRecv) + suite.chainA.App.IBCKeeper.ChannelKeeper.SetNextSequenceAck(ctxA, channelA.PortID, channelA.ID, nextSeqAck) + + storedNextSeqSend, found := suite.chainA.App.IBCKeeper.ChannelKeeper.GetNextSequenceSend(ctxA, channelA.PortID, channelA.ID) + suite.True(found) + suite.Equal(nextSeqSend, storedNextSeqSend) + + storedNextSeqRecv, found := suite.chainA.App.IBCKeeper.ChannelKeeper.GetNextSequenceRecv(ctxA, channelA.PortID, channelA.ID) + suite.True(found) + suite.Equal(nextSeqRecv, storedNextSeqRecv) + + storedNextSeqAck, found := suite.chainA.App.IBCKeeper.ChannelKeeper.GetNextSequenceAck(ctxA, channelA.PortID, channelA.ID) + suite.True(found) + suite.Equal(nextSeqAck, storedNextSeqAck) +} + +// TestGetAllPacketCommitmentsAtChannel verifies that the keeper returns all stored packet +// commitments for a specific channel. The test will store consecutive commitments up to the +// value of "seq" and then add non-consecutive up to the value of "maxSeq". A final commitment +// with the value maxSeq + 1 is set on a different channel.
+func (suite *KeeperTestSuite) TestGetAllPacketCommitmentsAtChannel() { + _, _, connA, connB, channelA, _ := suite.coordinator.Setup(suite.chainA, suite.chainB, types.UNORDERED) + + // create second channel + channelA1, _ := suite.coordinator.CreateMockChannels(suite.chainA, suite.chainB, connA, connB, types.UNORDERED) + + ctxA := suite.chainA.GetContext() + expectedSeqs := make(map[uint64]bool) + hash := []byte("commitment") + + seq := uint64(15) + maxSeq := uint64(25) + suite.Require().Greater(maxSeq, seq) + + // create consecutive commitments + for i := uint64(1); i < seq; i++ { + suite.chainA.App.IBCKeeper.ChannelKeeper.SetPacketCommitment(ctxA, channelA.PortID, channelA.ID, i, hash) + expectedSeqs[i] = true + } + + // add non-consecutive commitments + for i := seq; i < maxSeq; i += 2 { + suite.chainA.App.IBCKeeper.ChannelKeeper.SetPacketCommitment(ctxA, channelA.PortID, channelA.ID, i, hash) + expectedSeqs[i] = true + } + + // add sequence on different channel/port + suite.chainA.App.IBCKeeper.ChannelKeeper.SetPacketCommitment(ctxA, channelA1.PortID, channelA1.ID, maxSeq+1, hash) + + commitments := suite.chainA.App.IBCKeeper.ChannelKeeper.GetAllPacketCommitmentsAtChannel(ctxA, channelA.PortID, channelA.ID) + + suite.Equal(len(expectedSeqs), len(commitments)) + // ensure above for loops occurred + suite.NotEqual(0, len(commitments)) + + // verify that all the packet commitments were stored + for _, packet := range commitments { + suite.True(expectedSeqs[packet.Sequence]) + suite.Equal(channelA.PortID, packet.PortId) + suite.Equal(channelA.ID, packet.ChannelId) + suite.Equal(hash, packet.Data) + + // prevent duplicates from passing checks + expectedSeqs[packet.Sequence] = false + } +} + +// TestSetPacketAcknowledgement verifies that packet acknowledgements are correctly +// set in the keeper. 
+func (suite *KeeperTestSuite) TestSetPacketAcknowledgement() { + _, _, _, _, channelA, _ := suite.coordinator.Setup(suite.chainA, suite.chainB, types.UNORDERED) + + ctxA := suite.chainA.GetContext() + seq := uint64(10) + + storedAckHash, found := suite.chainA.App.IBCKeeper.ChannelKeeper.GetPacketAcknowledgement(ctxA, channelA.PortID, channelA.ID, seq) + suite.Require().False(found) + suite.Require().Nil(storedAckHash) + + ackHash := []byte("ackhash") + suite.chainA.App.IBCKeeper.ChannelKeeper.SetPacketAcknowledgement(ctxA, channelA.PortID, channelA.ID, seq, ackHash) + + storedAckHash, found = suite.chainA.App.IBCKeeper.ChannelKeeper.GetPacketAcknowledgement(ctxA, channelA.PortID, channelA.ID, seq) + suite.Require().True(found) + suite.Require().Equal(ackHash, storedAckHash) + suite.Require().True(suite.chainA.App.IBCKeeper.ChannelKeeper.HasPacketAcknowledgement(ctxA, channelA.PortID, channelA.ID, seq)) +} diff --git a/core/04-channel/keeper/packet.go b/core/04-channel/keeper/packet.go new file mode 100644 index 0000000000..49b59733c5 --- /dev/null +++ b/core/04-channel/keeper/packet.go @@ -0,0 +1,528 @@ +package keeper + +import ( + "bytes" + "fmt" + "time" + + sdk "github.com/cosmos/cosmos-sdk/types" + sdkerrors "github.com/cosmos/cosmos-sdk/types/errors" + capabilitytypes "github.com/cosmos/cosmos-sdk/x/capability/types" + clienttypes "github.com/cosmos/cosmos-sdk/x/ibc/core/02-client/types" + connectiontypes "github.com/cosmos/cosmos-sdk/x/ibc/core/03-connection/types" + "github.com/cosmos/cosmos-sdk/x/ibc/core/04-channel/types" + host "github.com/cosmos/cosmos-sdk/x/ibc/core/24-host" + "github.com/cosmos/cosmos-sdk/x/ibc/core/exported" +) + +// SendPacket is called by a module in order to send an IBC packet on a channel +// end owned by the calling module to the corresponding module on the counterparty +// chain. 
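+//
+// Illustrative usage (a minimal sketch; variable names are placeholders and the
+// error handling is application-specific): a module builds the packet, fetches
+// the channel capability it claimed during the handshake, and passes both in, e.g.
+//
+//   packet := types.NewPacket(data, sequence, sourcePort, sourceChannel,
+//       destPort, destChannel, timeoutHeight, timeoutTimestamp)
+//   if err := k.SendPacket(ctx, channelCap, packet); err != nil {
+//       return err
+//   }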
+func (k Keeper) SendPacket( + ctx sdk.Context, + channelCap *capabilitytypes.Capability, + packet exported.PacketI, +) error { + if err := packet.ValidateBasic(); err != nil { + return sdkerrors.Wrap(err, "packet failed basic validation") + } + + channel, found := k.GetChannel(ctx, packet.GetSourcePort(), packet.GetSourceChannel()) + if !found { + return sdkerrors.Wrap(types.ErrChannelNotFound, packet.GetSourceChannel()) + } + + if channel.State == types.CLOSED { + return sdkerrors.Wrapf( + types.ErrInvalidChannelState, + "channel is CLOSED (got %s)", channel.State.String(), + ) + } + + if !k.scopedKeeper.AuthenticateCapability(ctx, channelCap, host.ChannelCapabilityPath(packet.GetSourcePort(), packet.GetSourceChannel())) { + return sdkerrors.Wrapf(types.ErrChannelCapabilityNotFound, "caller does not own capability for channel, port ID (%s) channel ID (%s)", packet.GetSourcePort(), packet.GetSourceChannel()) + } + + if packet.GetDestPort() != channel.Counterparty.PortId { + return sdkerrors.Wrapf( + types.ErrInvalidPacket, + "packet destination port doesn't match the counterparty's port (%s ≠ %s)", packet.GetDestPort(), channel.Counterparty.PortId, + ) + } + + if packet.GetDestChannel() != channel.Counterparty.ChannelId { + return sdkerrors.Wrapf( + types.ErrInvalidPacket, + "packet destination channel doesn't match the counterparty's channel (%s ≠ %s)", packet.GetDestChannel(), channel.Counterparty.ChannelId, + ) + } + + connectionEnd, found := k.connectionKeeper.GetConnection(ctx, channel.ConnectionHops[0]) + if !found { + return sdkerrors.Wrap(connectiontypes.ErrConnectionNotFound, channel.ConnectionHops[0]) + } + + clientState, found := k.clientKeeper.GetClientState(ctx, connectionEnd.GetClientID()) + if !found { + return clienttypes.ErrConsensusStateNotFound + } + + // prevent accidental sends with clients that cannot be updated + if clientState.IsFrozen() { + return sdkerrors.Wrapf(clienttypes.ErrClientFrozen, "cannot send packet on a frozen client with ID %s", connectionEnd.GetClientID()) + } + + // check if packet timeouted on the receiving chain + latestHeight := clientState.GetLatestHeight() + timeoutHeight := packet.GetTimeoutHeight() + if !timeoutHeight.IsZero() && latestHeight.GTE(timeoutHeight) { + return sdkerrors.Wrapf( + types.ErrPacketTimeout, + "receiving chain block height >= packet timeout height (%s >= %s)", latestHeight, timeoutHeight, + ) + } + + latestTimestamp, err := k.connectionKeeper.GetTimestampAtHeight(ctx, connectionEnd, latestHeight) + if err != nil { + return err + } + + if packet.GetTimeoutTimestamp() != 0 && latestTimestamp >= packet.GetTimeoutTimestamp() { + return sdkerrors.Wrapf( + types.ErrPacketTimeout, + "receiving chain block timestamp >= packet timeout timestamp (%s >= %s)", time.Unix(0, int64(latestTimestamp)), time.Unix(0, int64(packet.GetTimeoutTimestamp())), + ) + } + + nextSequenceSend, found := k.GetNextSequenceSend(ctx, packet.GetSourcePort(), packet.GetSourceChannel()) + if !found { + return sdkerrors.Wrapf( + types.ErrSequenceSendNotFound, + "source port: %s, source channel: %s", packet.GetSourcePort(), packet.GetSourceChannel(), + ) + } + + if packet.GetSequence() != nextSequenceSend { + return sdkerrors.Wrapf( + types.ErrInvalidPacket, + "packet sequence ≠ next send sequence (%d ≠ %d)", packet.GetSequence(), nextSequenceSend, + ) + } + + commitment := types.CommitPacket(k.cdc, packet) + + nextSequenceSend++ + k.SetNextSequenceSend(ctx, packet.GetSourcePort(), packet.GetSourceChannel(), nextSequenceSend) + k.SetPacketCommitment(ctx, 
packet.GetSourcePort(), packet.GetSourceChannel(), packet.GetSequence(), commitment) + + // Emit Event with Packet data along with other packet information for relayer to pick up + // and relay to other chain + ctx.EventManager().EmitEvents(sdk.Events{ + sdk.NewEvent( + types.EventTypeSendPacket, + sdk.NewAttribute(types.AttributeKeyData, string(packet.GetData())), + sdk.NewAttribute(types.AttributeKeyTimeoutHeight, timeoutHeight.String()), + sdk.NewAttribute(types.AttributeKeyTimeoutTimestamp, fmt.Sprintf("%d", packet.GetTimeoutTimestamp())), + sdk.NewAttribute(types.AttributeKeySequence, fmt.Sprintf("%d", packet.GetSequence())), + sdk.NewAttribute(types.AttributeKeySrcPort, packet.GetSourcePort()), + sdk.NewAttribute(types.AttributeKeySrcChannel, packet.GetSourceChannel()), + sdk.NewAttribute(types.AttributeKeyDstPort, packet.GetDestPort()), + sdk.NewAttribute(types.AttributeKeyDstChannel, packet.GetDestChannel()), + sdk.NewAttribute(types.AttributeKeyChannelOrdering, channel.Ordering.String()), + // we only support 1-hop packets now, and that is the most important hop for a relayer + // (is it going to a chain I am connected to) + sdk.NewAttribute(types.AttributeKeyConnection, channel.ConnectionHops[0]), + ), + sdk.NewEvent( + sdk.EventTypeMessage, + sdk.NewAttribute(sdk.AttributeKeyModule, types.AttributeValueCategory), + ), + }) + + k.Logger(ctx).Info("packet sent", "packet", fmt.Sprintf("%v", packet)) + return nil +} + +// RecvPacket is called by a module in order to receive & process an IBC packet +// sent on the corresponding channel end on the counterparty chain. +func (k Keeper) RecvPacket( + ctx sdk.Context, + chanCap *capabilitytypes.Capability, + packet exported.PacketI, + proof []byte, + proofHeight exported.Height, +) error { + channel, found := k.GetChannel(ctx, packet.GetDestPort(), packet.GetDestChannel()) + if !found { + return sdkerrors.Wrap(types.ErrChannelNotFound, packet.GetDestChannel()) + } + + if channel.State != types.OPEN { + return sdkerrors.Wrapf( + types.ErrInvalidChannelState, + "channel state is not OPEN (got %s)", channel.State.String(), + ) + } + + // Authenticate capability to ensure caller has authority to receive packet on this channel + capName := host.ChannelCapabilityPath(packet.GetDestPort(), packet.GetDestChannel()) + if !k.scopedKeeper.AuthenticateCapability(ctx, chanCap, capName) { + return sdkerrors.Wrapf( + types.ErrInvalidChannelCapability, + "channel capability failed authentication for capability name %s", capName, + ) + } + + // packet must come from the channel's counterparty + if packet.GetSourcePort() != channel.Counterparty.PortId { + return sdkerrors.Wrapf( + types.ErrInvalidPacket, + "packet source port doesn't match the counterparty's port (%s ≠ %s)", packet.GetSourcePort(), channel.Counterparty.PortId, + ) + } + + if packet.GetSourceChannel() != channel.Counterparty.ChannelId { + return sdkerrors.Wrapf( + types.ErrInvalidPacket, + "packet source channel doesn't match the counterparty's channel (%s ≠ %s)", packet.GetSourceChannel(), channel.Counterparty.ChannelId, + ) + } + + // Connection must be OPEN to receive a packet. It is possible for connection to not yet be open if packet was + // sent optimistically before connection and channel handshake completed. 
However, to receive a packet, + // connection and channel must both be open + connectionEnd, found := k.connectionKeeper.GetConnection(ctx, channel.ConnectionHops[0]) + if !found { + return sdkerrors.Wrap(connectiontypes.ErrConnectionNotFound, channel.ConnectionHops[0]) + } + + if connectionEnd.GetState() != int32(connectiontypes.OPEN) { + return sdkerrors.Wrapf( + connectiontypes.ErrInvalidConnectionState, + "connection state is not OPEN (got %s)", connectiontypes.State(connectionEnd.GetState()).String(), + ) + } + + // check if packet timeouted by comparing it with the latest height of the chain + selfHeight := clienttypes.GetSelfHeight(ctx) + timeoutHeight := packet.GetTimeoutHeight() + if !timeoutHeight.IsZero() && selfHeight.GTE(timeoutHeight) { + return sdkerrors.Wrapf( + types.ErrPacketTimeout, + "block height >= packet timeout height (%s >= %s)", selfHeight, timeoutHeight, + ) + } + + // check if packet timeouted by comparing it with the latest timestamp of the chain + if packet.GetTimeoutTimestamp() != 0 && uint64(ctx.BlockTime().UnixNano()) >= packet.GetTimeoutTimestamp() { + return sdkerrors.Wrapf( + types.ErrPacketTimeout, + "block timestamp >= packet timeout timestamp (%s >= %s)", ctx.BlockTime(), time.Unix(0, int64(packet.GetTimeoutTimestamp())), + ) + } + + commitment := types.CommitPacket(k.cdc, packet) + + // verify that the counterparty did commit to sending this packet + if err := k.connectionKeeper.VerifyPacketCommitment( + ctx, connectionEnd, proofHeight, proof, + packet.GetSourcePort(), packet.GetSourceChannel(), packet.GetSequence(), + commitment, + ); err != nil { + return sdkerrors.Wrap(err, "couldn't verify counterparty packet commitment") + } + + switch channel.Ordering { + case types.UNORDERED: + // check if the packet receipt has been received already for unordered channels + _, found := k.GetPacketReceipt(ctx, packet.GetDestPort(), packet.GetDestChannel(), packet.GetSequence()) + if found { + return sdkerrors.Wrapf( + types.ErrInvalidPacket, + "packet sequence (%d) already has been received", packet.GetSequence(), + ) + } + + // All verification complete, update state + // For unordered channels we must set the receipt so it can be verified on the other side. 
+ // This receipt does not contain any data, since the packet has not yet been processed, + // it's just a single store key set to an empty string to indicate that the packet has been received + k.SetPacketReceipt(ctx, packet.GetDestPort(), packet.GetDestChannel(), packet.GetSequence()) + + case types.ORDERED: + // check if the packet is being received in order + nextSequenceRecv, found := k.GetNextSequenceRecv(ctx, packet.GetDestPort(), packet.GetDestChannel()) + if !found { + return sdkerrors.Wrapf( + types.ErrSequenceReceiveNotFound, + "destination port: %s, destination channel: %s", packet.GetDestPort(), packet.GetDestChannel(), + ) + } + + if packet.GetSequence() != nextSequenceRecv { + return sdkerrors.Wrapf( + types.ErrInvalidPacket, + "packet sequence ≠ next receive sequence (%d ≠ %d)", packet.GetSequence(), nextSequenceRecv, + ) + } + + // All verification complete, update state + // In ordered case, we must increment nextSequenceRecv + nextSequenceRecv++ + + // incrementing nextSequenceRecv and storing under this chain's channelEnd identifiers + // Since this is the receiving chain, our channelEnd is packet's destination port and channel + k.SetNextSequenceRecv(ctx, packet.GetDestPort(), packet.GetDestChannel(), nextSequenceRecv) + + } + + // log that a packet has been received & executed + k.Logger(ctx).Info("packet received", "packet", fmt.Sprintf("%v", packet)) + + // emit an event that the relayer can query for + ctx.EventManager().EmitEvents(sdk.Events{ + sdk.NewEvent( + types.EventTypeRecvPacket, + sdk.NewAttribute(types.AttributeKeyData, string(packet.GetData())), + sdk.NewAttribute(types.AttributeKeyTimeoutHeight, packet.GetTimeoutHeight().String()), + sdk.NewAttribute(types.AttributeKeyTimeoutTimestamp, fmt.Sprintf("%d", packet.GetTimeoutTimestamp())), + sdk.NewAttribute(types.AttributeKeySequence, fmt.Sprintf("%d", packet.GetSequence())), + sdk.NewAttribute(types.AttributeKeySrcPort, packet.GetSourcePort()), + sdk.NewAttribute(types.AttributeKeySrcChannel, packet.GetSourceChannel()), + sdk.NewAttribute(types.AttributeKeyDstPort, packet.GetDestPort()), + sdk.NewAttribute(types.AttributeKeyDstChannel, packet.GetDestChannel()), + sdk.NewAttribute(types.AttributeKeyChannelOrdering, channel.Ordering.String()), + // we only support 1-hop packets now, and that is the most important hop for a relayer + // (is it going to a chain I am connected to) + sdk.NewAttribute(types.AttributeKeyConnection, channel.ConnectionHops[0]), + ), + sdk.NewEvent( + sdk.EventTypeMessage, + sdk.NewAttribute(sdk.AttributeKeyModule, types.AttributeValueCategory), + ), + }) + + return nil +} + +// WriteAcknowledgement writes the packet execution acknowledgement to the state, +// which will be verified by the counterparty chain using AcknowledgePacket. +// +// CONTRACT: +// +// 1) For synchronous execution, this function is called in the IBC handler. +// For async handling, it needs to be called directly by the module which originally +// processed the packet. +// +// 2) Assumes that packet receipt has been written (unordered), or nextSeqRecv was incremented (ordered) +// previously by RecvPacket.
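+//
+// Illustrative usage (a minimal sketch of the asynchronous case in CONTRACT 1;
+// ackBytes is whatever acknowledgement the application constructed once it
+// finished processing the previously received packet):
+//
+//   if err := k.WriteAcknowledgement(ctx, chanCap, packet, ackBytes); err != nil {
+//       return err
+//   }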
+func (k Keeper) WriteAcknowledgement( + ctx sdk.Context, + chanCap *capabilitytypes.Capability, + packet exported.PacketI, + acknowledgement []byte, +) error { + channel, found := k.GetChannel(ctx, packet.GetDestPort(), packet.GetDestChannel()) + if !found { + return sdkerrors.Wrap(types.ErrChannelNotFound, packet.GetDestChannel()) + } + + if channel.State != types.OPEN { + return sdkerrors.Wrapf( + types.ErrInvalidChannelState, + "channel state is not OPEN (got %s)", channel.State.String(), + ) + } + + // Authenticate capability to ensure caller has authority to receive packet on this channel + capName := host.ChannelCapabilityPath(packet.GetDestPort(), packet.GetDestChannel()) + if !k.scopedKeeper.AuthenticateCapability(ctx, chanCap, capName) { + return sdkerrors.Wrapf( + types.ErrInvalidChannelCapability, + "channel capability failed authentication for capability name %s", capName, + ) + } + + // NOTE: IBC app modules might have written the acknowledgement synchronously on + // the OnRecvPacket callback so we need to check if the acknowledgement is already + // set on the store and return an error if so. + if k.HasPacketAcknowledgement(ctx, packet.GetDestPort(), packet.GetDestChannel(), packet.GetSequence()) { + return types.ErrAcknowledgementExists + } + + if len(acknowledgement) == 0 { + return sdkerrors.Wrap(types.ErrInvalidAcknowledgement, "acknowledgement cannot be empty") + } + + // set the acknowledgement so that it can be verified on the other side + k.SetPacketAcknowledgement( + ctx, packet.GetDestPort(), packet.GetDestChannel(), packet.GetSequence(), + types.CommitAcknowledgement(acknowledgement), + ) + + // log that a packet acknowledgement has been written + k.Logger(ctx).Info("acknowledged written", "packet", fmt.Sprintf("%v", packet)) + + // emit an event that the relayer can query for + ctx.EventManager().EmitEvents(sdk.Events{ + sdk.NewEvent( + types.EventTypeWriteAck, + sdk.NewAttribute(types.AttributeKeyData, string(packet.GetData())), + sdk.NewAttribute(types.AttributeKeyTimeoutHeight, packet.GetTimeoutHeight().String()), + sdk.NewAttribute(types.AttributeKeyTimeoutTimestamp, fmt.Sprintf("%d", packet.GetTimeoutTimestamp())), + sdk.NewAttribute(types.AttributeKeySequence, fmt.Sprintf("%d", packet.GetSequence())), + sdk.NewAttribute(types.AttributeKeySrcPort, packet.GetSourcePort()), + sdk.NewAttribute(types.AttributeKeySrcChannel, packet.GetSourceChannel()), + sdk.NewAttribute(types.AttributeKeyDstPort, packet.GetDestPort()), + sdk.NewAttribute(types.AttributeKeyDstChannel, packet.GetDestChannel()), + sdk.NewAttribute(types.AttributeKeyAck, string(acknowledgement)), + // we only support 1-hop packets now, and that is the most important hop for a relayer + // (is it going to a chain I am connected to) + sdk.NewAttribute(types.AttributeKeyConnection, channel.ConnectionHops[0]), + ), + sdk.NewEvent( + sdk.EventTypeMessage, + sdk.NewAttribute(sdk.AttributeKeyModule, types.AttributeValueCategory), + ), + }) + + return nil +} + +// AcknowledgePacket is called by a module to process the acknowledgement of a +// packet previously sent by the calling module on a channel to a counterparty +// module on the counterparty chain. Its intended usage is within the ante +// handler. AcknowledgePacket will clean up the packet commitment, +// which is no longer necessary since the packet has been received and acted upon. +// It will also increment NextSequenceAck in case of ORDERED channels. 
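+//
+// Illustrative usage (a minimal sketch; proof and proofHeight are assumed to be
+// supplied by a relayer that queried the acknowledgement on the counterparty chain):
+//
+//   if err := k.AcknowledgePacket(ctx, chanCap, packet, ack, proof, proofHeight); err != nil {
+//       return err
+//   }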
+func (k Keeper) AcknowledgePacket( + ctx sdk.Context, + chanCap *capabilitytypes.Capability, + packet exported.PacketI, + acknowledgement []byte, + proof []byte, + proofHeight exported.Height, +) error { + channel, found := k.GetChannel(ctx, packet.GetSourcePort(), packet.GetSourceChannel()) + if !found { + return sdkerrors.Wrapf( + types.ErrChannelNotFound, + "port ID (%s) channel ID (%s)", packet.GetSourcePort(), packet.GetSourceChannel(), + ) + } + + if channel.State != types.OPEN { + return sdkerrors.Wrapf( + types.ErrInvalidChannelState, + "channel state is not OPEN (got %s)", channel.State.String(), + ) + } + + // Authenticate capability to ensure caller has authority to receive packet on this channel + capName := host.ChannelCapabilityPath(packet.GetSourcePort(), packet.GetSourceChannel()) + if !k.scopedKeeper.AuthenticateCapability(ctx, chanCap, capName) { + return sdkerrors.Wrapf( + types.ErrInvalidChannelCapability, + "channel capability failed authentication for capability name %s", capName, + ) + } + + // packet must have been sent to the channel's counterparty + if packet.GetDestPort() != channel.Counterparty.PortId { + return sdkerrors.Wrapf( + types.ErrInvalidPacket, + "packet destination port doesn't match the counterparty's port (%s ≠ %s)", packet.GetDestPort(), channel.Counterparty.PortId, + ) + } + + if packet.GetDestChannel() != channel.Counterparty.ChannelId { + return sdkerrors.Wrapf( + types.ErrInvalidPacket, + "packet destination channel doesn't match the counterparty's channel (%s ≠ %s)", packet.GetDestChannel(), channel.Counterparty.ChannelId, + ) + } + + connectionEnd, found := k.connectionKeeper.GetConnection(ctx, channel.ConnectionHops[0]) + if !found { + return sdkerrors.Wrap(connectiontypes.ErrConnectionNotFound, channel.ConnectionHops[0]) + } + + if connectionEnd.GetState() != int32(connectiontypes.OPEN) { + return sdkerrors.Wrapf( + connectiontypes.ErrInvalidConnectionState, + "connection state is not OPEN (got %s)", connectiontypes.State(connectionEnd.GetState()).String(), + ) + } + + commitment := k.GetPacketCommitment(ctx, packet.GetSourcePort(), packet.GetSourceChannel(), packet.GetSequence()) + + packetCommitment := types.CommitPacket(k.cdc, packet) + + // verify we sent the packet and haven't cleared it out yet + if !bytes.Equal(commitment, packetCommitment) { + return sdkerrors.Wrapf(types.ErrInvalidPacket, "commitment bytes are not equal: got (%v), expected (%v)", packetCommitment, commitment) + } + + if err := k.connectionKeeper.VerifyPacketAcknowledgement( + ctx, connectionEnd, proofHeight, proof, packet.GetDestPort(), packet.GetDestChannel(), + packet.GetSequence(), acknowledgement, + ); err != nil { + return sdkerrors.Wrap(err, "packet acknowledgement verification failed") + } + + // assert packets acknowledged in order + if channel.Ordering == types.ORDERED { + nextSequenceAck, found := k.GetNextSequenceAck(ctx, packet.GetSourcePort(), packet.GetSourceChannel()) + if !found { + return sdkerrors.Wrapf( + types.ErrSequenceAckNotFound, + "source port: %s, source channel: %s", packet.GetSourcePort(), packet.GetSourceChannel(), + ) + } + + if packet.GetSequence() != nextSequenceAck { + return sdkerrors.Wrapf( + sdkerrors.ErrInvalidSequence, + "packet sequence ≠ next ack sequence (%d ≠ %d)", packet.GetSequence(), nextSequenceAck, + ) + } + + // All verification complete, in the case of ORDERED channels we must increment nextSequenceAck + nextSequenceAck++ + + // incrementing NextSequenceAck and storing under this chain's channelEnd identifiers + // 
Since this is the original sending chain, our channelEnd is packet's source port and channel + k.SetNextSequenceAck(ctx, packet.GetSourcePort(), packet.GetSourceChannel(), nextSequenceAck) + + } + + // Delete packet commitment, since the packet has been acknowledged, the commitement is no longer necessary + k.deletePacketCommitment(ctx, packet.GetSourcePort(), packet.GetSourceChannel(), packet.GetSequence()) + + // log that a packet has been acknowledged + k.Logger(ctx).Info("packet acknowledged", "packet", fmt.Sprintf("%v", packet)) + + // emit an event marking that we have processed the acknowledgement + ctx.EventManager().EmitEvents(sdk.Events{ + sdk.NewEvent( + types.EventTypeAcknowledgePacket, + sdk.NewAttribute(types.AttributeKeyTimeoutHeight, packet.GetTimeoutHeight().String()), + sdk.NewAttribute(types.AttributeKeyTimeoutTimestamp, fmt.Sprintf("%d", packet.GetTimeoutTimestamp())), + sdk.NewAttribute(types.AttributeKeySequence, fmt.Sprintf("%d", packet.GetSequence())), + sdk.NewAttribute(types.AttributeKeySrcPort, packet.GetSourcePort()), + sdk.NewAttribute(types.AttributeKeySrcChannel, packet.GetSourceChannel()), + sdk.NewAttribute(types.AttributeKeyDstPort, packet.GetDestPort()), + sdk.NewAttribute(types.AttributeKeyDstChannel, packet.GetDestChannel()), + sdk.NewAttribute(types.AttributeKeyChannelOrdering, channel.Ordering.String()), + // we only support 1-hop packets now, and that is the most important hop for a relayer + // (is it going to a chain I am connected to) + sdk.NewAttribute(types.AttributeKeyConnection, channel.ConnectionHops[0]), + ), + sdk.NewEvent( + sdk.EventTypeMessage, + sdk.NewAttribute(sdk.AttributeKeyModule, types.AttributeValueCategory), + ), + }) + + return nil +} diff --git a/core/04-channel/keeper/packet_test.go b/core/04-channel/keeper/packet_test.go new file mode 100644 index 0000000000..232e687582 --- /dev/null +++ b/core/04-channel/keeper/packet_test.go @@ -0,0 +1,665 @@ +package keeper_test + +import ( + "fmt" + + capabilitytypes "github.com/cosmos/cosmos-sdk/x/capability/types" + clienttypes "github.com/cosmos/cosmos-sdk/x/ibc/core/02-client/types" + "github.com/cosmos/cosmos-sdk/x/ibc/core/04-channel/types" + host "github.com/cosmos/cosmos-sdk/x/ibc/core/24-host" + "github.com/cosmos/cosmos-sdk/x/ibc/core/exported" + ibctmtypes "github.com/cosmos/cosmos-sdk/x/ibc/light-clients/07-tendermint/types" + ibctesting "github.com/cosmos/cosmos-sdk/x/ibc/testing" + ibcmock "github.com/cosmos/cosmos-sdk/x/ibc/testing/mock" +) + +var ( + validPacketData = []byte("VALID PACKET DATA") + disabledTimeoutTimestamp = uint64(0) + disabledTimeoutHeight = clienttypes.ZeroHeight() + timeoutHeight = clienttypes.NewHeight(0, 100) + + // for when the testing package cannot be used + clientIDA = "clientA" + clientIDB = "clientB" + connIDA = "connA" + connIDB = "connB" + portID = "portid" + channelIDA = "channelidA" + channelIDB = "channelidB" +) + +// TestSendPacket tests SendPacket from chainA to chainB +func (suite *KeeperTestSuite) TestSendPacket() { + var ( + packet exported.PacketI + channelCap *capabilitytypes.Capability + ) + + testCases := []testCase{ + {"success: UNORDERED channel", func() { + _, _, _, _, channelA, channelB := suite.coordinator.Setup(suite.chainA, suite.chainB, types.UNORDERED) + packet = types.NewPacket(validPacketData, 1, channelA.PortID, channelA.ID, channelB.PortID, channelB.ID, timeoutHeight, disabledTimeoutTimestamp) + channelCap = suite.chainA.GetChannelCapability(channelA.PortID, channelA.ID) + }, true}, + {"success: ORDERED channel", 
func() { + _, _, _, _, channelA, channelB := suite.coordinator.Setup(suite.chainA, suite.chainB, types.ORDERED) + packet = types.NewPacket(validPacketData, 1, channelA.PortID, channelA.ID, channelB.PortID, channelB.ID, timeoutHeight, disabledTimeoutTimestamp) + channelCap = suite.chainA.GetChannelCapability(channelA.PortID, channelA.ID) + }, true}, + {"sending packet out of order on UNORDERED channel", func() { + // setup creates an unordered channel + _, _, _, _, channelA, channelB := suite.coordinator.Setup(suite.chainA, suite.chainB, types.UNORDERED) + packet = types.NewPacket(validPacketData, 5, channelA.PortID, channelA.ID, channelB.PortID, channelB.ID, timeoutHeight, disabledTimeoutTimestamp) + channelCap = suite.chainA.GetChannelCapability(channelA.PortID, channelA.ID) + }, false}, + {"sending packet out of order on ORDERED channel", func() { + _, _, _, _, channelA, channelB := suite.coordinator.Setup(suite.chainA, suite.chainB, types.ORDERED) + packet = types.NewPacket(validPacketData, 5, channelA.PortID, channelA.ID, channelB.PortID, channelB.ID, timeoutHeight, disabledTimeoutTimestamp) + channelCap = suite.chainA.GetChannelCapability(channelA.PortID, channelA.ID) + }, false}, + {"packet basic validation failed, empty packet data", func() { + _, _, _, _, channelA, channelB := suite.coordinator.Setup(suite.chainA, suite.chainB, types.UNORDERED) + packet = types.NewPacket([]byte{}, 1, channelA.PortID, channelA.ID, channelB.PortID, channelB.ID, timeoutHeight, disabledTimeoutTimestamp) + channelCap = suite.chainA.GetChannelCapability(channelA.PortID, channelA.ID) + }, false}, + {"channel not found", func() { + // use wrong channel naming + _, _, _, _, channelA, channelB := suite.coordinator.Setup(suite.chainA, suite.chainB, types.UNORDERED) + packet = types.NewPacket(validPacketData, 1, ibctesting.InvalidID, ibctesting.InvalidID, channelB.PortID, channelB.ID, timeoutHeight, disabledTimeoutTimestamp) + channelCap = suite.chainA.GetChannelCapability(channelA.PortID, channelA.ID) + }, false}, + {"channel closed", func() { + _, _, _, _, channelA, channelB := suite.coordinator.Setup(suite.chainA, suite.chainB, types.UNORDERED) + packet = types.NewPacket(validPacketData, 1, channelA.PortID, channelA.ID, channelB.PortID, channelB.ID, timeoutHeight, disabledTimeoutTimestamp) + + err := suite.coordinator.SetChannelClosed(suite.chainA, suite.chainB, channelA) + suite.Require().NoError(err) + }, false}, + {"packet dest port ≠ channel counterparty port", func() { + _, _, _, _, channelA, channelB := suite.coordinator.Setup(suite.chainA, suite.chainB, types.UNORDERED) + // use wrong port for dest + packet = types.NewPacket(validPacketData, 1, channelA.PortID, channelA.ID, ibctesting.InvalidID, channelB.ID, timeoutHeight, disabledTimeoutTimestamp) + channelCap = suite.chainA.GetChannelCapability(channelA.PortID, channelA.ID) + }, false}, + {"packet dest channel ID ≠ channel counterparty channel ID", func() { + _, _, _, _, channelA, channelB := suite.coordinator.Setup(suite.chainA, suite.chainB, types.UNORDERED) + // use wrong channel for dest + packet = types.NewPacket(validPacketData, 1, channelA.PortID, channelA.ID, channelB.PortID, ibctesting.InvalidID, timeoutHeight, disabledTimeoutTimestamp) + channelCap = suite.chainA.GetChannelCapability(channelA.PortID, channelA.ID) + }, false}, + {"connection not found", func() { + channelA := ibctesting.TestChannel{PortID: portID, ID: channelIDA} + channelB := ibctesting.TestChannel{PortID: portID, ID: channelIDB} + // pass channel check + 
suite.chainA.App.IBCKeeper.ChannelKeeper.SetChannel( + suite.chainA.GetContext(), + channelA.PortID, channelA.ID, + types.NewChannel(types.OPEN, types.ORDERED, types.NewCounterparty(channelB.PortID, channelB.ID), []string{connIDA}, channelA.Version), + ) + packet = types.NewPacket(validPacketData, 1, channelA.PortID, channelA.ID, channelB.PortID, channelB.ID, timeoutHeight, disabledTimeoutTimestamp) + suite.chainA.CreateChannelCapability(channelA.PortID, channelA.ID) + channelCap = suite.chainA.GetChannelCapability(channelA.PortID, channelA.ID) + }, false}, + {"client state not found", func() { + _, _, connA, _, channelA, channelB := suite.coordinator.Setup(suite.chainA, suite.chainB, types.UNORDERED) + + // change connection client ID + connection := suite.chainA.GetConnection(connA) + connection.ClientId = ibctesting.InvalidID + suite.chainA.App.IBCKeeper.ConnectionKeeper.SetConnection(suite.chainA.GetContext(), connA.ID, connection) + + packet = types.NewPacket(validPacketData, 1, channelA.PortID, channelA.ID, channelB.PortID, channelB.ID, timeoutHeight, disabledTimeoutTimestamp) + channelCap = suite.chainA.GetChannelCapability(channelA.PortID, channelA.ID) + }, false}, + {"client state is frozen", func() { + _, _, connA, _, channelA, channelB := suite.coordinator.Setup(suite.chainA, suite.chainB, types.UNORDERED) + + connection := suite.chainA.GetConnection(connA) + clientState := suite.chainA.GetClientState(connection.ClientId) + cs, ok := clientState.(*ibctmtypes.ClientState) + suite.Require().True(ok) + + // freeze client + cs.FrozenHeight = clienttypes.NewHeight(0, 1) + suite.chainA.App.IBCKeeper.ClientKeeper.SetClientState(suite.chainA.GetContext(), connection.ClientId, cs) + + packet = types.NewPacket(validPacketData, 1, channelA.PortID, channelA.ID, channelB.PortID, channelB.ID, timeoutHeight, disabledTimeoutTimestamp) + channelCap = suite.chainA.GetChannelCapability(channelA.PortID, channelA.ID) + }, false}, + + {"timeout height passed", func() { + clientA, _, _, _, channelA, channelB := suite.coordinator.Setup(suite.chainA, suite.chainB, types.UNORDERED) + // use client state latest height for timeout + clientState := suite.chainA.GetClientState(clientA) + packet = types.NewPacket(validPacketData, 1, channelA.PortID, channelA.ID, channelB.PortID, channelB.ID, clientState.GetLatestHeight().(clienttypes.Height), disabledTimeoutTimestamp) + channelCap = suite.chainA.GetChannelCapability(channelA.PortID, channelA.ID) + }, false}, + {"timeout timestamp passed", func() { + clientA, _, connA, _, channelA, channelB := suite.coordinator.Setup(suite.chainA, suite.chainB, types.UNORDERED) + // use latest time on client state + clientState := suite.chainA.GetClientState(clientA) + connection := suite.chainA.GetConnection(connA) + timestamp, err := suite.chainA.App.IBCKeeper.ConnectionKeeper.GetTimestampAtHeight(suite.chainA.GetContext(), connection, clientState.GetLatestHeight()) + suite.Require().NoError(err) + + packet = types.NewPacket(validPacketData, 1, channelA.PortID, channelA.ID, channelB.PortID, channelB.ID, disabledTimeoutHeight, timestamp) + channelCap = suite.chainA.GetChannelCapability(channelA.PortID, channelA.ID) + }, false}, + {"next sequence send not found", func() { + _, _, connA, connB := suite.coordinator.SetupClientConnections(suite.chainA, suite.chainB, exported.Tendermint) + channelA := suite.chainA.NextTestChannel(connA, ibctesting.TransferPort) + channelB := suite.chainB.NextTestChannel(connB, ibctesting.TransferPort) + packet = types.NewPacket(validPacketData, 1, 
channelA.PortID, channelA.ID, channelB.PortID, channelB.ID, timeoutHeight, disabledTimeoutTimestamp) + // manually creating channel prevents next sequence from being set + suite.chainA.App.IBCKeeper.ChannelKeeper.SetChannel( + suite.chainA.GetContext(), + channelA.PortID, channelA.ID, + types.NewChannel(types.OPEN, types.ORDERED, types.NewCounterparty(channelB.PortID, channelB.ID), []string{connA.ID}, channelA.Version), + ) + suite.chainA.CreateChannelCapability(channelA.PortID, channelA.ID) + channelCap = suite.chainA.GetChannelCapability(channelA.PortID, channelA.ID) + }, false}, + {"next sequence wrong", func() { + _, _, _, _, channelA, channelB := suite.coordinator.Setup(suite.chainA, suite.chainB, types.UNORDERED) + packet = types.NewPacket(validPacketData, 1, channelA.PortID, channelA.ID, channelB.PortID, channelB.ID, timeoutHeight, disabledTimeoutTimestamp) + suite.chainA.App.IBCKeeper.ChannelKeeper.SetNextSequenceSend(suite.chainA.GetContext(), channelA.PortID, channelA.ID, 5) + channelCap = suite.chainA.GetChannelCapability(channelA.PortID, channelA.ID) + }, false}, + {"channel capability not found", func() { + _, _, _, _, channelA, channelB := suite.coordinator.Setup(suite.chainA, suite.chainB, types.UNORDERED) + packet = types.NewPacket(validPacketData, 1, channelA.PortID, channelA.ID, channelB.PortID, channelB.ID, timeoutHeight, disabledTimeoutTimestamp) + channelCap = capabilitytypes.NewCapability(5) + }, false}, + } + + for i, tc := range testCases { + tc := tc + suite.Run(fmt.Sprintf("Case %s, %d/%d tests", tc.msg, i, len(testCases)), func() { + suite.SetupTest() // reset + + tc.malleate() + + err := suite.chainA.App.IBCKeeper.ChannelKeeper.SendPacket(suite.chainA.GetContext(), channelCap, packet) + + if tc.expPass { + suite.Require().NoError(err) + } else { + suite.Require().Error(err) + } + }) + } + +} + +// TestRecvPacket tests RecvPacket on chainB. Since packet commitment verification will always +// occur last (resource intensive), only tests expected to succeed and packet commitment +// verification tests need to simulate sending a packet from chainA to chainB. 
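+// Each entry below follows the suite's shared testCase shape (msg, malleate,
+// expPass). As a point of reference, that helper type is assumed to be declared
+// in the suite's common test setup (not in this file), roughly as:
+//
+//  type testCase = struct {
+//      msg      string
+//      malleate func()
+//      expPass  bool
+//  }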
+func (suite *KeeperTestSuite) TestRecvPacket() { + var ( + packet exported.PacketI + channelCap *capabilitytypes.Capability + ) + + testCases := []testCase{ + {"success: ORDERED channel", func() { + _, clientB, _, _, channelA, channelB := suite.coordinator.Setup(suite.chainA, suite.chainB, types.ORDERED) + packet = types.NewPacket(validPacketData, 1, channelA.PortID, channelA.ID, channelB.PortID, channelB.ID, timeoutHeight, disabledTimeoutTimestamp) + err := suite.coordinator.SendPacket(suite.chainA, suite.chainB, packet, clientB) + suite.Require().NoError(err) + channelCap = suite.chainB.GetChannelCapability(channelB.PortID, channelB.ID) + }, true}, + {"success UNORDERED channel", func() { + // setup uses an UNORDERED channel + _, clientB, _, _, channelA, channelB := suite.coordinator.Setup(suite.chainA, suite.chainB, types.UNORDERED) + packet = types.NewPacket(validPacketData, 1, channelA.PortID, channelA.ID, channelB.PortID, channelB.ID, timeoutHeight, disabledTimeoutTimestamp) + err := suite.coordinator.SendPacket(suite.chainA, suite.chainB, packet, clientB) + suite.Require().NoError(err) + channelCap = suite.chainB.GetChannelCapability(channelB.PortID, channelB.ID) + }, true}, + {"success with out of order packet: UNORDERED channel", func() { + // setup uses an UNORDERED channel + _, clientB, _, _, channelA, channelB := suite.coordinator.Setup(suite.chainA, suite.chainB, types.UNORDERED) + packet = types.NewPacket(validPacketData, 1, channelA.PortID, channelA.ID, channelB.PortID, channelB.ID, timeoutHeight, disabledTimeoutTimestamp) + + // send 2 packets + err := suite.coordinator.SendPacket(suite.chainA, suite.chainB, packet, clientB) + suite.Require().NoError(err) + // set sequence to 2 + packet = types.NewPacket(validPacketData, 2, channelA.PortID, channelA.ID, channelB.PortID, channelB.ID, timeoutHeight, disabledTimeoutTimestamp) + err = suite.coordinator.SendPacket(suite.chainA, suite.chainB, packet, clientB) + suite.Require().NoError(err) + // attempts to receive packet 2 without receiving packet 1 + channelCap = suite.chainB.GetChannelCapability(channelB.PortID, channelB.ID) + }, true}, + {"out of order packet failure with ORDERED channel", func() { + _, clientB, _, _, channelA, channelB := suite.coordinator.Setup(suite.chainA, suite.chainB, types.ORDERED) + packet = types.NewPacket(validPacketData, 1, channelA.PortID, channelA.ID, channelB.PortID, channelB.ID, timeoutHeight, disabledTimeoutTimestamp) + + // send 2 packets + err := suite.coordinator.SendPacket(suite.chainA, suite.chainB, packet, clientB) + suite.Require().NoError(err) + // set sequence to 2 + packet = types.NewPacket(validPacketData, 2, channelA.PortID, channelA.ID, channelB.PortID, channelB.ID, timeoutHeight, disabledTimeoutTimestamp) + err = suite.coordinator.SendPacket(suite.chainA, suite.chainB, packet, clientB) + suite.Require().NoError(err) + // attempts to receive packet 2 without receiving packet 1 + channelCap = suite.chainB.GetChannelCapability(channelB.PortID, channelB.ID) + }, false}, + {"channel not found", func() { + // use wrong channel naming + _, _, _, _, channelA, channelB := suite.coordinator.Setup(suite.chainA, suite.chainB, types.UNORDERED) + packet = types.NewPacket(validPacketData, 1, channelA.PortID, channelA.ID, ibctesting.InvalidID, ibctesting.InvalidID, timeoutHeight, disabledTimeoutTimestamp) + channelCap = suite.chainB.GetChannelCapability(channelB.PortID, channelB.ID) + }, false}, + {"channel not open", func() { + _, _, _, _, channelA, channelB := 
suite.coordinator.Setup(suite.chainA, suite.chainB, types.UNORDERED) + packet = types.NewPacket(validPacketData, 1, channelA.PortID, channelA.ID, channelB.PortID, channelB.ID, timeoutHeight, disabledTimeoutTimestamp) + + err := suite.coordinator.SetChannelClosed(suite.chainB, suite.chainA, channelB) + suite.Require().NoError(err) + channelCap = suite.chainB.GetChannelCapability(channelB.PortID, channelB.ID) + }, false}, + {"capability cannot authenticate", func() { + _, clientB, _, _, channelA, channelB := suite.coordinator.Setup(suite.chainA, suite.chainB, types.ORDERED) + packet = types.NewPacket(validPacketData, 1, channelA.PortID, channelA.ID, channelB.PortID, channelB.ID, timeoutHeight, disabledTimeoutTimestamp) + err := suite.coordinator.SendPacket(suite.chainA, suite.chainB, packet, clientB) + suite.Require().NoError(err) + channelCap = capabilitytypes.NewCapability(3) + }, false}, + {"packet source port ≠ channel counterparty port", func() { + _, _, _, _, channelA, channelB := suite.coordinator.Setup(suite.chainA, suite.chainB, types.UNORDERED) + // use wrong port for dest + packet = types.NewPacket(validPacketData, 1, ibctesting.InvalidID, channelA.ID, channelB.PortID, channelB.ID, timeoutHeight, disabledTimeoutTimestamp) + channelCap = suite.chainB.GetChannelCapability(channelB.PortID, channelB.ID) + }, false}, + {"packet source channel ID ≠ channel counterparty channel ID", func() { + _, _, _, _, channelA, channelB := suite.coordinator.Setup(suite.chainA, suite.chainB, types.UNORDERED) + // use wrong port for dest + packet = types.NewPacket(validPacketData, 1, channelA.PortID, ibctesting.InvalidID, channelB.PortID, channelB.ID, timeoutHeight, disabledTimeoutTimestamp) + channelCap = suite.chainB.GetChannelCapability(channelB.PortID, channelB.ID) + }, false}, + {"connection not found", func() { + channelA := ibctesting.TestChannel{PortID: portID, ID: channelIDA} + channelB := ibctesting.TestChannel{PortID: portID, ID: channelIDB} + // pass channel check + suite.chainB.App.IBCKeeper.ChannelKeeper.SetChannel( + suite.chainB.GetContext(), + channelB.PortID, channelB.ID, + types.NewChannel(types.OPEN, types.ORDERED, types.NewCounterparty(channelA.PortID, channelA.ID), []string{connIDB}, channelB.Version), + ) + packet = types.NewPacket(validPacketData, 1, channelA.PortID, channelA.ID, channelB.PortID, channelB.ID, timeoutHeight, disabledTimeoutTimestamp) + suite.chainB.CreateChannelCapability(channelB.PortID, channelB.ID) + channelCap = suite.chainB.GetChannelCapability(channelB.PortID, channelB.ID) + }, false}, + {"connection not OPEN", func() { + clientA, clientB := suite.coordinator.SetupClients(suite.chainA, suite.chainB, exported.Tendermint) + // connection on chainB is in INIT + connB, connA, err := suite.coordinator.ConnOpenInit(suite.chainB, suite.chainA, clientB, clientA) + suite.Require().NoError(err) + + channelA := suite.chainA.NextTestChannel(connA, ibctesting.TransferPort) + channelB := suite.chainB.NextTestChannel(connB, ibctesting.TransferPort) + // pass channel check + suite.chainB.App.IBCKeeper.ChannelKeeper.SetChannel( + suite.chainB.GetContext(), + channelB.PortID, channelB.ID, + types.NewChannel(types.OPEN, types.ORDERED, types.NewCounterparty(channelA.PortID, channelA.ID), []string{connB.ID}, channelB.Version), + ) + packet = types.NewPacket(validPacketData, 1, channelA.PortID, channelA.ID, channelB.PortID, channelB.ID, timeoutHeight, disabledTimeoutTimestamp) + suite.chainB.CreateChannelCapability(channelB.PortID, channelB.ID) + channelCap = 
suite.chainB.GetChannelCapability(channelB.PortID, channelB.ID) + }, false}, + {"timeout height passed", func() { + _, _, _, _, channelA, channelB := suite.coordinator.Setup(suite.chainA, suite.chainB, types.UNORDERED) + packet = types.NewPacket(validPacketData, 1, channelA.PortID, channelA.ID, channelB.PortID, channelB.ID, clienttypes.GetSelfHeight(suite.chainB.GetContext()), disabledTimeoutTimestamp) + channelCap = suite.chainB.GetChannelCapability(channelB.PortID, channelB.ID) + }, false}, + {"timeout timestamp passed", func() { + _, _, _, _, channelA, channelB := suite.coordinator.Setup(suite.chainA, suite.chainB, types.UNORDERED) + packet = types.NewPacket(validPacketData, 1, channelA.PortID, channelA.ID, channelB.PortID, channelB.ID, disabledTimeoutHeight, uint64(suite.chainB.GetContext().BlockTime().UnixNano())) + channelCap = suite.chainB.GetChannelCapability(channelB.PortID, channelB.ID) + }, false}, + {"next receive sequence is not found", func() { + _, _, connA, connB := suite.coordinator.SetupClientConnections(suite.chainA, suite.chainB, exported.Tendermint) + channelA := suite.chainA.NextTestChannel(connA, ibctesting.TransferPort) + channelB := suite.chainB.NextTestChannel(connB, ibctesting.TransferPort) + + // manually creating channel prevents next recv sequence from being set + suite.chainB.App.IBCKeeper.ChannelKeeper.SetChannel( + suite.chainB.GetContext(), + channelB.PortID, channelB.ID, + types.NewChannel(types.OPEN, types.ORDERED, types.NewCounterparty(channelA.PortID, channelA.ID), []string{connB.ID}, channelB.Version), + ) + + packet = types.NewPacket(validPacketData, 1, channelA.PortID, channelA.ID, channelB.PortID, channelB.ID, timeoutHeight, disabledTimeoutTimestamp) + + // manually set packet commitment + suite.chainA.App.IBCKeeper.ChannelKeeper.SetPacketCommitment(suite.chainA.GetContext(), channelA.PortID, channelA.ID, packet.GetSequence(), ibctesting.TestHash) + suite.chainB.CreateChannelCapability(channelB.PortID, channelB.ID) + + channelCap = suite.chainB.GetChannelCapability(channelB.PortID, channelB.ID) + }, false}, + {"receipt already stored", func() { + _, clientB, _, _, channelA, channelB := suite.coordinator.Setup(suite.chainA, suite.chainB, types.UNORDERED) + packet = types.NewPacket(validPacketData, 1, channelA.PortID, channelA.ID, channelB.PortID, channelB.ID, timeoutHeight, disabledTimeoutTimestamp) + suite.coordinator.SendPacket(suite.chainA, suite.chainB, packet, clientB) + suite.chainB.App.IBCKeeper.ChannelKeeper.SetPacketReceipt(suite.chainB.GetContext(), channelB.PortID, channelB.ID, 1) + channelCap = suite.chainB.GetChannelCapability(channelB.PortID, channelB.ID) + }, false}, + {"validation failed", func() { + // packet commitment not set resulting in invalid proof + _, _, _, _, channelA, channelB := suite.coordinator.Setup(suite.chainA, suite.chainB, types.UNORDERED) + packet = types.NewPacket(validPacketData, 1, channelA.PortID, channelA.ID, channelB.PortID, channelB.ID, timeoutHeight, disabledTimeoutTimestamp) + channelCap = suite.chainB.GetChannelCapability(channelB.PortID, channelB.ID) + }, false}, + } + + for i, tc := range testCases { + tc := tc + suite.Run(fmt.Sprintf("Case %s, %d/%d tests", tc.msg, i, len(testCases)), func() { + suite.SetupTest() // reset + tc.malleate() + + // get proof of packet commitment from chainA + packetKey := host.PacketCommitmentKey(packet.GetSourcePort(), packet.GetSourceChannel(), packet.GetSequence()) + proof, proofHeight := suite.chainA.QueryProof(packetKey) + + err := 
suite.chainB.App.IBCKeeper.ChannelKeeper.RecvPacket(suite.chainB.GetContext(), channelCap, packet, proof, proofHeight) + + if tc.expPass { + suite.Require().NoError(err) + + channelB, _ := suite.chainB.App.IBCKeeper.ChannelKeeper.GetChannel(suite.chainB.GetContext(), packet.GetDestPort(), packet.GetDestChannel()) + nextSeqRecv, found := suite.chainB.App.IBCKeeper.ChannelKeeper.GetNextSequenceRecv(suite.chainB.GetContext(), packet.GetDestPort(), packet.GetDestChannel()) + suite.Require().True(found) + receipt, receiptStored := suite.chainB.App.IBCKeeper.ChannelKeeper.GetPacketReceipt(suite.chainB.GetContext(), packet.GetDestPort(), packet.GetDestChannel(), packet.GetSequence()) + + if channelB.Ordering == types.ORDERED { + suite.Require().Equal(packet.GetSequence()+1, nextSeqRecv, "sequence not incremented in ordered channel") + suite.Require().False(receiptStored, "packet receipt stored on ORDERED channel") + } else { + suite.Require().Equal(uint64(1), nextSeqRecv, "sequence incremented for UNORDERED channel") + suite.Require().True(receiptStored, "packet receipt not stored after RecvPacket in UNORDERED channel") + suite.Require().Equal(string([]byte{byte(1)}), receipt, "packet receipt is not empty string") + } + } else { + suite.Require().Error(err) + } + }) + } + +} + +func (suite *KeeperTestSuite) TestWriteAcknowledgement() { + var ( + ack []byte + packet exported.PacketI + channelCap *capabilitytypes.Capability + ) + + testCases := []testCase{ + { + "success", + func() { + _, _, _, _, channelA, channelB := suite.coordinator.Setup(suite.chainA, suite.chainB, types.UNORDERED) + packet = types.NewPacket(validPacketData, 1, channelA.PortID, channelA.ID, channelB.PortID, channelB.ID, timeoutHeight, disabledTimeoutTimestamp) + ack = ibctesting.TestHash + channelCap = suite.chainB.GetChannelCapability(channelB.PortID, channelB.ID) + }, + true, + }, + {"channel not found", func() { + // use wrong channel naming + _, _, _, _, channelA, channelB := suite.coordinator.Setup(suite.chainA, suite.chainB, types.UNORDERED) + packet = types.NewPacket(validPacketData, 1, channelA.PortID, channelA.ID, ibctesting.InvalidID, ibctesting.InvalidID, timeoutHeight, disabledTimeoutTimestamp) + ack = ibctesting.TestHash + channelCap = suite.chainB.GetChannelCapability(channelB.PortID, channelB.ID) + }, false}, + {"channel not open", func() { + _, _, _, _, channelA, channelB := suite.coordinator.Setup(suite.chainA, suite.chainB, types.UNORDERED) + packet = types.NewPacket(validPacketData, 1, channelA.PortID, channelA.ID, channelB.PortID, channelB.ID, timeoutHeight, disabledTimeoutTimestamp) + ack = ibctesting.TestHash + + err := suite.coordinator.SetChannelClosed(suite.chainB, suite.chainA, channelB) + suite.Require().NoError(err) + channelCap = suite.chainB.GetChannelCapability(channelB.PortID, channelB.ID) + }, false}, + { + "capability authentication failed", + func() { + _, _, _, _, channelA, channelB := suite.coordinator.Setup(suite.chainA, suite.chainB, types.UNORDERED) + packet = types.NewPacket(validPacketData, 1, channelA.PortID, channelA.ID, channelB.PortID, channelB.ID, timeoutHeight, disabledTimeoutTimestamp) + ack = ibctesting.TestHash + channelCap = capabilitytypes.NewCapability(3) + }, + false, + }, + { + "no-op, already acked", + func() { + _, _, _, _, channelA, channelB := suite.coordinator.Setup(suite.chainA, suite.chainB, types.UNORDERED) + packet = types.NewPacket(validPacketData, 1, channelA.PortID, channelA.ID, channelB.PortID, channelB.ID, timeoutHeight, disabledTimeoutTimestamp) + ack = 
ibctesting.TestHash + suite.chainB.App.IBCKeeper.ChannelKeeper.SetPacketAcknowledgement(suite.chainB.GetContext(), packet.GetDestPort(), packet.GetDestChannel(), packet.GetSequence(), ack) + channelCap = suite.chainB.GetChannelCapability(channelB.PortID, channelB.ID) + }, + false, + }, + { + "empty acknowledgement", + func() { + _, _, _, _, channelA, channelB := suite.coordinator.Setup(suite.chainA, suite.chainB, types.UNORDERED) + packet = types.NewPacket(validPacketData, 1, channelA.PortID, channelA.ID, channelB.PortID, channelB.ID, timeoutHeight, disabledTimeoutTimestamp) + ack = nil + channelCap = suite.chainB.GetChannelCapability(channelB.PortID, channelB.ID) + }, + false, + }, + } + for i, tc := range testCases { + tc := tc + suite.Run(fmt.Sprintf("Case %s, %d/%d tests", tc.msg, i, len(testCases)), func() { + suite.SetupTest() // reset + + tc.malleate() + + err := suite.chainB.App.IBCKeeper.ChannelKeeper.WriteAcknowledgement(suite.chainB.GetContext(), channelCap, packet, ack) + + if tc.expPass { + suite.Require().NoError(err) + } else { + suite.Require().Error(err) + } + }) + } +} + +// TestAcknowledgePacket tests the call AcknowledgePacket on chainA. +func (suite *KeeperTestSuite) TestAcknowledgePacket() { + var ( + packet types.Packet + ack = ibcmock.MockAcknowledgement + + channelCap *capabilitytypes.Capability + ) + + testCases := []testCase{ + {"success on ordered channel", func() { + clientA, clientB, _, _, channelA, channelB := suite.coordinator.Setup(suite.chainA, suite.chainB, types.ORDERED) + packet = types.NewPacket(validPacketData, 1, channelA.PortID, channelA.ID, channelB.PortID, channelB.ID, timeoutHeight, disabledTimeoutTimestamp) + // create packet commitment + err := suite.coordinator.SendPacket(suite.chainA, suite.chainB, packet, clientB) + suite.Require().NoError(err) + + // create packet receipt and acknowledgement + err = suite.coordinator.RecvPacket(suite.chainA, suite.chainB, clientA, packet) + suite.Require().NoError(err) + + channelCap = suite.chainA.GetChannelCapability(channelA.PortID, channelA.ID) + }, true}, + {"success on unordered channel", func() { + // setup uses an UNORDERED channel + clientA, clientB, _, _, channelA, channelB := suite.coordinator.Setup(suite.chainA, suite.chainB, types.UNORDERED) + packet = types.NewPacket(validPacketData, 1, channelA.PortID, channelA.ID, channelB.PortID, channelB.ID, timeoutHeight, disabledTimeoutTimestamp) + + // create packet commitment + err := suite.coordinator.SendPacket(suite.chainA, suite.chainB, packet, clientB) + suite.Require().NoError(err) + + // create packet receipt and acknowledgement + err = suite.coordinator.RecvPacket(suite.chainA, suite.chainB, clientA, packet) + suite.Require().NoError(err) + + channelCap = suite.chainA.GetChannelCapability(channelA.PortID, channelA.ID) + }, true}, + {"channel not found", func() { + // use wrong channel naming + _, _, _, _, _, channelB := suite.coordinator.Setup(suite.chainA, suite.chainB, types.UNORDERED) + packet = types.NewPacket(validPacketData, 1, ibctesting.InvalidID, ibctesting.InvalidID, channelB.PortID, channelB.ID, timeoutHeight, disabledTimeoutTimestamp) + }, false}, + {"channel not open", func() { + _, _, _, _, channelA, channelB := suite.coordinator.Setup(suite.chainA, suite.chainB, types.UNORDERED) + packet = types.NewPacket(validPacketData, 1, channelA.PortID, channelA.ID, channelB.PortID, channelB.ID, timeoutHeight, disabledTimeoutTimestamp) + + err := suite.coordinator.SetChannelClosed(suite.chainA, suite.chainB, channelA) + 
suite.Require().NoError(err) + channelCap = suite.chainA.GetChannelCapability(channelA.PortID, channelA.ID) + }, false}, + {"capability authentication failed", func() { + clientA, clientB, _, _, channelA, channelB := suite.coordinator.Setup(suite.chainA, suite.chainB, types.ORDERED) + packet = types.NewPacket(validPacketData, 1, channelA.PortID, channelA.ID, channelB.PortID, channelB.ID, timeoutHeight, disabledTimeoutTimestamp) + // create packet commitment + err := suite.coordinator.SendPacket(suite.chainA, suite.chainB, packet, clientB) + suite.Require().NoError(err) + + // create packet receipt and acknowledgement + err = suite.coordinator.RecvPacket(suite.chainA, suite.chainB, clientA, packet) + suite.Require().NoError(err) + + channelCap = capabilitytypes.NewCapability(3) + }, false}, + {"packet destination port ≠ channel counterparty port", func() { + _, _, _, _, channelA, channelB := suite.coordinator.Setup(suite.chainA, suite.chainB, types.UNORDERED) + // use wrong port for dest + packet = types.NewPacket(validPacketData, 1, channelA.PortID, channelA.ID, ibctesting.InvalidID, channelB.ID, timeoutHeight, disabledTimeoutTimestamp) + channelCap = suite.chainA.GetChannelCapability(channelA.PortID, channelA.ID) + }, false}, + {"packet destination channel ID ≠ channel counterparty channel ID", func() { + _, _, _, _, channelA, channelB := suite.coordinator.Setup(suite.chainA, suite.chainB, types.UNORDERED) + // use wrong channel for dest + packet = types.NewPacket(validPacketData, 1, channelA.PortID, channelA.ID, channelB.PortID, ibctesting.InvalidID, timeoutHeight, disabledTimeoutTimestamp) + channelCap = suite.chainA.GetChannelCapability(channelA.PortID, channelA.ID) + }, false}, + {"connection not found", func() { + channelA := ibctesting.TestChannel{PortID: portID, ID: channelIDA} + channelB := ibctesting.TestChannel{PortID: portID, ID: channelIDB} + // pass channel check + suite.chainB.App.IBCKeeper.ChannelKeeper.SetChannel( + suite.chainB.GetContext(), + channelB.PortID, channelB.ID, + types.NewChannel(types.OPEN, types.ORDERED, types.NewCounterparty(channelA.PortID, channelA.ID), []string{connIDB}, channelB.Version), + ) + packet = types.NewPacket(validPacketData, 1, channelA.PortID, channelA.ID, channelB.PortID, channelB.ID, timeoutHeight, disabledTimeoutTimestamp) + suite.chainA.CreateChannelCapability(channelA.PortID, channelA.ID) + channelCap = suite.chainA.GetChannelCapability(channelA.PortID, channelA.ID) + }, false}, + {"connection not OPEN", func() { + clientA, clientB := suite.coordinator.SetupClients(suite.chainA, suite.chainB, exported.Tendermint) + // connection on chainA is in INIT + connA, connB, err := suite.coordinator.ConnOpenInit(suite.chainA, suite.chainB, clientA, clientB) + suite.Require().NoError(err) + + channelA := suite.chainA.NextTestChannel(connA, ibctesting.TransferPort) + channelB := suite.chainB.NextTestChannel(connB, ibctesting.TransferPort) + // pass channel check + suite.chainA.App.IBCKeeper.ChannelKeeper.SetChannel( + suite.chainA.GetContext(), + channelA.PortID, channelA.ID, + types.NewChannel(types.OPEN, types.ORDERED, types.NewCounterparty(channelB.PortID, channelB.ID), []string{connA.ID}, channelA.Version), + ) + packet = types.NewPacket(validPacketData, 1, channelA.PortID, channelA.ID, channelB.PortID, channelB.ID, timeoutHeight, disabledTimeoutTimestamp) + suite.chainA.CreateChannelCapability(channelA.PortID, channelA.ID) + channelCap = suite.chainA.GetChannelCapability(channelA.PortID, channelA.ID) + }, false}, + {"packet hasn't been sent", 
func() { + // packet commitment never written + _, _, _, _, channelA, channelB := suite.coordinator.Setup(suite.chainA, suite.chainB, types.UNORDERED) + packet = types.NewPacket(validPacketData, 1, channelA.PortID, channelA.ID, channelB.PortID, channelB.ID, timeoutHeight, disabledTimeoutTimestamp) + channelCap = suite.chainA.GetChannelCapability(channelA.PortID, channelA.ID) + }, false}, + {"packet ack verification failed", func() { + // ack never written + _, clientB, _, _, channelA, channelB := suite.coordinator.Setup(suite.chainA, suite.chainB, types.UNORDERED) + packet = types.NewPacket(validPacketData, 1, channelA.PortID, channelA.ID, channelB.PortID, channelB.ID, timeoutHeight, disabledTimeoutTimestamp) + + // create packet commitment + suite.coordinator.SendPacket(suite.chainA, suite.chainB, packet, clientB) + channelCap = suite.chainA.GetChannelCapability(channelA.PortID, channelA.ID) + }, false}, + {"next ack sequence not found", func() { + _, _, connA, connB := suite.coordinator.SetupClientConnections(suite.chainA, suite.chainB, exported.Tendermint) + channelA := suite.chainA.NextTestChannel(connA, ibctesting.TransferPort) + channelB := suite.chainB.NextTestChannel(connB, ibctesting.TransferPort) + packet = types.NewPacket(validPacketData, 1, channelA.PortID, channelA.ID, channelB.PortID, channelB.ID, timeoutHeight, disabledTimeoutTimestamp) + // manually creating channel prevents next sequence acknowledgement from being set + suite.chainA.App.IBCKeeper.ChannelKeeper.SetChannel( + suite.chainA.GetContext(), + channelA.PortID, channelA.ID, + types.NewChannel(types.OPEN, types.ORDERED, types.NewCounterparty(channelB.PortID, channelB.ID), []string{connA.ID}, channelA.Version), + ) + // manually set packet commitment + suite.chainA.App.IBCKeeper.ChannelKeeper.SetPacketCommitment(suite.chainA.GetContext(), channelA.PortID, channelA.ID, packet.GetSequence(), ibctesting.TestHash) + + // manually set packet acknowledgement and capability + suite.chainB.App.IBCKeeper.ChannelKeeper.SetPacketAcknowledgement(suite.chainB.GetContext(), channelB.PortID, channelB.ID, packet.GetSequence(), ibctesting.TestHash) + suite.chainA.CreateChannelCapability(channelA.PortID, channelA.ID) + channelCap = suite.chainA.GetChannelCapability(channelA.PortID, channelA.ID) + }, false}, + {"next ack sequence mismatch", func() { + clientA, clientB, _, _, channelA, channelB := suite.coordinator.Setup(suite.chainA, suite.chainB, types.ORDERED) + packet = types.NewPacket(validPacketData, 1, channelA.PortID, channelA.ID, channelB.PortID, channelB.ID, timeoutHeight, disabledTimeoutTimestamp) + // create packet commitment + err := suite.coordinator.SendPacket(suite.chainA, suite.chainB, packet, clientB) + suite.Require().NoError(err) + + // create packet acknowledgement + err = suite.coordinator.RecvPacket(suite.chainA, suite.chainB, clientA, packet) + suite.Require().NoError(err) + + // set next sequence ack wrong + suite.chainA.App.IBCKeeper.ChannelKeeper.SetNextSequenceAck(suite.chainA.GetContext(), channelA.PortID, channelA.ID, 10) + channelCap = suite.chainA.GetChannelCapability(channelA.PortID, channelA.ID) + }, false}, + } + + for i, tc := range testCases { + tc := tc + suite.Run(fmt.Sprintf("Case %s, %d/%d tests", tc.msg, i, len(testCases)), func() { + suite.SetupTest() // reset + tc.malleate() + + packetKey := host.PacketAcknowledgementKey(packet.GetDestPort(), packet.GetDestChannel(), packet.GetSequence()) + proof, proofHeight := suite.chainB.QueryProof(packetKey) + + err := 
suite.chainA.App.IBCKeeper.ChannelKeeper.AcknowledgePacket(suite.chainA.GetContext(), channelCap, packet, ack, proof, proofHeight) + pc := suite.chainA.App.IBCKeeper.ChannelKeeper.GetPacketCommitment(suite.chainA.GetContext(), packet.GetSourcePort(), packet.GetSourceChannel(), packet.GetSequence()) + + channelA, _ := suite.chainA.App.IBCKeeper.ChannelKeeper.GetChannel(suite.chainA.GetContext(), packet.GetSourcePort(), packet.GetSourceChannel()) + sequenceAck, _ := suite.chainA.App.IBCKeeper.ChannelKeeper.GetNextSequenceAck(suite.chainA.GetContext(), packet.GetSourcePort(), packet.GetSourceChannel()) + + if tc.expPass { + suite.NoError(err) + suite.Nil(pc) + + if channelA.Ordering == types.ORDERED { + suite.Require().Equal(packet.GetSequence()+1, sequenceAck, "sequence not incremented in ordered channel") + } else { + suite.Require().Equal(uint64(1), sequenceAck, "sequence incremented for UNORDERED channel") + } + } else { + suite.Error(err) + } + }) + } +} diff --git a/core/04-channel/keeper/timeout.go b/core/04-channel/keeper/timeout.go new file mode 100644 index 0000000000..1f3dac918f --- /dev/null +++ b/core/04-channel/keeper/timeout.go @@ -0,0 +1,276 @@ +package keeper + +import ( + "bytes" + "fmt" + + sdk "github.com/cosmos/cosmos-sdk/types" + sdkerrors "github.com/cosmos/cosmos-sdk/types/errors" + capabilitytypes "github.com/cosmos/cosmos-sdk/x/capability/types" + connectiontypes "github.com/cosmos/cosmos-sdk/x/ibc/core/03-connection/types" + "github.com/cosmos/cosmos-sdk/x/ibc/core/04-channel/types" + host "github.com/cosmos/cosmos-sdk/x/ibc/core/24-host" + "github.com/cosmos/cosmos-sdk/x/ibc/core/exported" +) + +// TimeoutPacket is called by a module which originally attempted to send a +// packet to a counterparty module, where the timeout height has passed on the +// counterparty chain without the packet being committed, to prove that the +// packet can no longer be executed and to allow the calling module to safely +// perform appropriate state transitions. Its intended usage is within the +// ante handler. 
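+//
+// As a non-authoritative sketch, a caller (for example a Msg handler) is
+// expected to pair this verification step with TimeoutExecuted, which performs
+// the actual state changes; the message fields shown here are illustrative
+// assumptions only:
+//
+//  // prove the packet was never received before the timeout expired
+//  if err := k.TimeoutPacket(ctx, msg.Packet, msg.Proof, msg.ProofHeight, msg.NextSequenceRecv); err != nil {
+//      return err
+//  }
+//  // delete the packet commitment (and close the channel if it is ORDERED)
+//  if err := k.TimeoutExecuted(ctx, chanCap, msg.Packet); err != nil {
+//      return err
+//  }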
+func (k Keeper) TimeoutPacket( + ctx sdk.Context, + packet exported.PacketI, + proof []byte, + proofHeight exported.Height, + nextSequenceRecv uint64, +) error { + channel, found := k.GetChannel(ctx, packet.GetSourcePort(), packet.GetSourceChannel()) + if !found { + return sdkerrors.Wrapf( + types.ErrChannelNotFound, + "port ID (%s) channel ID (%s)", packet.GetSourcePort(), packet.GetSourceChannel(), + ) + } + + if channel.State != types.OPEN { + return sdkerrors.Wrapf( + types.ErrInvalidChannelState, + "channel state is not OPEN (got %s)", channel.State.String(), + ) + } + + // NOTE: TimeoutPacket is called by the AnteHandler which acts upon the packet.Route(), + // so the capability authentication can be omitted here + + if packet.GetDestPort() != channel.Counterparty.PortId { + return sdkerrors.Wrapf( + types.ErrInvalidPacket, + "packet destination port doesn't match the counterparty's port (%s ≠ %s)", packet.GetDestPort(), channel.Counterparty.PortId, + ) + } + + if packet.GetDestChannel() != channel.Counterparty.ChannelId { + return sdkerrors.Wrapf( + types.ErrInvalidPacket, + "packet destination channel doesn't match the counterparty's channel (%s ≠ %s)", packet.GetDestChannel(), channel.Counterparty.ChannelId, + ) + } + + connectionEnd, found := k.connectionKeeper.GetConnection(ctx, channel.ConnectionHops[0]) + if !found { + return sdkerrors.Wrap( + connectiontypes.ErrConnectionNotFound, + channel.ConnectionHops[0], + ) + } + + // check that timeout height or timeout timestamp has passed on the other end + proofTimestamp, err := k.connectionKeeper.GetTimestampAtHeight(ctx, connectionEnd, proofHeight) + if err != nil { + return err + } + + timeoutHeight := packet.GetTimeoutHeight() + if (timeoutHeight.IsZero() || proofHeight.LT(timeoutHeight)) && + (packet.GetTimeoutTimestamp() == 0 || proofTimestamp < packet.GetTimeoutTimestamp()) { + return sdkerrors.Wrap(types.ErrPacketTimeout, "packet timeout has not been reached for height or timestamp") + } + + commitment := k.GetPacketCommitment(ctx, packet.GetSourcePort(), packet.GetSourceChannel(), packet.GetSequence()) + + packetCommitment := types.CommitPacket(k.cdc, packet) + + // verify we sent the packet and haven't cleared it out yet + if !bytes.Equal(commitment, packetCommitment) { + return sdkerrors.Wrapf(types.ErrInvalidPacket, "packet commitment bytes are not equal: got (%v), expected (%v)", commitment, packetCommitment) + } + + switch channel.Ordering { + case types.ORDERED: + // check that packet has not been received + if nextSequenceRecv > packet.GetSequence() { + return sdkerrors.Wrapf( + types.ErrInvalidPacket, + "packet already received, next sequence receive > packet sequence (%d > %d)", nextSequenceRecv, packet.GetSequence(), + ) + } + + // check that the recv sequence is as claimed + err = k.connectionKeeper.VerifyNextSequenceRecv( + ctx, connectionEnd, proofHeight, proof, + packet.GetDestPort(), packet.GetDestChannel(), nextSequenceRecv, + ) + case types.UNORDERED: + err = k.connectionKeeper.VerifyPacketReceiptAbsence( + ctx, connectionEnd, proofHeight, proof, + packet.GetDestPort(), packet.GetDestChannel(), packet.GetSequence(), + ) + default: + panic(sdkerrors.Wrapf(types.ErrInvalidChannelOrdering, channel.Ordering.String())) + } + + if err != nil { + return err + } + + // NOTE: the remaining code is located in the TimeoutExecuted function + return nil +} + +// TimeoutExecuted deletes the commitment sent from this chain after it verifies the timeout. 
+// If the timed-out packet came from an ORDERED channel then this channel will be closed. +// +// CONTRACT: this function must be called in the IBC handler +func (k Keeper) TimeoutExecuted( + ctx sdk.Context, + chanCap *capabilitytypes.Capability, + packet exported.PacketI, +) error { + channel, found := k.GetChannel(ctx, packet.GetSourcePort(), packet.GetSourceChannel()) + if !found { + return sdkerrors.Wrapf(types.ErrChannelNotFound, "port ID (%s) channel ID (%s)", packet.GetSourcePort(), packet.GetSourceChannel()) + } + + capName := host.ChannelCapabilityPath(packet.GetSourcePort(), packet.GetSourceChannel()) + if !k.scopedKeeper.AuthenticateCapability(ctx, chanCap, capName) { + return sdkerrors.Wrapf( + types.ErrChannelCapabilityNotFound, + "caller does not own capability for channel with capability name %s", capName, + ) + } + + k.deletePacketCommitment(ctx, packet.GetSourcePort(), packet.GetSourceChannel(), packet.GetSequence()) + + if channel.Ordering == types.ORDERED { + channel.State = types.CLOSED + k.SetChannel(ctx, packet.GetSourcePort(), packet.GetSourceChannel(), channel) + } + + k.Logger(ctx).Info("packet timed-out", "packet", fmt.Sprintf("%v", packet)) + + // emit an event marking that we have processed the timeout + ctx.EventManager().EmitEvents(sdk.Events{ + sdk.NewEvent( + types.EventTypeTimeoutPacket, + sdk.NewAttribute(types.AttributeKeyTimeoutHeight, packet.GetTimeoutHeight().String()), + sdk.NewAttribute(types.AttributeKeyTimeoutTimestamp, fmt.Sprintf("%d", packet.GetTimeoutTimestamp())), + sdk.NewAttribute(types.AttributeKeySequence, fmt.Sprintf("%d", packet.GetSequence())), + sdk.NewAttribute(types.AttributeKeySrcPort, packet.GetSourcePort()), + sdk.NewAttribute(types.AttributeKeySrcChannel, packet.GetSourceChannel()), + sdk.NewAttribute(types.AttributeKeyDstPort, packet.GetDestPort()), + sdk.NewAttribute(types.AttributeKeyDstChannel, packet.GetDestChannel()), + sdk.NewAttribute(types.AttributeKeyChannelOrdering, channel.Ordering.String()), + ), + sdk.NewEvent( + sdk.EventTypeMessage, + sdk.NewAttribute(sdk.AttributeKeyModule, types.AttributeValueCategory), + ), + }) + + return nil +} + +// TimeoutOnClose is called by a module in order to prove that the channel to +// which an unreceived packet was addressed has been closed, so the packet will +// never be received (even if the timeoutHeight has not yet been reached). 
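+//
+// A minimal sketch of how the two proofs are typically obtained, mirroring the
+// keeper tests in this package; counterpartyChain.QueryProof is an assumed
+// helper used only for illustration:
+//
+//  proofClosed, proofHeight := counterpartyChain.QueryProof(host.ChannelKey(packet.GetDestPort(), packet.GetDestChannel()))
+//  proofUnreceived, _ := counterpartyChain.QueryProof(host.PacketReceiptKey(packet.GetDestPort(), packet.GetDestChannel(), packet.GetSequence())) // UNORDERED channels
+//  err := k.TimeoutOnClose(ctx, chanCap, packet, proofUnreceived, proofClosed, proofHeight, nextSeqRecv)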
+func (k Keeper) TimeoutOnClose( + ctx sdk.Context, + chanCap *capabilitytypes.Capability, + packet exported.PacketI, + proof, + proofClosed []byte, + proofHeight exported.Height, + nextSequenceRecv uint64, +) error { + channel, found := k.GetChannel(ctx, packet.GetSourcePort(), packet.GetSourceChannel()) + if !found { + return sdkerrors.Wrapf(types.ErrChannelNotFound, "port ID (%s) channel ID (%s)", packet.GetSourcePort(), packet.GetSourceChannel()) + } + + capName := host.ChannelCapabilityPath(packet.GetSourcePort(), packet.GetSourceChannel()) + if !k.scopedKeeper.AuthenticateCapability(ctx, chanCap, capName) { + return sdkerrors.Wrapf( + types.ErrInvalidChannelCapability, + "channel capability failed authentication with capability name %s", capName, + ) + } + + if packet.GetDestPort() != channel.Counterparty.PortId { + return sdkerrors.Wrapf( + types.ErrInvalidPacket, + "packet destination port doesn't match the counterparty's port (%s ≠ %s)", packet.GetDestPort(), channel.Counterparty.PortId, + ) + } + + if packet.GetDestChannel() != channel.Counterparty.ChannelId { + return sdkerrors.Wrapf( + types.ErrInvalidPacket, + "packet destination channel doesn't match the counterparty's channel (%s ≠ %s)", packet.GetDestChannel(), channel.Counterparty.ChannelId, + ) + } + + connectionEnd, found := k.connectionKeeper.GetConnection(ctx, channel.ConnectionHops[0]) + if !found { + return sdkerrors.Wrap(connectiontypes.ErrConnectionNotFound, channel.ConnectionHops[0]) + } + + commitment := k.GetPacketCommitment(ctx, packet.GetSourcePort(), packet.GetSourceChannel(), packet.GetSequence()) + + packetCommitment := types.CommitPacket(k.cdc, packet) + + // verify we sent the packet and haven't cleared it out yet + if !bytes.Equal(commitment, packetCommitment) { + return sdkerrors.Wrapf(types.ErrInvalidPacket, "packet commitment bytes are not equal: got (%v), expected (%v)", commitment, packetCommitment) + } + + counterpartyHops, found := k.CounterpartyHops(ctx, channel) + if !found { + // Should not reach here, connectionEnd was able to be retrieved above + panic("cannot find connection") + } + + counterparty := types.NewCounterparty(packet.GetSourcePort(), packet.GetSourceChannel()) + expectedChannel := types.NewChannel( + types.CLOSED, channel.Ordering, counterparty, counterpartyHops, channel.Version, + ) + + // check that the opposing channel end has closed + if err := k.connectionKeeper.VerifyChannelState( + ctx, connectionEnd, proofHeight, proofClosed, + channel.Counterparty.PortId, channel.Counterparty.ChannelId, + expectedChannel, + ); err != nil { + return err + } + + var err error + switch channel.Ordering { + case types.ORDERED: + // check that packet has not been received + if nextSequenceRecv > packet.GetSequence() { + return sdkerrors.Wrapf(types.ErrInvalidPacket, "packet already received, next sequence receive > packet sequence (%d > %d)", nextSequenceRecv, packet.GetSequence()) + } + + // check that the recv sequence is as claimed + err = k.connectionKeeper.VerifyNextSequenceRecv( + ctx, connectionEnd, proofHeight, proof, + packet.GetDestPort(), packet.GetDestChannel(), nextSequenceRecv, + ) + case types.UNORDERED: + err = k.connectionKeeper.VerifyPacketReceiptAbsence( + ctx, connectionEnd, proofHeight, proof, + packet.GetDestPort(), packet.GetDestChannel(), packet.GetSequence(), + ) + default: + panic(sdkerrors.Wrapf(types.ErrInvalidChannelOrdering, channel.Ordering.String())) + } + + if err != nil { + return err + } + + // NOTE: the remaining code is located in the TimeoutExecuted 
function + return nil +} diff --git a/core/04-channel/keeper/timeout_test.go b/core/04-channel/keeper/timeout_test.go new file mode 100644 index 0000000000..640452e881 --- /dev/null +++ b/core/04-channel/keeper/timeout_test.go @@ -0,0 +1,351 @@ +package keeper_test + +import ( + "fmt" + + capabilitytypes "github.com/cosmos/cosmos-sdk/x/capability/types" + clienttypes "github.com/cosmos/cosmos-sdk/x/ibc/core/02-client/types" + "github.com/cosmos/cosmos-sdk/x/ibc/core/04-channel/types" + host "github.com/cosmos/cosmos-sdk/x/ibc/core/24-host" + "github.com/cosmos/cosmos-sdk/x/ibc/core/exported" + ibctesting "github.com/cosmos/cosmos-sdk/x/ibc/testing" +) + +// TestTimeoutPacket tests the TimeoutPacket call on chainA by ensuring the timeout has passed +// on chainB, but that no ack has been written yet. Test cases expected to reach proof +// verification must specify which proof to use via the ordered bool. +func (suite *KeeperTestSuite) TestTimeoutPacket() { + var ( + packet types.Packet + nextSeqRecv uint64 + ordered bool + ) + + testCases := []testCase{ + {"success: ORDERED", func() { + ordered = true + + clientA, clientB, _, _, channelA, channelB := suite.coordinator.Setup(suite.chainA, suite.chainB, types.ORDERED) + packet = types.NewPacket(validPacketData, 1, channelA.PortID, channelA.ID, channelB.PortID, channelB.ID, clienttypes.GetSelfHeight(suite.chainB.GetContext()), uint64(suite.chainB.GetContext().BlockTime().UnixNano())) + suite.coordinator.SendPacket(suite.chainA, suite.chainB, packet, clientB) + // need to update chainA's client representing chainB to prove missing ack + suite.coordinator.UpdateClient(suite.chainA, suite.chainB, clientA, exported.Tendermint) + }, true}, + {"success: UNORDERED", func() { + ordered = false + + clientA, clientB, _, _, channelA, channelB := suite.coordinator.Setup(suite.chainA, suite.chainB, types.UNORDERED) + packet = types.NewPacket(validPacketData, 1, channelA.PortID, channelA.ID, channelB.PortID, channelB.ID, clienttypes.GetSelfHeight(suite.chainB.GetContext()), disabledTimeoutTimestamp) + suite.coordinator.SendPacket(suite.chainA, suite.chainB, packet, clientB) + // need to update chainA's client representing chainB to prove missing ack + suite.coordinator.UpdateClient(suite.chainA, suite.chainB, clientA, exported.Tendermint) + }, true}, + {"channel not found", func() { + // use wrong channel naming + _, _, _, _, _, channelB := suite.coordinator.Setup(suite.chainA, suite.chainB, types.UNORDERED) + packet = types.NewPacket(validPacketData, 1, ibctesting.InvalidID, ibctesting.InvalidID, channelB.PortID, channelB.ID, timeoutHeight, disabledTimeoutTimestamp) + }, false}, + {"channel not open", func() { + _, _, _, _, channelA, channelB := suite.coordinator.Setup(suite.chainA, suite.chainB, types.UNORDERED) + packet = types.NewPacket(validPacketData, 1, channelA.PortID, channelA.ID, channelB.PortID, channelB.ID, timeoutHeight, disabledTimeoutTimestamp) + + err := suite.coordinator.SetChannelClosed(suite.chainA, suite.chainB, channelA) + suite.Require().NoError(err) + }, false}, + {"packet destination port ≠ channel counterparty port", func() { + _, _, _, _, channelA, channelB := suite.coordinator.Setup(suite.chainA, suite.chainB, types.UNORDERED) + // use wrong port for dest + packet = types.NewPacket(validPacketData, 1, channelA.PortID, channelA.ID, ibctesting.InvalidID, channelB.ID, timeoutHeight, disabledTimeoutTimestamp) + }, false}, + {"packet destination channel ID ≠ channel counterparty channel ID", func() { + _, _, _, _, channelA, channelB 
:= suite.coordinator.Setup(suite.chainA, suite.chainB, types.UNORDERED) + // use wrong channel for dest + packet = types.NewPacket(validPacketData, 1, channelA.PortID, channelA.ID, channelB.PortID, ibctesting.InvalidID, timeoutHeight, disabledTimeoutTimestamp) + }, false}, + {"connection not found", func() { + channelA := ibctesting.TestChannel{PortID: portID, ID: channelIDA} + channelB := ibctesting.TestChannel{PortID: portID, ID: channelIDB} + // pass channel check + suite.chainA.App.IBCKeeper.ChannelKeeper.SetChannel( + suite.chainA.GetContext(), + channelA.PortID, channelA.ID, + types.NewChannel(types.OPEN, types.ORDERED, types.NewCounterparty(channelB.PortID, channelB.ID), []string{connIDA}, channelA.Version), + ) + packet = types.NewPacket(validPacketData, 1, channelA.PortID, channelA.ID, channelB.PortID, channelB.ID, timeoutHeight, disabledTimeoutTimestamp) + }, false}, + {"timeout", func() { + clientA, clientB, _, _, channelA, channelB := suite.coordinator.Setup(suite.chainA, suite.chainB, types.ORDERED) + packet = types.NewPacket(validPacketData, 1, channelA.PortID, channelA.ID, channelB.PortID, channelB.ID, timeoutHeight, disabledTimeoutTimestamp) + suite.coordinator.SendPacket(suite.chainA, suite.chainB, packet, clientB) + suite.coordinator.UpdateClient(suite.chainA, suite.chainB, clientA, exported.Tendermint) + }, false}, + {"packet already received ", func() { + ordered = true + nextSeqRecv = 2 + + clientA, clientB, _, _, channelA, channelB := suite.coordinator.Setup(suite.chainA, suite.chainB, types.ORDERED) + packet = types.NewPacket(validPacketData, 1, channelA.PortID, channelA.ID, channelB.PortID, channelB.ID, timeoutHeight, uint64(suite.chainB.GetContext().BlockTime().UnixNano())) + suite.coordinator.SendPacket(suite.chainA, suite.chainB, packet, clientB) + suite.coordinator.UpdateClient(suite.chainA, suite.chainB, clientA, exported.Tendermint) + }, false}, + {"packet hasn't been sent", func() { + clientA, _, _, _, channelA, channelB := suite.coordinator.Setup(suite.chainA, suite.chainB, types.ORDERED) + packet = types.NewPacket(validPacketData, 1, channelA.PortID, channelA.ID, channelB.PortID, channelB.ID, timeoutHeight, uint64(suite.chainB.GetContext().BlockTime().UnixNano())) + suite.coordinator.UpdateClient(suite.chainA, suite.chainB, clientA, exported.Tendermint) + }, false}, + {"next seq receive verification failed", func() { + // set ordered to false resulting in wrong proof provided + ordered = false + + clientA, clientB, _, _, channelA, channelB := suite.coordinator.Setup(suite.chainA, suite.chainB, types.ORDERED) + packet = types.NewPacket(validPacketData, 1, channelA.PortID, channelA.ID, channelB.PortID, channelB.ID, clienttypes.GetSelfHeight(suite.chainB.GetContext()), disabledTimeoutTimestamp) + suite.coordinator.SendPacket(suite.chainA, suite.chainB, packet, clientB) + suite.coordinator.UpdateClient(suite.chainA, suite.chainB, clientA, exported.Tendermint) + }, false}, + {"packet ack verification failed", func() { + // set ordered to true resulting in wrong proof provided + ordered = true + + clientA, clientB, _, _, channelA, channelB := suite.coordinator.Setup(suite.chainA, suite.chainB, types.UNORDERED) + packet = types.NewPacket(validPacketData, 1, channelA.PortID, channelA.ID, channelB.PortID, channelB.ID, clienttypes.GetSelfHeight(suite.chainB.GetContext()), disabledTimeoutTimestamp) + suite.coordinator.SendPacket(suite.chainA, suite.chainB, packet, clientB) + suite.coordinator.UpdateClient(suite.chainA, suite.chainB, clientA, exported.Tendermint) + }, 
false}, + } + + for i, tc := range testCases { + tc := tc + suite.Run(fmt.Sprintf("Case %s, %d/%d tests", tc.msg, i, len(testCases)), func() { + var ( + proof []byte + proofHeight exported.Height + ) + + suite.SetupTest() // reset + nextSeqRecv = 1 // must be explicitly changed + tc.malleate() + + orderedPacketKey := host.NextSequenceRecvKey(packet.GetDestPort(), packet.GetDestChannel()) + unorderedPacketKey := host.PacketReceiptKey(packet.GetDestPort(), packet.GetDestChannel(), packet.GetSequence()) + + if ordered { + proof, proofHeight = suite.chainB.QueryProof(orderedPacketKey) + } else { + proof, proofHeight = suite.chainB.QueryProof(unorderedPacketKey) + } + + err := suite.chainA.App.IBCKeeper.ChannelKeeper.TimeoutPacket(suite.chainA.GetContext(), packet, proof, proofHeight, nextSeqRecv) + + if tc.expPass { + suite.Require().NoError(err) + } else { + suite.Require().Error(err) + } + }) + } +} + +// TestTimeoutExecuted verifies that packet commitments are deleted on chainA after the +// channel capabilities are verified. +func (suite *KeeperTestSuite) TestTimeoutExecuted() { + var ( + packet types.Packet + chanCap *capabilitytypes.Capability + ) + + testCases := []testCase{ + {"success ORDERED", func() { + _, clientB, _, _, channelA, channelB := suite.coordinator.Setup(suite.chainA, suite.chainB, types.ORDERED) + packet = types.NewPacket(validPacketData, 1, channelA.PortID, channelA.ID, channelB.PortID, channelB.ID, clienttypes.GetSelfHeight(suite.chainB.GetContext()), uint64(suite.chainB.GetContext().BlockTime().UnixNano())) + suite.coordinator.SendPacket(suite.chainA, suite.chainB, packet, clientB) + + chanCap = suite.chainA.GetChannelCapability(channelA.PortID, channelA.ID) + }, true}, + {"channel not found", func() { + // use wrong channel naming + _, _, _, _, _, channelB := suite.coordinator.Setup(suite.chainA, suite.chainB, types.UNORDERED) + packet = types.NewPacket(validPacketData, 1, ibctesting.InvalidID, ibctesting.InvalidID, channelB.PortID, channelB.ID, timeoutHeight, disabledTimeoutTimestamp) + }, false}, + {"incorrect capability", func() { + _, clientB, _, _, channelA, channelB := suite.coordinator.Setup(suite.chainA, suite.chainB, types.ORDERED) + packet = types.NewPacket(validPacketData, 1, channelA.PortID, channelA.ID, channelB.PortID, channelB.ID, clienttypes.GetSelfHeight(suite.chainB.GetContext()), uint64(suite.chainB.GetContext().BlockTime().UnixNano())) + suite.coordinator.SendPacket(suite.chainA, suite.chainB, packet, clientB) + + chanCap = capabilitytypes.NewCapability(100) + }, false}, + } + + for i, tc := range testCases { + tc := tc + suite.Run(fmt.Sprintf("Case %s, %d/%d tests", tc.msg, i, len(testCases)), func() { + suite.SetupTest() // reset + + tc.malleate() + + err := suite.chainA.App.IBCKeeper.ChannelKeeper.TimeoutExecuted(suite.chainA.GetContext(), chanCap, packet) + pc := suite.chainA.App.IBCKeeper.ChannelKeeper.GetPacketCommitment(suite.chainA.GetContext(), packet.GetSourcePort(), packet.GetSourceChannel(), packet.GetSequence()) + + if tc.expPass { + suite.NoError(err) + suite.Nil(pc) + } else { + suite.Error(err) + } + }) + } +} + +// TestTimeoutOnClose tests the call TimeoutOnClose on chainA by closing the corresponding +// channel on chainB after the packet commitment has been created. 
+func (suite *KeeperTestSuite) TestTimeoutOnClose() { + var ( + packet types.Packet + chanCap *capabilitytypes.Capability + nextSeqRecv uint64 + ordered bool + ) + + testCases := []testCase{ + {"success: ORDERED", func() { + ordered = true + clientA, clientB, _, _, channelA, channelB := suite.coordinator.Setup(suite.chainA, suite.chainB, types.ORDERED) + packet = types.NewPacket(validPacketData, 1, channelA.PortID, channelA.ID, channelB.PortID, channelB.ID, clienttypes.GetSelfHeight(suite.chainB.GetContext()), uint64(suite.chainB.GetContext().BlockTime().UnixNano())) + suite.coordinator.SendPacket(suite.chainA, suite.chainB, packet, clientB) + suite.coordinator.SetChannelClosed(suite.chainB, suite.chainA, channelB) + // need to update chainA's client representing chainB to prove missing ack + suite.coordinator.UpdateClient(suite.chainA, suite.chainB, clientA, exported.Tendermint) + + chanCap = suite.chainA.GetChannelCapability(channelA.PortID, channelA.ID) + }, true}, + {"success: UNORDERED", func() { + ordered = false + clientA, clientB, _, _, channelA, channelB := suite.coordinator.Setup(suite.chainA, suite.chainB, types.UNORDERED) + packet = types.NewPacket(validPacketData, 1, channelA.PortID, channelA.ID, channelB.PortID, channelB.ID, clienttypes.GetSelfHeight(suite.chainB.GetContext()), disabledTimeoutTimestamp) + suite.coordinator.SendPacket(suite.chainA, suite.chainB, packet, clientB) + suite.coordinator.SetChannelClosed(suite.chainB, suite.chainA, channelB) + // need to update chainA's client representing chainB to prove missing ack + suite.coordinator.UpdateClient(suite.chainA, suite.chainB, clientA, exported.Tendermint) + + chanCap = suite.chainA.GetChannelCapability(channelA.PortID, channelA.ID) + }, true}, + {"channel not found", func() { + // use wrong channel naming + _, _, _, _, _, channelB := suite.coordinator.Setup(suite.chainA, suite.chainB, types.UNORDERED) + packet = types.NewPacket(validPacketData, 1, ibctesting.InvalidID, ibctesting.InvalidID, channelB.PortID, channelB.ID, timeoutHeight, disabledTimeoutTimestamp) + }, false}, + {"packet dest port ≠ channel counterparty port", func() { + _, _, _, _, channelA, channelB := suite.coordinator.Setup(suite.chainA, suite.chainB, types.UNORDERED) + // use wrong port for dest + packet = types.NewPacket(validPacketData, 1, channelA.PortID, channelA.ID, ibctesting.InvalidID, channelB.ID, timeoutHeight, disabledTimeoutTimestamp) + chanCap = suite.chainA.GetChannelCapability(channelA.PortID, channelA.ID) + }, false}, + {"packet dest channel ID ≠ channel counterparty channel ID", func() { + _, _, _, _, channelA, channelB := suite.coordinator.Setup(suite.chainA, suite.chainB, types.UNORDERED) + // use wrong channel for dest + packet = types.NewPacket(validPacketData, 1, channelA.PortID, channelA.ID, channelB.PortID, ibctesting.InvalidID, timeoutHeight, disabledTimeoutTimestamp) + chanCap = suite.chainA.GetChannelCapability(channelA.PortID, channelA.ID) + }, false}, + {"connection not found", func() { + channelA := ibctesting.TestChannel{PortID: portID, ID: channelIDA} + channelB := ibctesting.TestChannel{PortID: portID, ID: channelIDB} + // pass channel check + suite.chainA.App.IBCKeeper.ChannelKeeper.SetChannel( + suite.chainA.GetContext(), + channelA.PortID, channelA.ID, + types.NewChannel(types.OPEN, types.ORDERED, types.NewCounterparty(channelB.PortID, channelB.ID), []string{connIDA}, channelA.Version), + ) + packet = types.NewPacket(validPacketData, 1, channelA.PortID, channelA.ID, channelB.PortID, channelB.ID, timeoutHeight, 
disabledTimeoutTimestamp) + + // create chancap + suite.chainA.CreateChannelCapability(channelA.PortID, channelA.ID) + chanCap = suite.chainA.GetChannelCapability(channelA.PortID, channelA.ID) + }, false}, + {"packet hasn't been sent", func() { + _, _, _, _, channelA, channelB := suite.coordinator.Setup(suite.chainA, suite.chainB, types.ORDERED) + packet = types.NewPacket(validPacketData, 1, channelA.PortID, channelA.ID, channelB.PortID, channelB.ID, clienttypes.GetSelfHeight(suite.chainB.GetContext()), uint64(suite.chainB.GetContext().BlockTime().UnixNano())) + chanCap = suite.chainA.GetChannelCapability(channelA.PortID, channelA.ID) + }, false}, + {"packet already received", func() { + nextSeqRecv = 2 + ordered = true + clientA, clientB, _, _, channelA, channelB := suite.coordinator.Setup(suite.chainA, suite.chainB, types.ORDERED) + packet = types.NewPacket(validPacketData, 1, channelA.PortID, channelA.ID, channelB.PortID, channelB.ID, clienttypes.GetSelfHeight(suite.chainB.GetContext()), uint64(suite.chainB.GetContext().BlockTime().UnixNano())) + suite.coordinator.SendPacket(suite.chainA, suite.chainB, packet, clientB) + suite.coordinator.SetChannelClosed(suite.chainB, suite.chainA, channelB) + // need to update chainA's client representing chainB to prove missing ack + suite.coordinator.UpdateClient(suite.chainA, suite.chainB, clientA, exported.Tendermint) + + chanCap = suite.chainA.GetChannelCapability(channelA.PortID, channelA.ID) + }, false}, + {"channel verification failed", func() { + ordered = true + _, clientB, _, _, channelA, channelB := suite.coordinator.Setup(suite.chainA, suite.chainB, types.ORDERED) + packet = types.NewPacket(validPacketData, 1, channelA.PortID, channelA.ID, channelB.PortID, channelB.ID, clienttypes.GetSelfHeight(suite.chainB.GetContext()), uint64(suite.chainB.GetContext().BlockTime().UnixNano())) + suite.coordinator.SendPacket(suite.chainA, suite.chainB, packet, clientB) + chanCap = suite.chainA.GetChannelCapability(channelA.PortID, channelA.ID) + }, false}, + {"next seq receive verification failed", func() { + // set ordered to false providing the wrong proof for ORDERED case + ordered = false + clientA, clientB, _, _, channelA, channelB := suite.coordinator.Setup(suite.chainA, suite.chainB, types.ORDERED) + packet = types.NewPacket(validPacketData, 1, channelA.PortID, channelA.ID, channelB.PortID, channelB.ID, clienttypes.GetSelfHeight(suite.chainB.GetContext()), uint64(suite.chainB.GetContext().BlockTime().UnixNano())) + suite.coordinator.SendPacket(suite.chainA, suite.chainB, packet, clientB) + suite.coordinator.SetChannelClosed(suite.chainB, suite.chainA, channelB) + suite.coordinator.UpdateClient(suite.chainA, suite.chainB, clientA, exported.Tendermint) + chanCap = suite.chainA.GetChannelCapability(channelA.PortID, channelA.ID) + }, false}, + {"packet ack verification failed", func() { + // set ordered to true providing the wrong proof for UNORDERED case + ordered = true + clientA, clientB, _, _, channelA, channelB := suite.coordinator.Setup(suite.chainA, suite.chainB, types.UNORDERED) + packet = types.NewPacket(validPacketData, 1, channelA.PortID, channelA.ID, channelB.PortID, channelB.ID, clienttypes.GetSelfHeight(suite.chainB.GetContext()), disabledTimeoutTimestamp) + suite.coordinator.SendPacket(suite.chainA, suite.chainB, packet, clientB) + suite.coordinator.SetChannelClosed(suite.chainB, suite.chainA, channelB) + suite.coordinator.UpdateClient(suite.chainA, suite.chainB, clientA, exported.Tendermint) + chanCap = 
suite.chainA.GetChannelCapability(channelA.PortID, channelA.ID) + }, false}, + {"channel capability not found", func() { + ordered = true + clientA, clientB, _, _, channelA, channelB := suite.coordinator.Setup(suite.chainA, suite.chainB, types.ORDERED) + packet = types.NewPacket(validPacketData, 1, channelA.PortID, channelA.ID, channelB.PortID, channelB.ID, clienttypes.GetSelfHeight(suite.chainB.GetContext()), uint64(suite.chainB.GetContext().BlockTime().UnixNano())) + suite.coordinator.SendPacket(suite.chainA, suite.chainB, packet, clientB) + suite.coordinator.SetChannelClosed(suite.chainB, suite.chainA, channelB) + // need to update chainA's client representing chainB to prove missing ack + suite.coordinator.UpdateClient(suite.chainA, suite.chainB, clientA, exported.Tendermint) + + chanCap = capabilitytypes.NewCapability(100) + }, false}, + } + + for i, tc := range testCases { + tc := tc + suite.Run(fmt.Sprintf("Case %s, %d/%d tests", tc.msg, i, len(testCases)), func() { + var proof []byte + + suite.SetupTest() // reset + nextSeqRecv = 1 // must be explicitly changed + tc.malleate() + + channelKey := host.ChannelKey(packet.GetDestPort(), packet.GetDestChannel()) + unorderedPacketKey := host.PacketReceiptKey(packet.GetDestPort(), packet.GetDestChannel(), packet.GetSequence()) + orderedPacketKey := host.NextSequenceRecvKey(packet.GetDestPort(), packet.GetDestChannel()) + + proofClosed, proofHeight := suite.chainB.QueryProof(channelKey) + + if ordered { + proof, _ = suite.chainB.QueryProof(orderedPacketKey) + } else { + proof, _ = suite.chainB.QueryProof(unorderedPacketKey) + } + + err := suite.chainA.App.IBCKeeper.ChannelKeeper.TimeoutOnClose(suite.chainA.GetContext(), chanCap, packet, proof, proofClosed, proofHeight, nextSeqRecv) + + if tc.expPass { + suite.Require().NoError(err) + } else { + suite.Require().Error(err) + } + }) + } + +} diff --git a/core/04-channel/module.go b/core/04-channel/module.go new file mode 100644 index 0000000000..569120ad92 --- /dev/null +++ b/core/04-channel/module.go @@ -0,0 +1,29 @@ +package channel + +import ( + "github.com/gogo/protobuf/grpc" + "github.com/spf13/cobra" + + "github.com/cosmos/cosmos-sdk/x/ibc/core/04-channel/client/cli" + "github.com/cosmos/cosmos-sdk/x/ibc/core/04-channel/types" +) + +// Name returns the IBC channel ICS name. +func Name() string { + return types.SubModuleName +} + +// GetTxCmd returns the root tx command for IBC channels. +func GetTxCmd() *cobra.Command { + return cli.NewTxCmd() +} + +// GetQueryCmd returns the root query command for IBC channels. +func GetQueryCmd() *cobra.Command { + return cli.GetQueryCmd() +} + +// RegisterQueryService registers the gRPC query service for IBC channels. +func RegisterQueryService(server grpc.Server, queryServer types.QueryServer) { + types.RegisterQueryServer(server, queryServer) +} diff --git a/core/04-channel/simulation/decoder.go b/core/04-channel/simulation/decoder.go new file mode 100644 index 0000000000..809976cc0e --- /dev/null +++ b/core/04-channel/simulation/decoder.go @@ -0,0 +1,48 @@ +package simulation + +import ( + "bytes" + "fmt" + + "github.com/cosmos/cosmos-sdk/codec" + sdk "github.com/cosmos/cosmos-sdk/types" + "github.com/cosmos/cosmos-sdk/types/kv" + "github.com/cosmos/cosmos-sdk/x/ibc/core/04-channel/types" + host "github.com/cosmos/cosmos-sdk/x/ibc/core/24-host" +) + +// NewDecodeStore returns a decoder function closure that unmarshals the KVPair's +// Value to the corresponding channel type. 
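The module.go entry points added above (Name, GetTxCmd, GetQueryCmd, RegisterQueryService) are thin wrappers meant to be called from application wiring. A hypothetical sketch of how RegisterQueryService could be hooked up; the app package, function name, and the claim that the channel keeper serves as the QueryServer are illustrative assumptions, not part of this patch:

package app // hypothetical wiring code

import (
	"github.com/gogo/protobuf/grpc"

	channel "github.com/cosmos/cosmos-sdk/x/ibc/core/04-channel"
	channeltypes "github.com/cosmos/cosmos-sdk/x/ibc/core/04-channel/types"
)

// registerChannelQueryService registers the 04-channel gRPC query service
// (typically implemented by the channel keeper) on the app's query router.
func registerChannelQueryService(router grpc.Server, qs channeltypes.QueryServer) {
	channel.RegisterQueryService(router, qs)
}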
+func NewDecodeStore(cdc codec.BinaryMarshaler, kvA, kvB kv.Pair) (string, bool) { + switch { + case bytes.HasPrefix(kvA.Key, []byte(host.KeyChannelEndPrefix)): + var channelA, channelB types.Channel + cdc.MustUnmarshalBinaryBare(kvA.Value, &channelA) + cdc.MustUnmarshalBinaryBare(kvB.Value, &channelB) + return fmt.Sprintf("Channel A: %v\nChannel B: %v", channelA, channelB), true + + case bytes.HasPrefix(kvA.Key, []byte(host.KeyNextSeqSendPrefix)): + seqA := sdk.BigEndianToUint64(kvA.Value) + seqB := sdk.BigEndianToUint64(kvB.Value) + return fmt.Sprintf("NextSeqSend A: %d\nNextSeqSend B: %d", seqA, seqB), true + + case bytes.HasPrefix(kvA.Key, []byte(host.KeyNextSeqRecvPrefix)): + seqA := sdk.BigEndianToUint64(kvA.Value) + seqB := sdk.BigEndianToUint64(kvB.Value) + return fmt.Sprintf("NextSeqRecv A: %d\nNextSeqRecv B: %d", seqA, seqB), true + + case bytes.HasPrefix(kvA.Key, []byte(host.KeyNextSeqAckPrefix)): + seqA := sdk.BigEndianToUint64(kvA.Value) + seqB := sdk.BigEndianToUint64(kvB.Value) + return fmt.Sprintf("NextSeqAck A: %d\nNextSeqAck B: %d", seqA, seqB), true + + case bytes.HasPrefix(kvA.Key, []byte(host.KeyPacketCommitmentPrefix)): + return fmt.Sprintf("CommitmentHash A: %X\nCommitmentHash B: %X", kvA.Value, kvB.Value), true + + case bytes.HasPrefix(kvA.Key, []byte(host.KeyPacketAckPrefix)): + return fmt.Sprintf("AckHash A: %X\nAckHash B: %X", kvA.Value, kvB.Value), true + + default: + return "", false + } +} diff --git a/core/04-channel/simulation/decoder_test.go b/core/04-channel/simulation/decoder_test.go new file mode 100644 index 0000000000..5f2ba2f5ec --- /dev/null +++ b/core/04-channel/simulation/decoder_test.go @@ -0,0 +1,89 @@ +package simulation_test + +import ( + "fmt" + "testing" + + "github.com/stretchr/testify/require" + + "github.com/cosmos/cosmos-sdk/simapp" + sdk "github.com/cosmos/cosmos-sdk/types" + "github.com/cosmos/cosmos-sdk/types/kv" + "github.com/cosmos/cosmos-sdk/x/ibc/core/04-channel/simulation" + "github.com/cosmos/cosmos-sdk/x/ibc/core/04-channel/types" + host "github.com/cosmos/cosmos-sdk/x/ibc/core/24-host" +) + +func TestDecodeStore(t *testing.T) { + app := simapp.Setup(false) + cdc := app.AppCodec() + + channelID := "channelidone" + portID := "portidone" + + channel := types.Channel{ + State: types.OPEN, + Version: "1.0", + } + + bz := []byte{0x1, 0x2, 0x3} + + kvPairs := kv.Pairs{ + Pairs: []kv.Pair{ + { + Key: host.ChannelKey(portID, channelID), + Value: cdc.MustMarshalBinaryBare(&channel), + }, + { + Key: host.NextSequenceSendKey(portID, channelID), + Value: sdk.Uint64ToBigEndian(1), + }, + { + Key: host.NextSequenceRecvKey(portID, channelID), + Value: sdk.Uint64ToBigEndian(1), + }, + { + Key: host.NextSequenceAckKey(portID, channelID), + Value: sdk.Uint64ToBigEndian(1), + }, + { + Key: host.PacketCommitmentKey(portID, channelID, 1), + Value: bz, + }, + { + Key: host.PacketAcknowledgementKey(portID, channelID, 1), + Value: bz, + }, + { + Key: []byte{0x99}, + Value: []byte{0x99}, + }, + }, + } + tests := []struct { + name string + expectedLog string + }{ + {"Channel", fmt.Sprintf("Channel A: %v\nChannel B: %v", channel, channel)}, + {"NextSeqSend", "NextSeqSend A: 1\nNextSeqSend B: 1"}, + {"NextSeqRecv", "NextSeqRecv A: 1\nNextSeqRecv B: 1"}, + {"NextSeqAck", "NextSeqAck A: 1\nNextSeqAck B: 1"}, + {"CommitmentHash", fmt.Sprintf("CommitmentHash A: %X\nCommitmentHash B: %X", bz, bz)}, + {"AckHash", fmt.Sprintf("AckHash A: %X\nAckHash B: %X", bz, bz)}, + {"other", ""}, + } + + for i, tt := range tests { + i, tt := i, tt + t.Run(tt.name, func(t 
*testing.T) { + res, found := simulation.NewDecodeStore(cdc, kvPairs.Pairs[i], kvPairs.Pairs[i]) + if i == len(tests)-1 { + require.False(t, found, string(kvPairs.Pairs[i].Key)) + require.Empty(t, res, string(kvPairs.Pairs[i].Key)) + } else { + require.True(t, found, string(kvPairs.Pairs[i].Key)) + require.Equal(t, tt.expectedLog, res, string(kvPairs.Pairs[i].Key)) + } + }) + } +} diff --git a/core/04-channel/simulation/genesis.go b/core/04-channel/simulation/genesis.go new file mode 100644 index 0000000000..ed33902191 --- /dev/null +++ b/core/04-channel/simulation/genesis.go @@ -0,0 +1,13 @@ +package simulation + +import ( + "math/rand" + + simtypes "github.com/cosmos/cosmos-sdk/types/simulation" + "github.com/cosmos/cosmos-sdk/x/ibc/core/04-channel/types" +) + +// GenChannelGenesis returns the default channel genesis state. +func GenChannelGenesis(_ *rand.Rand, _ []simtypes.Account) types.GenesisState { + return types.DefaultGenesisState() +} diff --git a/core/04-channel/types/channel.go b/core/04-channel/types/channel.go new file mode 100644 index 0000000000..8513a8123d --- /dev/null +++ b/core/04-channel/types/channel.go @@ -0,0 +1,172 @@ +package types + +import ( + "strings" + + sdk "github.com/cosmos/cosmos-sdk/types" + sdkerrors "github.com/cosmos/cosmos-sdk/types/errors" + host "github.com/cosmos/cosmos-sdk/x/ibc/core/24-host" + "github.com/cosmos/cosmos-sdk/x/ibc/core/exported" +) + +var ( + _ exported.ChannelI = (*Channel)(nil) + _ exported.CounterpartyChannelI = (*Counterparty)(nil) +) + +// NewChannel creates a new Channel instance +func NewChannel( + state State, ordering Order, counterparty Counterparty, + hops []string, version string, +) Channel { + return Channel{ + State: state, + Ordering: ordering, + Counterparty: counterparty, + ConnectionHops: hops, + Version: version, + } +} + +// GetState implements Channel interface. +func (ch Channel) GetState() int32 { + return int32(ch.State) +} + +// GetOrdering implements Channel interface. +func (ch Channel) GetOrdering() int32 { + return int32(ch.Ordering) +} + +// GetCounterparty implements Channel interface. +func (ch Channel) GetCounterparty() exported.CounterpartyChannelI { + return ch.Counterparty +} + +// GetConnectionHops implements Channel interface. +func (ch Channel) GetConnectionHops() []string { + return ch.ConnectionHops +} + +// GetVersion implements Channel interface. 
+func (ch Channel) GetVersion() string { + return ch.Version +} + +// ValidateBasic performs a basic validation of the channel fields +func (ch Channel) ValidateBasic() error { + if ch.State == UNINITIALIZED { + return ErrInvalidChannelState + } + if !(ch.Ordering == ORDERED || ch.Ordering == UNORDERED) { + return sdkerrors.Wrap(ErrInvalidChannelOrdering, ch.Ordering.String()) + } + if len(ch.ConnectionHops) != 1 { + return sdkerrors.Wrap( + ErrTooManyConnectionHops, + "current IBC version only supports one connection hop", + ) + } + if err := host.ConnectionIdentifierValidator(ch.ConnectionHops[0]); err != nil { + return sdkerrors.Wrap(err, "invalid connection hop ID") + } + return ch.Counterparty.ValidateBasic() +} + +// NewCounterparty returns a new Counterparty instance +func NewCounterparty(portID, channelID string) Counterparty { + return Counterparty{ + PortId: portID, + ChannelId: channelID, + } +} + +// GetPortID implements CounterpartyChannelI interface +func (c Counterparty) GetPortID() string { + return c.PortId +} + +// GetChannelID implements CounterpartyChannelI interface +func (c Counterparty) GetChannelID() string { + return c.ChannelId +} + +// ValidateBasic performs a basic validation check of the identifiers +func (c Counterparty) ValidateBasic() error { + if err := host.PortIdentifierValidator(c.PortId); err != nil { + return sdkerrors.Wrap(err, "invalid counterparty port ID") + } + if c.ChannelId != "" { + if err := host.ChannelIdentifierValidator(c.ChannelId); err != nil { + return sdkerrors.Wrap(err, "invalid counterparty channel ID") + } + } + return nil +} + +// NewIdentifiedChannel creates a new IdentifiedChannel instance +func NewIdentifiedChannel(portID, channelID string, ch Channel) IdentifiedChannel { + return IdentifiedChannel{ + State: ch.State, + Ordering: ch.Ordering, + Counterparty: ch.Counterparty, + ConnectionHops: ch.ConnectionHops, + Version: ch.Version, + PortId: portID, + ChannelId: channelID, + } +} + +// ValidateBasic performs a basic validation of the identifiers and channel fields. +func (ic IdentifiedChannel) ValidateBasic() error { + if err := host.ChannelIdentifierValidator(ic.ChannelId); err != nil { + return sdkerrors.Wrap(err, "invalid channel ID") + } + if err := host.PortIdentifierValidator(ic.PortId); err != nil { + return sdkerrors.Wrap(err, "invalid port ID") + } + channel := NewChannel(ic.State, ic.Ordering, ic.Counterparty, ic.ConnectionHops, ic.Version) + return channel.ValidateBasic() +} + +// NewResultAcknowledgement returns a new instance of Acknowledgement using an Acknowledgement_Result +// type in the Response field. +func NewResultAcknowledgement(result []byte) Acknowledgement { + return Acknowledgement{ + Response: &Acknowledgement_Result{ + Result: result, + }, + } +} + +// NewErrorAcknowledgement returns a new instance of Acknowledgement using an Acknowledgement_Error +// type in the Response field. 
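Taken together, NewChannel, NewCounterparty, and the ValidateBasic methods above form the usual construct-then-check path for a channel end. A small hypothetical sketch from a caller's perspective, assuming the package is imported as types (as the keeper tests above do); all identifiers are placeholders:

// exampleChannel builds and validates an ORDERED ics20-1 channel end whose
// counterparty is the transfer port; hypothetical helper, not part of this patch.
func exampleChannel() (types.Channel, error) {
	counterparty := types.NewCounterparty("transfer", "channel-1")
	ch := types.NewChannel(types.INIT, types.ORDERED, counterparty, []string{"connection-0"}, "ics20-1")
	// ValidateBasic rejects an UNINITIALIZED state, an ordering other than
	// ORDERED/UNORDERED, a connection hop count other than one, and malformed
	// port/channel/connection identifiers.
	if err := ch.ValidateBasic(); err != nil {
		return types.Channel{}, err
	}
	return ch, nil
}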
+func NewErrorAcknowledgement(err string) Acknowledgement { + return Acknowledgement{ + Response: &Acknowledgement_Error{ + Error: err, + }, + } +} + +// GetBytes is a helper for serialising acknowledgements +func (ack Acknowledgement) GetBytes() []byte { + return sdk.MustSortJSON(SubModuleCdc.MustMarshalJSON(&ack)) +} + +// ValidateBasic performs a basic validation of the acknowledgement +func (ack Acknowledgement) ValidateBasic() error { + switch resp := ack.Response.(type) { + case *Acknowledgement_Result: + if len(resp.Result) == 0 { + return sdkerrors.Wrap(ErrInvalidAcknowledgement, "acknowledgement result cannot be empty") + } + case *Acknowledgement_Error: + if strings.TrimSpace(resp.Error) == "" { + return sdkerrors.Wrap(ErrInvalidAcknowledgement, "acknowledgement error cannot be empty") + } + default: + return sdkerrors.Wrapf(ErrInvalidAcknowledgement, "unsupported acknowledgement response field type %T", resp) + } + return nil +} diff --git a/core/04-channel/types/channel.pb.go b/core/04-channel/types/channel.pb.go new file mode 100644 index 0000000000..dada2008ec --- /dev/null +++ b/core/04-channel/types/channel.pb.go @@ -0,0 +1,2270 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: ibcgo/core/channel/v1/channel.proto + +package types + +import ( + fmt "fmt" + types "github.com/cosmos/ibc-go/core/02-client/types" + _ "github.com/gogo/protobuf/gogoproto" + proto "github.com/gogo/protobuf/proto" + io "io" + math "math" + math_bits "math/bits" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +// State defines if a channel is in one of the following states: +// CLOSED, INIT, TRYOPEN, OPEN or UNINITIALIZED. +type State int32 + +const ( + // Default State + UNINITIALIZED State = 0 + // A channel has just started the opening handshake. + INIT State = 1 + // A channel has acknowledged the handshake step on the counterparty chain. + TRYOPEN State = 2 + // A channel has completed the handshake. Open channels are + // ready to send and receive packets. + OPEN State = 3 + // A channel has been closed and can no longer be used to send or receive + // packets. + CLOSED State = 4 +) + +var State_name = map[int32]string{ + 0: "STATE_UNINITIALIZED_UNSPECIFIED", + 1: "STATE_INIT", + 2: "STATE_TRYOPEN", + 3: "STATE_OPEN", + 4: "STATE_CLOSED", +} + +var State_value = map[string]int32{ + "STATE_UNINITIALIZED_UNSPECIFIED": 0, + "STATE_INIT": 1, + "STATE_TRYOPEN": 2, + "STATE_OPEN": 3, + "STATE_CLOSED": 4, +} + +func (x State) String() string { + return proto.EnumName(State_name, int32(x)) +} + +func (State) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_3a7a8797f9808eee, []int{0} +} + +// Order defines if a channel is ORDERED or UNORDERED +type Order int32 + +const ( + // zero-value for channel ordering + NONE Order = 0 + // packets can be delivered in any order, which may differ from the order in + // which they were sent. 
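A note on the Acknowledgement helpers that close out channel.go above: they are the recommended way for application modules to construct ICS-04 acknowledgements, and the generated Acknowledgement message further down deliberately uses oneof field numbers 21 and 22, so the varint-encoded field tag (21<<3|2 = 170, 22<<3|2 = 178) starts with the byte 0xaa for a result and 0xb2 for an error, keeping this format distinguishable from other acknowledgement encodings. A hypothetical usage sketch, again assuming the package is imported as types (exampleAck is illustrative only, not part of this patch):

// exampleAck returns the acknowledgement bytes an application module might
// write after processing a received packet.
func exampleAck(success bool) []byte {
	var ack types.Acknowledgement
	if success {
		ack = types.NewResultAcknowledgement([]byte{byte(1)})
	} else {
		ack = types.NewErrorAcknowledgement("packet handling failed")
	}
	// ValidateBasic rejects an empty result or a blank error string; GetBytes
	// returns deterministic, sorted JSON so the committed bytes are reproducible.
	if err := ack.ValidateBasic(); err != nil {
		panic(err)
	}
	return ack.GetBytes()
}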
+ UNORDERED Order = 1 + // packets are delivered exactly in the order which they were sent + ORDERED Order = 2 +) + +var Order_name = map[int32]string{ + 0: "ORDER_NONE_UNSPECIFIED", + 1: "ORDER_UNORDERED", + 2: "ORDER_ORDERED", +} + +var Order_value = map[string]int32{ + "ORDER_NONE_UNSPECIFIED": 0, + "ORDER_UNORDERED": 1, + "ORDER_ORDERED": 2, +} + +func (x Order) String() string { + return proto.EnumName(Order_name, int32(x)) +} + +func (Order) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_3a7a8797f9808eee, []int{1} +} + +// Channel defines pipeline for exactly-once packet delivery between specific +// modules on separate blockchains, which has at least one end capable of +// sending packets and one end capable of receiving packets. +type Channel struct { + // current state of the channel end + State State `protobuf:"varint,1,opt,name=state,proto3,enum=ibcgo.core.channel.v1.State" json:"state,omitempty"` + // whether the channel is ordered or unordered + Ordering Order `protobuf:"varint,2,opt,name=ordering,proto3,enum=ibcgo.core.channel.v1.Order" json:"ordering,omitempty"` + // counterparty channel end + Counterparty Counterparty `protobuf:"bytes,3,opt,name=counterparty,proto3" json:"counterparty"` + // list of connection identifiers, in order, along which packets sent on + // this channel will travel + ConnectionHops []string `protobuf:"bytes,4,rep,name=connection_hops,json=connectionHops,proto3" json:"connection_hops,omitempty" yaml:"connection_hops"` + // opaque channel version, which is agreed upon during the handshake + Version string `protobuf:"bytes,5,opt,name=version,proto3" json:"version,omitempty"` +} + +func (m *Channel) Reset() { *m = Channel{} } +func (m *Channel) String() string { return proto.CompactTextString(m) } +func (*Channel) ProtoMessage() {} +func (*Channel) Descriptor() ([]byte, []int) { + return fileDescriptor_3a7a8797f9808eee, []int{0} +} +func (m *Channel) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Channel) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Channel.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Channel) XXX_Merge(src proto.Message) { + xxx_messageInfo_Channel.Merge(m, src) +} +func (m *Channel) XXX_Size() int { + return m.Size() +} +func (m *Channel) XXX_DiscardUnknown() { + xxx_messageInfo_Channel.DiscardUnknown(m) +} + +var xxx_messageInfo_Channel proto.InternalMessageInfo + +// IdentifiedChannel defines a channel with additional port and channel +// identifier fields. 
+type IdentifiedChannel struct { + // current state of the channel end + State State `protobuf:"varint,1,opt,name=state,proto3,enum=ibcgo.core.channel.v1.State" json:"state,omitempty"` + // whether the channel is ordered or unordered + Ordering Order `protobuf:"varint,2,opt,name=ordering,proto3,enum=ibcgo.core.channel.v1.Order" json:"ordering,omitempty"` + // counterparty channel end + Counterparty Counterparty `protobuf:"bytes,3,opt,name=counterparty,proto3" json:"counterparty"` + // list of connection identifiers, in order, along which packets sent on + // this channel will travel + ConnectionHops []string `protobuf:"bytes,4,rep,name=connection_hops,json=connectionHops,proto3" json:"connection_hops,omitempty" yaml:"connection_hops"` + // opaque channel version, which is agreed upon during the handshake + Version string `protobuf:"bytes,5,opt,name=version,proto3" json:"version,omitempty"` + // port identifier + PortId string `protobuf:"bytes,6,opt,name=port_id,json=portId,proto3" json:"port_id,omitempty"` + // channel identifier + ChannelId string `protobuf:"bytes,7,opt,name=channel_id,json=channelId,proto3" json:"channel_id,omitempty"` +} + +func (m *IdentifiedChannel) Reset() { *m = IdentifiedChannel{} } +func (m *IdentifiedChannel) String() string { return proto.CompactTextString(m) } +func (*IdentifiedChannel) ProtoMessage() {} +func (*IdentifiedChannel) Descriptor() ([]byte, []int) { + return fileDescriptor_3a7a8797f9808eee, []int{1} +} +func (m *IdentifiedChannel) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *IdentifiedChannel) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_IdentifiedChannel.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *IdentifiedChannel) XXX_Merge(src proto.Message) { + xxx_messageInfo_IdentifiedChannel.Merge(m, src) +} +func (m *IdentifiedChannel) XXX_Size() int { + return m.Size() +} +func (m *IdentifiedChannel) XXX_DiscardUnknown() { + xxx_messageInfo_IdentifiedChannel.DiscardUnknown(m) +} + +var xxx_messageInfo_IdentifiedChannel proto.InternalMessageInfo + +// Counterparty defines a channel end counterparty +type Counterparty struct { + // port on the counterparty chain which owns the other end of the channel. 
+ PortId string `protobuf:"bytes,1,opt,name=port_id,json=portId,proto3" json:"port_id,omitempty" yaml:"port_id"` + // channel end on the counterparty chain + ChannelId string `protobuf:"bytes,2,opt,name=channel_id,json=channelId,proto3" json:"channel_id,omitempty" yaml:"channel_id"` +} + +func (m *Counterparty) Reset() { *m = Counterparty{} } +func (m *Counterparty) String() string { return proto.CompactTextString(m) } +func (*Counterparty) ProtoMessage() {} +func (*Counterparty) Descriptor() ([]byte, []int) { + return fileDescriptor_3a7a8797f9808eee, []int{2} +} +func (m *Counterparty) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Counterparty) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Counterparty.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Counterparty) XXX_Merge(src proto.Message) { + xxx_messageInfo_Counterparty.Merge(m, src) +} +func (m *Counterparty) XXX_Size() int { + return m.Size() +} +func (m *Counterparty) XXX_DiscardUnknown() { + xxx_messageInfo_Counterparty.DiscardUnknown(m) +} + +var xxx_messageInfo_Counterparty proto.InternalMessageInfo + +// Packet defines a type that carries data across different chains through IBC +type Packet struct { + // number corresponds to the order of sends and receives, where a Packet + // with an earlier sequence number must be sent and received before a Packet + // with a later sequence number. + Sequence uint64 `protobuf:"varint,1,opt,name=sequence,proto3" json:"sequence,omitempty"` + // identifies the port on the sending chain. + SourcePort string `protobuf:"bytes,2,opt,name=source_port,json=sourcePort,proto3" json:"source_port,omitempty" yaml:"source_port"` + // identifies the channel end on the sending chain. + SourceChannel string `protobuf:"bytes,3,opt,name=source_channel,json=sourceChannel,proto3" json:"source_channel,omitempty" yaml:"source_channel"` + // identifies the port on the receiving chain. + DestinationPort string `protobuf:"bytes,4,opt,name=destination_port,json=destinationPort,proto3" json:"destination_port,omitempty" yaml:"destination_port"` + // identifies the channel end on the receiving chain. 
+ DestinationChannel string `protobuf:"bytes,5,opt,name=destination_channel,json=destinationChannel,proto3" json:"destination_channel,omitempty" yaml:"destination_channel"` + // actual opaque bytes transferred directly to the application module + Data []byte `protobuf:"bytes,6,opt,name=data,proto3" json:"data,omitempty"` + // block height after which the packet times out + TimeoutHeight types.Height `protobuf:"bytes,7,opt,name=timeout_height,json=timeoutHeight,proto3" json:"timeout_height" yaml:"timeout_height"` + // block timestamp (in nanoseconds) after which the packet times out + TimeoutTimestamp uint64 `protobuf:"varint,8,opt,name=timeout_timestamp,json=timeoutTimestamp,proto3" json:"timeout_timestamp,omitempty" yaml:"timeout_timestamp"` +} + +func (m *Packet) Reset() { *m = Packet{} } +func (m *Packet) String() string { return proto.CompactTextString(m) } +func (*Packet) ProtoMessage() {} +func (*Packet) Descriptor() ([]byte, []int) { + return fileDescriptor_3a7a8797f9808eee, []int{3} +} +func (m *Packet) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Packet) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Packet.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Packet) XXX_Merge(src proto.Message) { + xxx_messageInfo_Packet.Merge(m, src) +} +func (m *Packet) XXX_Size() int { + return m.Size() +} +func (m *Packet) XXX_DiscardUnknown() { + xxx_messageInfo_Packet.DiscardUnknown(m) +} + +var xxx_messageInfo_Packet proto.InternalMessageInfo + +// PacketState defines the generic type necessary to retrieve and store +// packet commitments, acknowledgements, and receipts. +// Caller is responsible for knowing the context necessary to interpret this +// state as a commitment, acknowledgement, or a receipt. +type PacketState struct { + // channel port identifier. + PortId string `protobuf:"bytes,1,opt,name=port_id,json=portId,proto3" json:"port_id,omitempty" yaml:"port_id"` + // channel unique identifier. + ChannelId string `protobuf:"bytes,2,opt,name=channel_id,json=channelId,proto3" json:"channel_id,omitempty" yaml:"channel_id"` + // packet sequence. + Sequence uint64 `protobuf:"varint,3,opt,name=sequence,proto3" json:"sequence,omitempty"` + // embedded data that represents packet state. 
+ Data []byte `protobuf:"bytes,4,opt,name=data,proto3" json:"data,omitempty"` +} + +func (m *PacketState) Reset() { *m = PacketState{} } +func (m *PacketState) String() string { return proto.CompactTextString(m) } +func (*PacketState) ProtoMessage() {} +func (*PacketState) Descriptor() ([]byte, []int) { + return fileDescriptor_3a7a8797f9808eee, []int{4} +} +func (m *PacketState) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PacketState) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_PacketState.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *PacketState) XXX_Merge(src proto.Message) { + xxx_messageInfo_PacketState.Merge(m, src) +} +func (m *PacketState) XXX_Size() int { + return m.Size() +} +func (m *PacketState) XXX_DiscardUnknown() { + xxx_messageInfo_PacketState.DiscardUnknown(m) +} + +var xxx_messageInfo_PacketState proto.InternalMessageInfo + +// Acknowledgement is the recommended acknowledgement format to be used by +// app-specific protocols. +// NOTE: The field numbers 21 and 22 were explicitly chosen to avoid accidental +// conflicts with other protobuf message formats used for acknowledgements. +// The first byte of any message with this format will be the non-ASCII values +// `0xaa` (result) or `0xb2` (error). Implemented as defined by ICS: +// https://github.com/cosmos/ics/tree/master/spec/ics-004-channel-and-packet-semantics#acknowledgement-envelope +type Acknowledgement struct { + // response contains either a result or an error and must be non-empty + // + // Types that are valid to be assigned to Response: + // *Acknowledgement_Result + // *Acknowledgement_Error + Response isAcknowledgement_Response `protobuf_oneof:"response"` +} + +func (m *Acknowledgement) Reset() { *m = Acknowledgement{} } +func (m *Acknowledgement) String() string { return proto.CompactTextString(m) } +func (*Acknowledgement) ProtoMessage() {} +func (*Acknowledgement) Descriptor() ([]byte, []int) { + return fileDescriptor_3a7a8797f9808eee, []int{5} +} +func (m *Acknowledgement) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Acknowledgement) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Acknowledgement.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Acknowledgement) XXX_Merge(src proto.Message) { + xxx_messageInfo_Acknowledgement.Merge(m, src) +} +func (m *Acknowledgement) XXX_Size() int { + return m.Size() +} +func (m *Acknowledgement) XXX_DiscardUnknown() { + xxx_messageInfo_Acknowledgement.DiscardUnknown(m) +} + +var xxx_messageInfo_Acknowledgement proto.InternalMessageInfo + +type isAcknowledgement_Response interface { + isAcknowledgement_Response() + MarshalTo([]byte) (int, error) + Size() int +} + +type Acknowledgement_Result struct { + Result []byte `protobuf:"bytes,21,opt,name=result,proto3,oneof" json:"result,omitempty"` +} +type Acknowledgement_Error struct { + Error string `protobuf:"bytes,22,opt,name=error,proto3,oneof" json:"error,omitempty"` +} + +func (*Acknowledgement_Result) isAcknowledgement_Response() {} +func (*Acknowledgement_Error) isAcknowledgement_Response() {} + +func (m *Acknowledgement) GetResponse() isAcknowledgement_Response { + if m != nil { + return m.Response + } + 
return nil +} + +func (m *Acknowledgement) GetResult() []byte { + if x, ok := m.GetResponse().(*Acknowledgement_Result); ok { + return x.Result + } + return nil +} + +func (m *Acknowledgement) GetError() string { + if x, ok := m.GetResponse().(*Acknowledgement_Error); ok { + return x.Error + } + return "" +} + +// XXX_OneofWrappers is for the internal use of the proto package. +func (*Acknowledgement) XXX_OneofWrappers() []interface{} { + return []interface{}{ + (*Acknowledgement_Result)(nil), + (*Acknowledgement_Error)(nil), + } +} + +func init() { + proto.RegisterEnum("ibcgo.core.channel.v1.State", State_name, State_value) + proto.RegisterEnum("ibcgo.core.channel.v1.Order", Order_name, Order_value) + proto.RegisterType((*Channel)(nil), "ibcgo.core.channel.v1.Channel") + proto.RegisterType((*IdentifiedChannel)(nil), "ibcgo.core.channel.v1.IdentifiedChannel") + proto.RegisterType((*Counterparty)(nil), "ibcgo.core.channel.v1.Counterparty") + proto.RegisterType((*Packet)(nil), "ibcgo.core.channel.v1.Packet") + proto.RegisterType((*PacketState)(nil), "ibcgo.core.channel.v1.PacketState") + proto.RegisterType((*Acknowledgement)(nil), "ibcgo.core.channel.v1.Acknowledgement") +} + +func init() { + proto.RegisterFile("ibcgo/core/channel/v1/channel.proto", fileDescriptor_3a7a8797f9808eee) +} + +var fileDescriptor_3a7a8797f9808eee = []byte{ + // 904 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x55, 0xbd, 0x6e, 0xe3, 0x46, + 0x10, 0x16, 0x65, 0xea, 0x6f, 0x64, 0xc9, 0xf2, 0x26, 0xd6, 0x31, 0x8c, 0x4f, 0x54, 0x78, 0x29, + 0x8c, 0x0b, 0x4e, 0x8a, 0x9d, 0x43, 0x12, 0x5c, 0x15, 0x4b, 0xe2, 0x41, 0x04, 0x2e, 0x92, 0x41, + 0xcb, 0x45, 0xae, 0x11, 0x28, 0x72, 0x23, 0x11, 0x27, 0x71, 0x15, 0x72, 0x65, 0xc3, 0x65, 0xba, + 0x83, 0xaa, 0xbc, 0x80, 0x80, 0x00, 0x01, 0xd2, 0xe4, 0x01, 0xf2, 0x0a, 0x57, 0x5e, 0x99, 0x8a, + 0x08, 0xec, 0x3a, 0x8d, 0x9e, 0x20, 0xe0, 0x2e, 0xa9, 0xbf, 0xcb, 0x4f, 0x97, 0x2a, 0x95, 0x76, + 0xe6, 0xfb, 0x66, 0xe6, 0xdb, 0x99, 0x11, 0x17, 0x1e, 0x39, 0x03, 0x6b, 0x48, 0xea, 0x16, 0xf1, + 0x70, 0xdd, 0x1a, 0x99, 0xae, 0x8b, 0xc7, 0xf5, 0xeb, 0xd3, 0xf8, 0x58, 0x9b, 0x7a, 0x84, 0x12, + 0x74, 0xc4, 0x48, 0xb5, 0x90, 0x54, 0x8b, 0x91, 0xeb, 0x53, 0xf9, 0xfd, 0x21, 0x19, 0x12, 0xc6, + 0xa8, 0x87, 0x27, 0x4e, 0x96, 0x3f, 0xda, 0xcc, 0x38, 0x76, 0xb0, 0x4b, 0x59, 0x42, 0x76, 0xe2, + 0x14, 0xf5, 0x97, 0x24, 0x64, 0x9a, 0x3c, 0x0f, 0x3a, 0x83, 0x94, 0x4f, 0x4d, 0x8a, 0x25, 0xa1, + 0x2a, 0x9c, 0x14, 0xcf, 0x8e, 0x6b, 0x7f, 0x59, 0xab, 0x76, 0x19, 0x72, 0x0c, 0x4e, 0x45, 0x5f, + 0x42, 0x96, 0x78, 0x36, 0xf6, 0x1c, 0x77, 0x28, 0x25, 0xff, 0x31, 0xac, 0x1b, 0xd2, 0x8c, 0x15, + 0x1b, 0x7d, 0x0d, 0xfb, 0x16, 0x99, 0xb9, 0x14, 0x7b, 0x53, 0xd3, 0xa3, 0xb7, 0xd2, 0x5e, 0x55, + 0x38, 0xc9, 0x9f, 0x3d, 0xfa, 0x9b, 0xe8, 0xe6, 0x06, 0xb5, 0x21, 0xbe, 0x09, 0x94, 0x84, 0xb1, + 0x15, 0x8e, 0x9a, 0x70, 0x60, 0x11, 0xd7, 0xc5, 0x16, 0x75, 0x88, 0xdb, 0x1f, 0x91, 0xa9, 0x2f, + 0x89, 0xd5, 0xbd, 0x93, 0x5c, 0x43, 0x5e, 0x06, 0x4a, 0xf9, 0xd6, 0x9c, 0x8c, 0x9f, 0xa9, 0x3b, + 0x04, 0xd5, 0x28, 0xae, 0x3d, 0x6d, 0x32, 0xf5, 0x91, 0x04, 0x99, 0x6b, 0xec, 0xf9, 0x0e, 0x71, + 0xa5, 0x54, 0x55, 0x38, 0xc9, 0x19, 0xb1, 0xf9, 0x4c, 0x7c, 0xfd, 0xa3, 0x92, 0x50, 0xff, 0x48, + 0xc2, 0xa1, 0x6e, 0x63, 0x97, 0x3a, 0xdf, 0x3a, 0xd8, 0xfe, 0xbf, 0x6f, 0xff, 0xda, 0x37, 0xf4, + 0x00, 0x32, 0x53, 0xe2, 0xd1, 0xbe, 0x63, 0x4b, 0x69, 0x86, 0xa4, 0x43, 0x53, 0xb7, 0xd1, 0x43, + 0x80, 0x48, 0x66, 0x88, 0x65, 0x18, 0x96, 0x8b, 0x3c, 0xba, 0x1d, 0xf5, 0xfb, 0x06, 0xf6, 
0x37, + 0x2f, 0x80, 0x3e, 0x59, 0x67, 0x0b, 0x7b, 0x9d, 0x6b, 0xa0, 0x65, 0xa0, 0x14, 0xb9, 0xc8, 0x08, + 0x50, 0x57, 0x15, 0x9e, 0x6e, 0x55, 0x48, 0x32, 0xfe, 0xd1, 0x32, 0x50, 0x0e, 0xa3, 0x4b, 0xad, + 0x30, 0xf5, 0xdd, 0xc2, 0xdf, 0x8b, 0x90, 0xbe, 0x30, 0xad, 0x57, 0x98, 0x22, 0x19, 0xb2, 0x3e, + 0xfe, 0x6e, 0x86, 0x5d, 0x8b, 0x0f, 0x58, 0x34, 0x56, 0x36, 0xfa, 0x02, 0xf2, 0x3e, 0x99, 0x79, + 0x16, 0xee, 0x87, 0x35, 0xa3, 0x1a, 0xe5, 0x65, 0xa0, 0x20, 0x5e, 0x63, 0x03, 0x54, 0x0d, 0xe0, + 0xd6, 0x05, 0xf1, 0x28, 0xfa, 0x0a, 0x8a, 0x11, 0x16, 0x55, 0x66, 0x63, 0xcc, 0x35, 0x3e, 0x58, + 0x06, 0xca, 0xd1, 0x56, 0x6c, 0x84, 0xab, 0x46, 0x81, 0x3b, 0xe2, 0xa5, 0x7b, 0x0e, 0x25, 0x1b, + 0xfb, 0xd4, 0x71, 0x4d, 0x36, 0x17, 0x56, 0x5f, 0x64, 0x39, 0x3e, 0x5c, 0x06, 0xca, 0x03, 0x9e, + 0x63, 0x97, 0xa1, 0x1a, 0x07, 0x1b, 0x2e, 0xa6, 0xa4, 0x0b, 0xef, 0x6d, 0xb2, 0x62, 0x39, 0x6c, + 0x8c, 0x8d, 0xca, 0x32, 0x50, 0xe4, 0x77, 0x53, 0xad, 0x34, 0xa1, 0x0d, 0x6f, 0x2c, 0x0c, 0x81, + 0x68, 0x9b, 0xd4, 0x64, 0xe3, 0xde, 0x37, 0xd8, 0x19, 0x0d, 0xa0, 0x48, 0x9d, 0x09, 0x26, 0x33, + 0xda, 0x1f, 0x61, 0x67, 0x38, 0xa2, 0x6c, 0xe0, 0xf9, 0x9d, 0x9d, 0xe7, 0xdf, 0xa5, 0xeb, 0xd3, + 0x5a, 0x9b, 0x71, 0x1a, 0x0f, 0xc3, 0x75, 0x5d, 0x37, 0x64, 0x3b, 0x83, 0x6a, 0x14, 0x22, 0x07, + 0x67, 0x23, 0x1d, 0x0e, 0x63, 0x46, 0xf8, 0xeb, 0x53, 0x73, 0x32, 0x95, 0xb2, 0xe1, 0xc0, 0x1a, + 0xc7, 0xcb, 0x40, 0x91, 0xb6, 0x93, 0xac, 0x28, 0xaa, 0x51, 0x8a, 0x7c, 0xbd, 0xd8, 0x15, 0xed, + 0xc0, 0xcf, 0x02, 0xe4, 0xf9, 0x0e, 0xb0, 0x7f, 0xee, 0x7f, 0xb0, 0x7c, 0x5b, 0xbb, 0xb6, 0xb7, + 0xb3, 0x6b, 0x71, 0x5f, 0xc5, 0x75, 0x5f, 0x23, 0xa1, 0x5d, 0x38, 0x38, 0xb7, 0x5e, 0xb9, 0xe4, + 0x66, 0x8c, 0xed, 0x21, 0x9e, 0x60, 0x97, 0x22, 0x09, 0xd2, 0x1e, 0xf6, 0x67, 0x63, 0x2a, 0x1d, + 0x85, 0xf4, 0x76, 0xc2, 0x88, 0x6c, 0x54, 0x86, 0x14, 0xf6, 0x3c, 0xe2, 0x49, 0xe5, 0x50, 0x53, + 0x3b, 0x61, 0x70, 0xb3, 0x01, 0x90, 0xf5, 0xb0, 0x3f, 0x25, 0xae, 0x8f, 0x1f, 0xff, 0x2a, 0x40, + 0x8a, 0xdf, 0xf9, 0x73, 0x50, 0x2e, 0x7b, 0xe7, 0x3d, 0xad, 0x7f, 0xd5, 0xd1, 0x3b, 0x7a, 0x4f, + 0x3f, 0x7f, 0xa1, 0xbf, 0xd4, 0x5a, 0xfd, 0xab, 0xce, 0xe5, 0x85, 0xd6, 0xd4, 0x9f, 0xeb, 0x5a, + 0xab, 0x94, 0x90, 0x0f, 0xe7, 0x8b, 0x6a, 0x61, 0x8b, 0x80, 0x24, 0x00, 0x1e, 0x17, 0x3a, 0x4b, + 0x82, 0x9c, 0x9d, 0x2f, 0xaa, 0x62, 0x78, 0x46, 0x15, 0x28, 0x70, 0xa4, 0x67, 0x7c, 0xd3, 0xbd, + 0xd0, 0x3a, 0xa5, 0xa4, 0x9c, 0x9f, 0x2f, 0xaa, 0x99, 0xc8, 0x5c, 0x47, 0x32, 0x70, 0x8f, 0x47, + 0x32, 0xe4, 0x18, 0xf6, 0x39, 0xd2, 0x7c, 0xd1, 0xbd, 0xd4, 0x5a, 0x25, 0x51, 0x86, 0xf9, 0xa2, + 0x9a, 0xe6, 0x96, 0x2c, 0xbe, 0xfe, 0xa9, 0x92, 0x78, 0x7c, 0x03, 0x29, 0xf6, 0xbd, 0x44, 0x1f, + 0x43, 0xb9, 0x6b, 0xb4, 0x34, 0xa3, 0xdf, 0xe9, 0x76, 0xb4, 0x1d, 0xbd, 0x2c, 0x65, 0xe8, 0x47, + 0x2a, 0x1c, 0x70, 0xd6, 0x55, 0x87, 0xfd, 0x6a, 0xad, 0x92, 0x20, 0x17, 0xe6, 0x8b, 0x6a, 0x6e, + 0xe5, 0x08, 0x05, 0x73, 0x4e, 0xcc, 0x88, 0x04, 0x47, 0x26, 0x2f, 0xdc, 0x68, 0xbf, 0xb9, 0xab, + 0x08, 0x6f, 0xef, 0x2a, 0xc2, 0xef, 0x77, 0x15, 0xe1, 0x87, 0xfb, 0x4a, 0xe2, 0xed, 0x7d, 0x25, + 0xf1, 0xdb, 0x7d, 0x25, 0xf1, 0xb2, 0x36, 0x74, 0xe8, 0x68, 0x36, 0xa8, 0x59, 0x64, 0x52, 0xb7, + 0x88, 0x3f, 0x21, 0x7e, 0xdd, 0x19, 0x58, 0x4f, 0xe2, 0x77, 0xf9, 0xd3, 0xa7, 0x4f, 0xe2, 0xc7, + 0x9e, 0xde, 0x4e, 0xb1, 0x3f, 0x48, 0xb3, 0x87, 0xf9, 0xb3, 0x3f, 0x03, 0x00, 0x00, 0xff, 0xff, + 0xbc, 0xc9, 0x53, 0x80, 0x0f, 0x08, 0x00, 0x00, +} + +func (m *Channel) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err 
!= nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Channel) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Channel) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Version) > 0 { + i -= len(m.Version) + copy(dAtA[i:], m.Version) + i = encodeVarintChannel(dAtA, i, uint64(len(m.Version))) + i-- + dAtA[i] = 0x2a + } + if len(m.ConnectionHops) > 0 { + for iNdEx := len(m.ConnectionHops) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.ConnectionHops[iNdEx]) + copy(dAtA[i:], m.ConnectionHops[iNdEx]) + i = encodeVarintChannel(dAtA, i, uint64(len(m.ConnectionHops[iNdEx]))) + i-- + dAtA[i] = 0x22 + } + } + { + size, err := m.Counterparty.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintChannel(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + if m.Ordering != 0 { + i = encodeVarintChannel(dAtA, i, uint64(m.Ordering)) + i-- + dAtA[i] = 0x10 + } + if m.State != 0 { + i = encodeVarintChannel(dAtA, i, uint64(m.State)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *IdentifiedChannel) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *IdentifiedChannel) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *IdentifiedChannel) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.ChannelId) > 0 { + i -= len(m.ChannelId) + copy(dAtA[i:], m.ChannelId) + i = encodeVarintChannel(dAtA, i, uint64(len(m.ChannelId))) + i-- + dAtA[i] = 0x3a + } + if len(m.PortId) > 0 { + i -= len(m.PortId) + copy(dAtA[i:], m.PortId) + i = encodeVarintChannel(dAtA, i, uint64(len(m.PortId))) + i-- + dAtA[i] = 0x32 + } + if len(m.Version) > 0 { + i -= len(m.Version) + copy(dAtA[i:], m.Version) + i = encodeVarintChannel(dAtA, i, uint64(len(m.Version))) + i-- + dAtA[i] = 0x2a + } + if len(m.ConnectionHops) > 0 { + for iNdEx := len(m.ConnectionHops) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.ConnectionHops[iNdEx]) + copy(dAtA[i:], m.ConnectionHops[iNdEx]) + i = encodeVarintChannel(dAtA, i, uint64(len(m.ConnectionHops[iNdEx]))) + i-- + dAtA[i] = 0x22 + } + } + { + size, err := m.Counterparty.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintChannel(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + if m.Ordering != 0 { + i = encodeVarintChannel(dAtA, i, uint64(m.Ordering)) + i-- + dAtA[i] = 0x10 + } + if m.State != 0 { + i = encodeVarintChannel(dAtA, i, uint64(m.State)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *Counterparty) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Counterparty) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Counterparty) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.ChannelId) > 0 { + i -= len(m.ChannelId) + copy(dAtA[i:], m.ChannelId) + i = encodeVarintChannel(dAtA, i, uint64(len(m.ChannelId))) + i-- + dAtA[i] = 0x12 + } + if len(m.PortId) > 0 { + i -= len(m.PortId) + 
copy(dAtA[i:], m.PortId) + i = encodeVarintChannel(dAtA, i, uint64(len(m.PortId))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *Packet) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Packet) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Packet) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.TimeoutTimestamp != 0 { + i = encodeVarintChannel(dAtA, i, uint64(m.TimeoutTimestamp)) + i-- + dAtA[i] = 0x40 + } + { + size, err := m.TimeoutHeight.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintChannel(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x3a + if len(m.Data) > 0 { + i -= len(m.Data) + copy(dAtA[i:], m.Data) + i = encodeVarintChannel(dAtA, i, uint64(len(m.Data))) + i-- + dAtA[i] = 0x32 + } + if len(m.DestinationChannel) > 0 { + i -= len(m.DestinationChannel) + copy(dAtA[i:], m.DestinationChannel) + i = encodeVarintChannel(dAtA, i, uint64(len(m.DestinationChannel))) + i-- + dAtA[i] = 0x2a + } + if len(m.DestinationPort) > 0 { + i -= len(m.DestinationPort) + copy(dAtA[i:], m.DestinationPort) + i = encodeVarintChannel(dAtA, i, uint64(len(m.DestinationPort))) + i-- + dAtA[i] = 0x22 + } + if len(m.SourceChannel) > 0 { + i -= len(m.SourceChannel) + copy(dAtA[i:], m.SourceChannel) + i = encodeVarintChannel(dAtA, i, uint64(len(m.SourceChannel))) + i-- + dAtA[i] = 0x1a + } + if len(m.SourcePort) > 0 { + i -= len(m.SourcePort) + copy(dAtA[i:], m.SourcePort) + i = encodeVarintChannel(dAtA, i, uint64(len(m.SourcePort))) + i-- + dAtA[i] = 0x12 + } + if m.Sequence != 0 { + i = encodeVarintChannel(dAtA, i, uint64(m.Sequence)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *PacketState) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PacketState) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PacketState) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Data) > 0 { + i -= len(m.Data) + copy(dAtA[i:], m.Data) + i = encodeVarintChannel(dAtA, i, uint64(len(m.Data))) + i-- + dAtA[i] = 0x22 + } + if m.Sequence != 0 { + i = encodeVarintChannel(dAtA, i, uint64(m.Sequence)) + i-- + dAtA[i] = 0x18 + } + if len(m.ChannelId) > 0 { + i -= len(m.ChannelId) + copy(dAtA[i:], m.ChannelId) + i = encodeVarintChannel(dAtA, i, uint64(len(m.ChannelId))) + i-- + dAtA[i] = 0x12 + } + if len(m.PortId) > 0 { + i -= len(m.PortId) + copy(dAtA[i:], m.PortId) + i = encodeVarintChannel(dAtA, i, uint64(len(m.PortId))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *Acknowledgement) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Acknowledgement) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Acknowledgement) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Response != nil { + { + 
size := m.Response.Size() + i -= size + if _, err := m.Response.MarshalTo(dAtA[i:]); err != nil { + return 0, err + } + } + } + return len(dAtA) - i, nil +} + +func (m *Acknowledgement_Result) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Acknowledgement_Result) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + if m.Result != nil { + i -= len(m.Result) + copy(dAtA[i:], m.Result) + i = encodeVarintChannel(dAtA, i, uint64(len(m.Result))) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xaa + } + return len(dAtA) - i, nil +} +func (m *Acknowledgement_Error) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Acknowledgement_Error) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + i -= len(m.Error) + copy(dAtA[i:], m.Error) + i = encodeVarintChannel(dAtA, i, uint64(len(m.Error))) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xb2 + return len(dAtA) - i, nil +} +func encodeVarintChannel(dAtA []byte, offset int, v uint64) int { + offset -= sovChannel(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *Channel) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.State != 0 { + n += 1 + sovChannel(uint64(m.State)) + } + if m.Ordering != 0 { + n += 1 + sovChannel(uint64(m.Ordering)) + } + l = m.Counterparty.Size() + n += 1 + l + sovChannel(uint64(l)) + if len(m.ConnectionHops) > 0 { + for _, s := range m.ConnectionHops { + l = len(s) + n += 1 + l + sovChannel(uint64(l)) + } + } + l = len(m.Version) + if l > 0 { + n += 1 + l + sovChannel(uint64(l)) + } + return n +} + +func (m *IdentifiedChannel) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.State != 0 { + n += 1 + sovChannel(uint64(m.State)) + } + if m.Ordering != 0 { + n += 1 + sovChannel(uint64(m.Ordering)) + } + l = m.Counterparty.Size() + n += 1 + l + sovChannel(uint64(l)) + if len(m.ConnectionHops) > 0 { + for _, s := range m.ConnectionHops { + l = len(s) + n += 1 + l + sovChannel(uint64(l)) + } + } + l = len(m.Version) + if l > 0 { + n += 1 + l + sovChannel(uint64(l)) + } + l = len(m.PortId) + if l > 0 { + n += 1 + l + sovChannel(uint64(l)) + } + l = len(m.ChannelId) + if l > 0 { + n += 1 + l + sovChannel(uint64(l)) + } + return n +} + +func (m *Counterparty) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.PortId) + if l > 0 { + n += 1 + l + sovChannel(uint64(l)) + } + l = len(m.ChannelId) + if l > 0 { + n += 1 + l + sovChannel(uint64(l)) + } + return n +} + +func (m *Packet) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Sequence != 0 { + n += 1 + sovChannel(uint64(m.Sequence)) + } + l = len(m.SourcePort) + if l > 0 { + n += 1 + l + sovChannel(uint64(l)) + } + l = len(m.SourceChannel) + if l > 0 { + n += 1 + l + sovChannel(uint64(l)) + } + l = len(m.DestinationPort) + if l > 0 { + n += 1 + l + sovChannel(uint64(l)) + } + l = len(m.DestinationChannel) + if l > 0 { + n += 1 + l + sovChannel(uint64(l)) + } + l = len(m.Data) + if l > 0 { + n += 1 + l + sovChannel(uint64(l)) + } + l = m.TimeoutHeight.Size() + n += 1 + l + sovChannel(uint64(l)) + if m.TimeoutTimestamp != 0 { + n += 1 + sovChannel(uint64(m.TimeoutTimestamp)) + } + return n +} + +func (m *PacketState) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.PortId) + if l > 0 { + n += 1 + l + 
sovChannel(uint64(l)) + } + l = len(m.ChannelId) + if l > 0 { + n += 1 + l + sovChannel(uint64(l)) + } + if m.Sequence != 0 { + n += 1 + sovChannel(uint64(m.Sequence)) + } + l = len(m.Data) + if l > 0 { + n += 1 + l + sovChannel(uint64(l)) + } + return n +} + +func (m *Acknowledgement) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Response != nil { + n += m.Response.Size() + } + return n +} + +func (m *Acknowledgement_Result) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Result != nil { + l = len(m.Result) + n += 2 + l + sovChannel(uint64(l)) + } + return n +} +func (m *Acknowledgement_Error) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Error) + n += 2 + l + sovChannel(uint64(l)) + return n +} + +func sovChannel(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozChannel(x uint64) (n int) { + return sovChannel(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *Channel) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowChannel + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Channel: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Channel: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field State", wireType) + } + m.State = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowChannel + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.State |= State(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Ordering", wireType) + } + m.Ordering = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowChannel + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Ordering |= Order(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Counterparty", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowChannel + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthChannel + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthChannel + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Counterparty.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ConnectionHops", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowChannel + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthChannel + } + 
postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthChannel + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ConnectionHops = append(m.ConnectionHops, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Version", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowChannel + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthChannel + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthChannel + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Version = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipChannel(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthChannel + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *IdentifiedChannel) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowChannel + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: IdentifiedChannel: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: IdentifiedChannel: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field State", wireType) + } + m.State = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowChannel + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.State |= State(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Ordering", wireType) + } + m.Ordering = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowChannel + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Ordering |= Order(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Counterparty", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowChannel + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthChannel + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthChannel + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Counterparty.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ConnectionHops", wireType) + } + var stringLen uint64 + for shift := uint(0); ; 
shift += 7 { + if shift >= 64 { + return ErrIntOverflowChannel + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthChannel + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthChannel + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ConnectionHops = append(m.ConnectionHops, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Version", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowChannel + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthChannel + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthChannel + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Version = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PortId", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowChannel + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthChannel + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthChannel + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.PortId = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ChannelId", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowChannel + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthChannel + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthChannel + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ChannelId = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipChannel(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthChannel + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Counterparty) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowChannel + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Counterparty: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: 
Counterparty: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PortId", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowChannel + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthChannel + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthChannel + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.PortId = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ChannelId", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowChannel + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthChannel + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthChannel + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ChannelId = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipChannel(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthChannel + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Packet) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowChannel + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Packet: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Packet: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Sequence", wireType) + } + m.Sequence = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowChannel + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Sequence |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SourcePort", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowChannel + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthChannel + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthChannel + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.SourcePort = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType 
!= 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SourceChannel", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowChannel + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthChannel + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthChannel + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.SourceChannel = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DestinationPort", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowChannel + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthChannel + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthChannel + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.DestinationPort = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DestinationChannel", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowChannel + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthChannel + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthChannel + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.DestinationChannel = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Data", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowChannel + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthChannel + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthChannel + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Data = append(m.Data[:0], dAtA[iNdEx:postIndex]...) 
+ if m.Data == nil { + m.Data = []byte{} + } + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TimeoutHeight", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowChannel + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthChannel + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthChannel + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.TimeoutHeight.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 8: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field TimeoutTimestamp", wireType) + } + m.TimeoutTimestamp = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowChannel + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.TimeoutTimestamp |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipChannel(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthChannel + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PacketState) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowChannel + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PacketState: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PacketState: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PortId", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowChannel + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthChannel + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthChannel + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.PortId = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ChannelId", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowChannel + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthChannel + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthChannel + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ChannelId = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if 
wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Sequence", wireType) + } + m.Sequence = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowChannel + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Sequence |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Data", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowChannel + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthChannel + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthChannel + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Data = append(m.Data[:0], dAtA[iNdEx:postIndex]...) + if m.Data == nil { + m.Data = []byte{} + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipChannel(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthChannel + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Acknowledgement) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowChannel + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Acknowledgement: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Acknowledgement: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 21: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Result", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowChannel + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthChannel + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthChannel + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := make([]byte, postIndex-iNdEx) + copy(v, dAtA[iNdEx:postIndex]) + m.Response = &Acknowledgement_Result{v} + iNdEx = postIndex + case 22: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Error", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowChannel + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthChannel + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthChannel + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Response = &Acknowledgement_Error{string(dAtA[iNdEx:postIndex])} + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := 
skipChannel(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthChannel + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipChannel(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowChannel + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowChannel + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowChannel + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthChannel + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupChannel + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthChannel + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthChannel = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowChannel = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupChannel = fmt.Errorf("proto: unexpected end of group") +) diff --git a/core/04-channel/types/channel_test.go b/core/04-channel/types/channel_test.go new file mode 100644 index 0000000000..30fee4443b --- /dev/null +++ b/core/04-channel/types/channel_test.go @@ -0,0 +1,119 @@ +package types_test + +import ( + "testing" + + "github.com/stretchr/testify/require" + + "github.com/cosmos/cosmos-sdk/x/ibc/core/04-channel/types" +) + +func TestChannelValidateBasic(t *testing.T) { + counterparty := types.Counterparty{"portidone", "channelidone"} + testCases := []struct { + name string + channel types.Channel + expPass bool + }{ + {"valid channel", types.NewChannel(types.TRYOPEN, types.ORDERED, counterparty, connHops, version), true}, + {"invalid state", types.NewChannel(types.UNINITIALIZED, types.ORDERED, counterparty, connHops, version), false}, + {"invalid order", types.NewChannel(types.TRYOPEN, types.NONE, counterparty, connHops, version), false}, + {"more than 1 connection hop", types.NewChannel(types.TRYOPEN, types.ORDERED, counterparty, []string{"connection1", "connection2"}, version), false}, + {"invalid connection hop identifier", types.NewChannel(types.TRYOPEN, types.ORDERED, counterparty, []string{"(invalid)"}, version), false}, + {"invalid counterparty", types.NewChannel(types.TRYOPEN, types.ORDERED, types.NewCounterparty("(invalidport)", "channelidone"), connHops, version), false}, + } + + for i, tc := range testCases { + tc := tc + + err := tc.channel.ValidateBasic() + if tc.expPass { + require.NoError(t, err, "valid test case %d failed: %s", i, tc.name) + } else { + require.Error(t, err, "invalid test case %d passed: %s", i, tc.name) + } + } +} + +func 
TestCounterpartyValidateBasic(t *testing.T) { + testCases := []struct { + name string + counterparty types.Counterparty + expPass bool + }{ + {"valid counterparty", types.Counterparty{"portidone", "channelidone"}, true}, + {"invalid port id", types.Counterparty{"(InvalidPort)", "channelidone"}, false}, + {"invalid channel id", types.Counterparty{"portidone", "(InvalidChannel)"}, false}, + } + + for i, tc := range testCases { + tc := tc + + err := tc.counterparty.ValidateBasic() + if tc.expPass { + require.NoError(t, err, "valid test case %d failed: %s", i, tc.name) + } else { + require.Error(t, err, "invalid test case %d passed: %s", i, tc.name) + } + } +} + +// tests acknowledgement.ValidateBasic and acknowledgement.GetBytes +func (suite TypesTestSuite) TestAcknowledgement() { + testCases := []struct { + name string + ack types.Acknowledgement + expPass bool + }{ + { + "valid successful ack", + types.NewResultAcknowledgement([]byte("success")), + true, + }, + { + "valid failed ack", + types.NewErrorAcknowledgement("error"), + true, + }, + { + "empty successful ack", + types.NewResultAcknowledgement([]byte{}), + false, + }, + { + "empty failed ack", + types.NewErrorAcknowledgement(" "), + false, + }, + { + "nil response", + types.Acknowledgement{ + Response: nil, + }, + false, + }, + } + + for _, tc := range testCases { + tc := tc + + suite.Run(tc.name, func() { + suite.SetupTest() + + err := tc.ack.ValidateBasic() + + if tc.expPass { + suite.Require().NoError(err) + } else { + suite.Require().Error(err) + } + + // expect all acks to be able to be marshaled + suite.NotPanics(func() { + bz := tc.ack.GetBytes() + suite.Require().NotNil(bz) + }) + }) + } + +} diff --git a/core/04-channel/types/codec.go b/core/04-channel/types/codec.go new file mode 100644 index 0000000000..a74f0a7fc9 --- /dev/null +++ b/core/04-channel/types/codec.go @@ -0,0 +1,60 @@ +package types + +import ( + "github.com/cosmos/cosmos-sdk/codec" + codectypes "github.com/cosmos/cosmos-sdk/codec/types" + sdk "github.com/cosmos/cosmos-sdk/types" + "github.com/cosmos/cosmos-sdk/types/msgservice" + "github.com/cosmos/cosmos-sdk/x/ibc/core/exported" +) + +// RegisterInterfaces registers the ibc channel submodule interfaces to protobuf +// Any. +func RegisterInterfaces(registry codectypes.InterfaceRegistry) { + registry.RegisterInterface( + "ibc.core.channel.v1.ChannelI", + (*exported.ChannelI)(nil), + ) + registry.RegisterInterface( + "ibc.core.channel.v1.CounterpartyChannelI", + (*exported.CounterpartyChannelI)(nil), + ) + registry.RegisterInterface( + "ibc.core.channel.v1.PacketI", + (*exported.PacketI)(nil), + ) + registry.RegisterImplementations( + (*exported.ChannelI)(nil), + &Channel{}, + ) + registry.RegisterImplementations( + (*exported.CounterpartyChannelI)(nil), + &Counterparty{}, + ) + registry.RegisterImplementations( + (*exported.PacketI)(nil), + &Packet{}, + ) + registry.RegisterImplementations( + (*sdk.Msg)(nil), + &MsgChannelOpenInit{}, + &MsgChannelOpenTry{}, + &MsgChannelOpenAck{}, + &MsgChannelOpenConfirm{}, + &MsgChannelCloseInit{}, + &MsgChannelCloseConfirm{}, + &MsgRecvPacket{}, + &MsgAcknowledgement{}, + &MsgTimeout{}, + &MsgTimeoutOnClose{}, + ) + + msgservice.RegisterMsgServiceDesc(registry, &_Msg_serviceDesc) +} + +// SubModuleCdc references the global x/ibc/core/04-channel module codec. Note, the codec should +// ONLY be used in certain instances of tests and for JSON encoding. 
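+// (Illustrative note, not part of the generated API surface: an application typically constructs +// its own ProtoCodec from its interface registry, e.g. codec.NewProtoCodec(interfaceRegistry), +// and supplies that codec to the 04-channel keeper instead of relying on SubModuleCdc.) 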
+// +// The actual codec used for serialization should be provided to x/ibc/core/04-channel and +// defined at the application level. +var SubModuleCdc = codec.NewProtoCodec(codectypes.NewInterfaceRegistry()) diff --git a/core/04-channel/types/errors.go b/core/04-channel/types/errors.go new file mode 100644 index 0000000000..82cf773057 --- /dev/null +++ b/core/04-channel/types/errors.go @@ -0,0 +1,28 @@ +package types + +import ( + sdkerrors "github.com/cosmos/cosmos-sdk/types/errors" +) + +// IBC channel sentinel errors +var ( + ErrChannelExists = sdkerrors.Register(SubModuleName, 2, "channel already exists") + ErrChannelNotFound = sdkerrors.Register(SubModuleName, 3, "channel not found") + ErrInvalidChannel = sdkerrors.Register(SubModuleName, 4, "invalid channel") + ErrInvalidChannelState = sdkerrors.Register(SubModuleName, 5, "invalid channel state") + ErrInvalidChannelOrdering = sdkerrors.Register(SubModuleName, 6, "invalid channel ordering") + ErrInvalidCounterparty = sdkerrors.Register(SubModuleName, 7, "invalid counterparty channel") + ErrInvalidChannelCapability = sdkerrors.Register(SubModuleName, 8, "invalid channel capability") + ErrChannelCapabilityNotFound = sdkerrors.Register(SubModuleName, 9, "channel capability not found") + ErrSequenceSendNotFound = sdkerrors.Register(SubModuleName, 10, "sequence send not found") + ErrSequenceReceiveNotFound = sdkerrors.Register(SubModuleName, 11, "sequence receive not found") + ErrSequenceAckNotFound = sdkerrors.Register(SubModuleName, 12, "sequence acknowledgement not found") + ErrInvalidPacket = sdkerrors.Register(SubModuleName, 13, "invalid packet") + ErrPacketTimeout = sdkerrors.Register(SubModuleName, 14, "packet timeout") + ErrTooManyConnectionHops = sdkerrors.Register(SubModuleName, 15, "too many connection hops") + ErrInvalidAcknowledgement = sdkerrors.Register(SubModuleName, 16, "invalid acknowledgement") + ErrPacketCommitmentNotFound = sdkerrors.Register(SubModuleName, 17, "packet commitment not found") + ErrPacketReceived = sdkerrors.Register(SubModuleName, 18, "packet already received") + ErrAcknowledgementExists = sdkerrors.Register(SubModuleName, 19, "acknowledgement for packet already exists") + ErrInvalidChannelIdentifier = sdkerrors.Register(SubModuleName, 20, "invalid channel identifier") +) diff --git a/core/04-channel/types/events.go b/core/04-channel/types/events.go new file mode 100644 index 0000000000..b9ddb3052c --- /dev/null +++ b/core/04-channel/types/events.go @@ -0,0 +1,46 @@ +package types + +import ( + "fmt" + + host "github.com/cosmos/cosmos-sdk/x/ibc/core/24-host" +) + +// IBC channel events +const ( + AttributeKeyConnectionID = "connection_id" + AttributeKeyPortID = "port_id" + AttributeKeyChannelID = "channel_id" + AttributeCounterpartyPortID = "counterparty_port_id" + AttributeCounterpartyChannelID = "counterparty_channel_id" + + EventTypeSendPacket = "send_packet" + EventTypeRecvPacket = "recv_packet" + EventTypeWriteAck = "write_acknowledgement" + EventTypeAcknowledgePacket = "acknowledge_packet" + EventTypeTimeoutPacket = "timeout_packet" + + AttributeKeyData = "packet_data" + AttributeKeyAck = "packet_ack" + AttributeKeyTimeoutHeight = "packet_timeout_height" + AttributeKeyTimeoutTimestamp = "packet_timeout_timestamp" + AttributeKeySequence = "packet_sequence" + AttributeKeySrcPort = "packet_src_port" + AttributeKeySrcChannel = "packet_src_channel" + AttributeKeyDstPort = "packet_dst_port" + AttributeKeyDstChannel = "packet_dst_channel" + AttributeKeyChannelOrdering = "packet_channel_ordering" + 
AttributeKeyConnection = "packet_connection" +) + +// IBC channel events vars +var ( + EventTypeChannelOpenInit = MsgChannelOpenInit{}.Type() + EventTypeChannelOpenTry = MsgChannelOpenTry{}.Type() + EventTypeChannelOpenAck = MsgChannelOpenAck{}.Type() + EventTypeChannelOpenConfirm = MsgChannelOpenConfirm{}.Type() + EventTypeChannelCloseInit = MsgChannelCloseInit{}.Type() + EventTypeChannelCloseConfirm = MsgChannelCloseConfirm{}.Type() + + AttributeValueCategory = fmt.Sprintf("%s_%s", host.ModuleName, SubModuleName) +) diff --git a/core/04-channel/types/expected_keepers.go b/core/04-channel/types/expected_keepers.go new file mode 100644 index 0000000000..d3b74b7e29 --- /dev/null +++ b/core/04-channel/types/expected_keepers.go @@ -0,0 +1,76 @@ +package types + +import ( + sdk "github.com/cosmos/cosmos-sdk/types" + capabilitytypes "github.com/cosmos/cosmos-sdk/x/capability/types" + connectiontypes "github.com/cosmos/cosmos-sdk/x/ibc/core/03-connection/types" + "github.com/cosmos/cosmos-sdk/x/ibc/core/exported" +) + +// ClientKeeper expected account IBC client keeper +type ClientKeeper interface { + GetClientState(ctx sdk.Context, clientID string) (exported.ClientState, bool) + GetClientConsensusState(ctx sdk.Context, clientID string, height exported.Height) (exported.ConsensusState, bool) +} + +// ConnectionKeeper expected account IBC connection keeper +type ConnectionKeeper interface { + GetConnection(ctx sdk.Context, connectionID string) (connectiontypes.ConnectionEnd, bool) + GetTimestampAtHeight( + ctx sdk.Context, + connection connectiontypes.ConnectionEnd, + height exported.Height, + ) (uint64, error) + VerifyChannelState( + ctx sdk.Context, + connection exported.ConnectionI, + height exported.Height, + proof []byte, + portID, + channelID string, + channel exported.ChannelI, + ) error + VerifyPacketCommitment( + ctx sdk.Context, + connection exported.ConnectionI, + height exported.Height, + proof []byte, + portID, + channelID string, + sequence uint64, + commitmentBytes []byte, + ) error + VerifyPacketAcknowledgement( + ctx sdk.Context, + connection exported.ConnectionI, + height exported.Height, + proof []byte, + portID, + channelID string, + sequence uint64, + acknowledgement []byte, + ) error + VerifyPacketReceiptAbsence( + ctx sdk.Context, + connection exported.ConnectionI, + height exported.Height, + proof []byte, + portID, + channelID string, + sequence uint64, + ) error + VerifyNextSequenceRecv( + ctx sdk.Context, + connection exported.ConnectionI, + height exported.Height, + proof []byte, + portID, + channelID string, + nextSequenceRecv uint64, + ) error +} + +// PortKeeper expected account IBC port keeper +type PortKeeper interface { + Authenticate(ctx sdk.Context, key *capabilitytypes.Capability, portID string) bool +} diff --git a/core/04-channel/types/genesis.go b/core/04-channel/types/genesis.go new file mode 100644 index 0000000000..2c431e97b3 --- /dev/null +++ b/core/04-channel/types/genesis.go @@ -0,0 +1,156 @@ +package types + +import ( + "errors" + "fmt" + + host "github.com/cosmos/cosmos-sdk/x/ibc/core/24-host" +) + +// NewPacketState creates a new PacketState instance. +func NewPacketState(portID, channelID string, seq uint64, data []byte) PacketState { + return PacketState{ + PortId: portID, + ChannelId: channelID, + Sequence: seq, + Data: data, + } +} + +// Validate performs basic validation of fields returning an error upon any +// failure. 
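+// Data must be non-nil; emptiness is checked separately in GenesisState.Validate, where +// acknowledgements and commitments require non-empty data while receipts may be empty. 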
+func (pa PacketState) Validate() error { + if pa.Data == nil { + return errors.New("data bytes cannot be nil") + } + return validateGenFields(pa.PortId, pa.ChannelId, pa.Sequence) +} + +// NewPacketSequence creates a new PacketSequence instance. +func NewPacketSequence(portID, channelID string, seq uint64) PacketSequence { + return PacketSequence{ + PortId: portID, + ChannelId: channelID, + Sequence: seq, + } +} + +// Validate performs basic validation of fields returning an error upon any +// failure. +func (ps PacketSequence) Validate() error { + return validateGenFields(ps.PortId, ps.ChannelId, ps.Sequence) +} + +// NewGenesisState creates a GenesisState instance. +func NewGenesisState( + channels []IdentifiedChannel, acks, receipts, commitments []PacketState, + sendSeqs, recvSeqs, ackSeqs []PacketSequence, nextChannelSequence uint64, +) GenesisState { + return GenesisState{ + Channels: channels, + Acknowledgements: acks, + Receipts: receipts, + Commitments: commitments, + SendSequences: sendSeqs, + RecvSequences: recvSeqs, + AckSequences: ackSeqs, + NextChannelSequence: nextChannelSequence, + } +} + +// DefaultGenesisState returns the ibc channel submodule's default genesis state. +func DefaultGenesisState() GenesisState { + return GenesisState{ + Channels: []IdentifiedChannel{}, + Acknowledgements: []PacketState{}, + Receipts: []PacketState{}, + Commitments: []PacketState{}, + SendSequences: []PacketSequence{}, + RecvSequences: []PacketSequence{}, + AckSequences: []PacketSequence{}, + NextChannelSequence: 0, + } +} + +// Validate performs basic genesis state validation returning an error upon any +// failure. +func (gs GenesisState) Validate() error { + // keep track of the max sequence to ensure it is less than + // the next sequence used in creating channel identifiers. 
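+ // Channel identifiers are assumed to use the canonical "channel-N" form; ParseChannelSequence + // extracts N so it can be compared against NextChannelSequence below. 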
+ var maxSequence uint64 = 0 + + for i, channel := range gs.Channels { + sequence, err := ParseChannelSequence(channel.ChannelId) + if err != nil { + return err + } + + if sequence > maxSequence { + maxSequence = sequence + } + + if err := channel.ValidateBasic(); err != nil { + return fmt.Errorf("invalid channel %v channel index %d: %w", channel, i, err) + } + } + + if maxSequence != 0 && maxSequence >= gs.NextChannelSequence { + return fmt.Errorf("next channel sequence %d must be greater than maximum sequence used in channel identifier %d", gs.NextChannelSequence, maxSequence) + } + + for i, ack := range gs.Acknowledgements { + if err := ack.Validate(); err != nil { + return fmt.Errorf("invalid acknowledgement %v ack index %d: %w", ack, i, err) + } + if len(ack.Data) == 0 { + return fmt.Errorf("invalid acknowledgement %v ack index %d: data bytes cannot be empty", ack, i) + } + } + + for i, receipt := range gs.Receipts { + if err := receipt.Validate(); err != nil { + return fmt.Errorf("invalid receipt %v receipt index %d: %w", receipt, i, err) + } + } + + for i, commitment := range gs.Commitments { + if err := commitment.Validate(); err != nil { + return fmt.Errorf("invalid commitment %v index %d: %w", commitment, i, err) + } + if len(commitment.Data) == 0 { + return fmt.Errorf("invalid commitment %v index %d: data bytes cannot be empty", commitment, i) + } + } + + for i, ss := range gs.SendSequences { + if err := ss.Validate(); err != nil { + return fmt.Errorf("invalid send sequence %v index %d: %w", ss, i, err) + } + } + + for i, rs := range gs.RecvSequences { + if err := rs.Validate(); err != nil { + return fmt.Errorf("invalid receive sequence %v index %d: %w", rs, i, err) + } + } + + for i, as := range gs.AckSequences { + if err := as.Validate(); err != nil { + return fmt.Errorf("invalid acknowledgement sequence %v index %d: %w", as, i, err) + } + } + + return nil +} + +func validateGenFields(portID, channelID string, sequence uint64) error { + if err := host.PortIdentifierValidator(portID); err != nil { + return fmt.Errorf("invalid port Id: %w", err) + } + if err := host.ChannelIdentifierValidator(channelID); err != nil { + return fmt.Errorf("invalid channel Id: %w", err) + } + if sequence == 0 { + return errors.New("sequence cannot be 0") + } + return nil +} diff --git a/core/04-channel/types/genesis.pb.go b/core/04-channel/types/genesis.pb.go new file mode 100644 index 0000000000..9c55fc2084 --- /dev/null +++ b/core/04-channel/types/genesis.pb.go @@ -0,0 +1,1017 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: ibcgo/core/channel/v1/genesis.proto + +package types + +import ( + fmt "fmt" + _ "github.com/gogo/protobuf/gogoproto" + proto "github.com/gogo/protobuf/proto" + io "io" + math "math" + math_bits "math/bits" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +// GenesisState defines the ibc channel submodule's genesis state. 
+type GenesisState struct { + Channels []IdentifiedChannel `protobuf:"bytes,1,rep,name=channels,proto3,casttype=IdentifiedChannel" json:"channels"` + Acknowledgements []PacketState `protobuf:"bytes,2,rep,name=acknowledgements,proto3" json:"acknowledgements"` + Commitments []PacketState `protobuf:"bytes,3,rep,name=commitments,proto3" json:"commitments"` + Receipts []PacketState `protobuf:"bytes,4,rep,name=receipts,proto3" json:"receipts"` + SendSequences []PacketSequence `protobuf:"bytes,5,rep,name=send_sequences,json=sendSequences,proto3" json:"send_sequences" yaml:"send_sequences"` + RecvSequences []PacketSequence `protobuf:"bytes,6,rep,name=recv_sequences,json=recvSequences,proto3" json:"recv_sequences" yaml:"recv_sequences"` + AckSequences []PacketSequence `protobuf:"bytes,7,rep,name=ack_sequences,json=ackSequences,proto3" json:"ack_sequences" yaml:"ack_sequences"` + // the sequence for the next generated channel identifier + NextChannelSequence uint64 `protobuf:"varint,8,opt,name=next_channel_sequence,json=nextChannelSequence,proto3" json:"next_channel_sequence,omitempty" yaml:"next_channel_sequence"` +} + +func (m *GenesisState) Reset() { *m = GenesisState{} } +func (m *GenesisState) String() string { return proto.CompactTextString(m) } +func (*GenesisState) ProtoMessage() {} +func (*GenesisState) Descriptor() ([]byte, []int) { + return fileDescriptor_c4d4e081eaaab7c3, []int{0} +} +func (m *GenesisState) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *GenesisState) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_GenesisState.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *GenesisState) XXX_Merge(src proto.Message) { + xxx_messageInfo_GenesisState.Merge(m, src) +} +func (m *GenesisState) XXX_Size() int { + return m.Size() +} +func (m *GenesisState) XXX_DiscardUnknown() { + xxx_messageInfo_GenesisState.DiscardUnknown(m) +} + +var xxx_messageInfo_GenesisState proto.InternalMessageInfo + +func (m *GenesisState) GetChannels() []IdentifiedChannel { + if m != nil { + return m.Channels + } + return nil +} + +func (m *GenesisState) GetAcknowledgements() []PacketState { + if m != nil { + return m.Acknowledgements + } + return nil +} + +func (m *GenesisState) GetCommitments() []PacketState { + if m != nil { + return m.Commitments + } + return nil +} + +func (m *GenesisState) GetReceipts() []PacketState { + if m != nil { + return m.Receipts + } + return nil +} + +func (m *GenesisState) GetSendSequences() []PacketSequence { + if m != nil { + return m.SendSequences + } + return nil +} + +func (m *GenesisState) GetRecvSequences() []PacketSequence { + if m != nil { + return m.RecvSequences + } + return nil +} + +func (m *GenesisState) GetAckSequences() []PacketSequence { + if m != nil { + return m.AckSequences + } + return nil +} + +func (m *GenesisState) GetNextChannelSequence() uint64 { + if m != nil { + return m.NextChannelSequence + } + return 0 +} + +// PacketSequence defines the genesis type necessary to retrieve and store +// next send and receive sequences. 
+type PacketSequence struct { + PortId string `protobuf:"bytes,1,opt,name=port_id,json=portId,proto3" json:"port_id,omitempty" yaml:"port_id"` + ChannelId string `protobuf:"bytes,2,opt,name=channel_id,json=channelId,proto3" json:"channel_id,omitempty" yaml:"channel_id"` + Sequence uint64 `protobuf:"varint,3,opt,name=sequence,proto3" json:"sequence,omitempty"` +} + +func (m *PacketSequence) Reset() { *m = PacketSequence{} } +func (m *PacketSequence) String() string { return proto.CompactTextString(m) } +func (*PacketSequence) ProtoMessage() {} +func (*PacketSequence) Descriptor() ([]byte, []int) { + return fileDescriptor_c4d4e081eaaab7c3, []int{1} +} +func (m *PacketSequence) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PacketSequence) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_PacketSequence.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *PacketSequence) XXX_Merge(src proto.Message) { + xxx_messageInfo_PacketSequence.Merge(m, src) +} +func (m *PacketSequence) XXX_Size() int { + return m.Size() +} +func (m *PacketSequence) XXX_DiscardUnknown() { + xxx_messageInfo_PacketSequence.DiscardUnknown(m) +} + +var xxx_messageInfo_PacketSequence proto.InternalMessageInfo + +func (m *PacketSequence) GetPortId() string { + if m != nil { + return m.PortId + } + return "" +} + +func (m *PacketSequence) GetChannelId() string { + if m != nil { + return m.ChannelId + } + return "" +} + +func (m *PacketSequence) GetSequence() uint64 { + if m != nil { + return m.Sequence + } + return 0 +} + +func init() { + proto.RegisterType((*GenesisState)(nil), "ibcgo.core.channel.v1.GenesisState") + proto.RegisterType((*PacketSequence)(nil), "ibcgo.core.channel.v1.PacketSequence") +} + +func init() { + proto.RegisterFile("ibcgo/core/channel/v1/genesis.proto", fileDescriptor_c4d4e081eaaab7c3) +} + +var fileDescriptor_c4d4e081eaaab7c3 = []byte{ + // 497 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x93, 0xcf, 0x6e, 0xd3, 0x30, + 0x1c, 0x80, 0x9b, 0xb5, 0xeb, 0x3a, 0x6f, 0xad, 0x98, 0x59, 0xa5, 0x50, 0x8d, 0xa4, 0x32, 0x42, + 0xaa, 0x84, 0x96, 0x30, 0xd8, 0x89, 0x63, 0x40, 0x82, 0x72, 0x42, 0x61, 0x27, 0x2e, 0x55, 0xea, + 0xfc, 0x48, 0xad, 0x36, 0x71, 0x89, 0xbd, 0xc2, 0x9e, 0x02, 0x1e, 0x6b, 0xc7, 0x1d, 0x39, 0x45, + 0xa8, 0x7d, 0x02, 0x7a, 0xe4, 0x84, 0xe2, 0xfc, 0x69, 0xab, 0x6d, 0x48, 0x15, 0xb7, 0xd8, 0xfe, + 0xfc, 0x7d, 0xbf, 0x43, 0x8c, 0x9e, 0xb0, 0x21, 0x0d, 0xb8, 0x4d, 0x79, 0x0c, 0x36, 0x1d, 0x79, + 0x51, 0x04, 0x13, 0x7b, 0x76, 0x66, 0x07, 0x10, 0x81, 0x60, 0xc2, 0x9a, 0xc6, 0x5c, 0x72, 0xdc, + 0x56, 0x90, 0x95, 0x42, 0x56, 0x0e, 0x59, 0xb3, 0xb3, 0xce, 0x71, 0xc0, 0x03, 0xae, 0x08, 0x3b, + 0xfd, 0xca, 0xe0, 0xce, 0x3d, 0xc6, 0xe2, 0x9e, 0x82, 0xc8, 0xef, 0x5d, 0x74, 0xf8, 0x36, 0x6b, + 0x7c, 0x94, 0x9e, 0x04, 0x3c, 0x40, 0x8d, 0x9c, 0x10, 0xba, 0xd6, 0xad, 0xf6, 0x0e, 0x5e, 0xf4, + 0xac, 0x3b, 0xab, 0x56, 0xdf, 0x87, 0x48, 0xb2, 0xcf, 0x0c, 0xfc, 0xd7, 0xd9, 0xa6, 0xf3, 0xe8, + 0x3a, 0x31, 0x2b, 0x7f, 0x12, 0xf3, 0xe8, 0xd6, 0x91, 0x5b, 0x4a, 0xf1, 0x05, 0x7a, 0xe0, 0xd1, + 0x71, 0xc4, 0xbf, 0x4e, 0xc0, 0x0f, 0x20, 0x84, 0x48, 0x0a, 0x7d, 0x47, 0x85, 0xc8, 0x3d, 0xa1, + 0x0f, 0x1e, 0x1d, 0x83, 0x54, 0xe3, 0x39, 0xb5, 0x34, 0xe1, 0xde, 0x32, 0xe0, 0xf7, 0xe8, 0x80, + 0xf2, 0x30, 0x64, 0x32, 0x13, 0x56, 0xb7, 0x14, 0xae, 0x5f, 0xc6, 0x6f, 0x50, 
0x23, 0x06, 0x0a, + 0x6c, 0x2a, 0x85, 0x5e, 0xdb, 0x52, 0x54, 0xde, 0xc4, 0x63, 0xd4, 0x12, 0x10, 0xf9, 0x03, 0x01, + 0x5f, 0x2e, 0x21, 0xa2, 0x20, 0xf4, 0x5d, 0xe5, 0x7a, 0xfa, 0x6f, 0x57, 0x4e, 0x3b, 0x8f, 0x53, + 0xdd, 0x32, 0x31, 0xdb, 0x57, 0x5e, 0x38, 0x79, 0x45, 0x36, 0x55, 0xc4, 0x6d, 0xa6, 0x1b, 0x05, + 0xac, 0x62, 0x31, 0xd0, 0xd9, 0x5a, 0xac, 0xfe, 0x1f, 0xb1, 0x4d, 0x15, 0x71, 0x9b, 0xe9, 0xc6, + 0x2a, 0x36, 0x42, 0x4d, 0x8f, 0x8e, 0xd7, 0x5a, 0x7b, 0xdb, 0xb4, 0x4e, 0xf2, 0xd6, 0x71, 0xd6, + 0xda, 0x30, 0x11, 0xf7, 0xd0, 0xa3, 0xe3, 0x55, 0xe9, 0x02, 0xb5, 0x23, 0xf8, 0x26, 0x07, 0xb9, + 0xad, 0x04, 0xf5, 0x46, 0x57, 0xeb, 0xd5, 0x9c, 0xee, 0x32, 0x31, 0x4f, 0x32, 0xcd, 0x9d, 0x18, + 0x71, 0x1f, 0xa6, 0xfb, 0xf9, 0x3f, 0x58, 0x68, 0xc9, 0x77, 0x0d, 0xb5, 0x36, 0x87, 0xc2, 0xcf, + 0xd0, 0xde, 0x94, 0xc7, 0x72, 0xc0, 0x7c, 0x5d, 0xeb, 0x6a, 0xbd, 0x7d, 0x07, 0x2f, 0x13, 0xb3, + 0x95, 0xa9, 0xf3, 0x03, 0xe2, 0xd6, 0xd3, 0xaf, 0xbe, 0x8f, 0xcf, 0x11, 0x2a, 0x4a, 0xcc, 0xd7, + 0x77, 0x14, 0xdf, 0x5e, 0x26, 0xe6, 0x51, 0xc6, 0xaf, 0xce, 0x88, 0xbb, 0x9f, 0x2f, 0xfa, 0x3e, + 0xee, 0xa0, 0x46, 0x39, 0x7e, 0x35, 0x1d, 0xdf, 0x2d, 0xd7, 0xce, 0xbb, 0xeb, 0xb9, 0xa1, 0xdd, + 0xcc, 0x0d, 0xed, 0xd7, 0xdc, 0xd0, 0x7e, 0x2c, 0x8c, 0xca, 0xcd, 0xc2, 0xa8, 0xfc, 0x5c, 0x18, + 0x95, 0x4f, 0x56, 0xc0, 0xe4, 0xe8, 0x72, 0x68, 0x51, 0x1e, 0xda, 0x94, 0x8b, 0x90, 0x0b, 0x9b, + 0x0d, 0xe9, 0x69, 0xf1, 0xae, 0x9f, 0x9f, 0x9f, 0x16, 0x4f, 0x5b, 0x5e, 0x4d, 0x41, 0x0c, 0xeb, + 0xea, 0x59, 0xbf, 0xfc, 0x1b, 0x00, 0x00, 0xff, 0xff, 0x02, 0xd2, 0xd3, 0x2f, 0x4f, 0x04, 0x00, + 0x00, +} + +func (m *GenesisState) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *GenesisState) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *GenesisState) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.NextChannelSequence != 0 { + i = encodeVarintGenesis(dAtA, i, uint64(m.NextChannelSequence)) + i-- + dAtA[i] = 0x40 + } + if len(m.AckSequences) > 0 { + for iNdEx := len(m.AckSequences) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.AckSequences[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenesis(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x3a + } + } + if len(m.RecvSequences) > 0 { + for iNdEx := len(m.RecvSequences) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.RecvSequences[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenesis(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x32 + } + } + if len(m.SendSequences) > 0 { + for iNdEx := len(m.SendSequences) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.SendSequences[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenesis(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2a + } + } + if len(m.Receipts) > 0 { + for iNdEx := len(m.Receipts) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Receipts[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenesis(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } + } + if len(m.Commitments) > 0 { + for iNdEx := len(m.Commitments) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Commitments[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + 
if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenesis(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + } + if len(m.Acknowledgements) > 0 { + for iNdEx := len(m.Acknowledgements) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Acknowledgements[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenesis(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + if len(m.Channels) > 0 { + for iNdEx := len(m.Channels) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Channels[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenesis(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *PacketSequence) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PacketSequence) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PacketSequence) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Sequence != 0 { + i = encodeVarintGenesis(dAtA, i, uint64(m.Sequence)) + i-- + dAtA[i] = 0x18 + } + if len(m.ChannelId) > 0 { + i -= len(m.ChannelId) + copy(dAtA[i:], m.ChannelId) + i = encodeVarintGenesis(dAtA, i, uint64(len(m.ChannelId))) + i-- + dAtA[i] = 0x12 + } + if len(m.PortId) > 0 { + i -= len(m.PortId) + copy(dAtA[i:], m.PortId) + i = encodeVarintGenesis(dAtA, i, uint64(len(m.PortId))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func encodeVarintGenesis(dAtA []byte, offset int, v uint64) int { + offset -= sovGenesis(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *GenesisState) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Channels) > 0 { + for _, e := range m.Channels { + l = e.Size() + n += 1 + l + sovGenesis(uint64(l)) + } + } + if len(m.Acknowledgements) > 0 { + for _, e := range m.Acknowledgements { + l = e.Size() + n += 1 + l + sovGenesis(uint64(l)) + } + } + if len(m.Commitments) > 0 { + for _, e := range m.Commitments { + l = e.Size() + n += 1 + l + sovGenesis(uint64(l)) + } + } + if len(m.Receipts) > 0 { + for _, e := range m.Receipts { + l = e.Size() + n += 1 + l + sovGenesis(uint64(l)) + } + } + if len(m.SendSequences) > 0 { + for _, e := range m.SendSequences { + l = e.Size() + n += 1 + l + sovGenesis(uint64(l)) + } + } + if len(m.RecvSequences) > 0 { + for _, e := range m.RecvSequences { + l = e.Size() + n += 1 + l + sovGenesis(uint64(l)) + } + } + if len(m.AckSequences) > 0 { + for _, e := range m.AckSequences { + l = e.Size() + n += 1 + l + sovGenesis(uint64(l)) + } + } + if m.NextChannelSequence != 0 { + n += 1 + sovGenesis(uint64(m.NextChannelSequence)) + } + return n +} + +func (m *PacketSequence) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.PortId) + if l > 0 { + n += 1 + l + sovGenesis(uint64(l)) + } + l = len(m.ChannelId) + if l > 0 { + n += 1 + l + sovGenesis(uint64(l)) + } + if m.Sequence != 0 { + n += 1 + sovGenesis(uint64(m.Sequence)) + } + return n +} + +func sovGenesis(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozGenesis(x uint64) (n int) { + return sovGenesis(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m 
*GenesisState) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenesis + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: GenesisState: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: GenesisState: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Channels", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenesis + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenesis + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenesis + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Channels = append(m.Channels, IdentifiedChannel{}) + if err := m.Channels[len(m.Channels)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Acknowledgements", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenesis + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenesis + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenesis + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Acknowledgements = append(m.Acknowledgements, PacketState{}) + if err := m.Acknowledgements[len(m.Acknowledgements)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Commitments", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenesis + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenesis + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenesis + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Commitments = append(m.Commitments, PacketState{}) + if err := m.Commitments[len(m.Commitments)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Receipts", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenesis + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenesis + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenesis + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Receipts = append(m.Receipts, 
PacketState{}) + if err := m.Receipts[len(m.Receipts)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SendSequences", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenesis + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenesis + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenesis + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.SendSequences = append(m.SendSequences, PacketSequence{}) + if err := m.SendSequences[len(m.SendSequences)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RecvSequences", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenesis + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenesis + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenesis + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.RecvSequences = append(m.RecvSequences, PacketSequence{}) + if err := m.RecvSequences[len(m.RecvSequences)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AckSequences", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenesis + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenesis + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenesis + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.AckSequences = append(m.AckSequences, PacketSequence{}) + if err := m.AckSequences[len(m.AckSequences)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 8: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field NextChannelSequence", wireType) + } + m.NextChannelSequence = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenesis + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.NextChannelSequence |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipGenesis(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenesis + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PacketSequence) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenesis + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 
0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PacketSequence: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PacketSequence: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PortId", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenesis + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenesis + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenesis + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.PortId = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ChannelId", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenesis + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenesis + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenesis + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ChannelId = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Sequence", wireType) + } + m.Sequence = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenesis + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Sequence |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipGenesis(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenesis + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipGenesis(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenesis + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenesis + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenesis + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthGenesis + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupGenesis + } + depth-- + case 5: + 
iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthGenesis + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthGenesis = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowGenesis = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupGenesis = fmt.Errorf("proto: unexpected end of group") +) diff --git a/core/04-channel/types/genesis_test.go b/core/04-channel/types/genesis_test.go new file mode 100644 index 0000000000..a0d21007a7 --- /dev/null +++ b/core/04-channel/types/genesis_test.go @@ -0,0 +1,225 @@ +package types_test + +import ( + "testing" + + "github.com/stretchr/testify/require" + + "github.com/cosmos/cosmos-sdk/x/ibc/core/04-channel/types" +) + +const ( + testPort1 = "firstport" + testPort2 = "secondport" + testConnectionIDA = "connectionidatob" + + testChannel1 = "channel-0" + testChannel2 = "channel-1" + + testChannelOrder = types.ORDERED + testChannelVersion = "1.0" +) + +func TestValidateGenesis(t *testing.T) { + counterparty1 := types.NewCounterparty(testPort1, testChannel1) + counterparty2 := types.NewCounterparty(testPort2, testChannel2) + testCases := []struct { + name string + genState types.GenesisState + expPass bool + }{ + { + name: "default", + genState: types.DefaultGenesisState(), + expPass: true, + }, + { + name: "valid genesis", + genState: types.NewGenesisState( + []types.IdentifiedChannel{ + types.NewIdentifiedChannel( + testPort1, testChannel1, types.NewChannel( + types.INIT, testChannelOrder, counterparty2, []string{testConnectionIDA}, testChannelVersion, + ), + ), + types.NewIdentifiedChannel( + testPort2, testChannel2, types.NewChannel( + types.INIT, testChannelOrder, counterparty1, []string{testConnectionIDA}, testChannelVersion, + ), + ), + }, + []types.PacketState{ + types.NewPacketState(testPort2, testChannel2, 1, []byte("ack")), + }, + []types.PacketState{ + types.NewPacketState(testPort2, testChannel2, 1, []byte("")), + }, + []types.PacketState{ + types.NewPacketState(testPort1, testChannel1, 1, []byte("commit_hash")), + }, + []types.PacketSequence{ + types.NewPacketSequence(testPort1, testChannel1, 1), + }, + []types.PacketSequence{ + types.NewPacketSequence(testPort2, testChannel2, 1), + }, + []types.PacketSequence{ + types.NewPacketSequence(testPort2, testChannel2, 1), + }, + 2, + ), + expPass: true, + }, + { + name: "invalid channel", + genState: types.GenesisState{ + Channels: []types.IdentifiedChannel{ + types.NewIdentifiedChannel( + testPort1, "(testChannel1)", types.NewChannel( + types.INIT, testChannelOrder, counterparty2, []string{testConnectionIDA}, testChannelVersion, + ), + ), + }, + }, + expPass: false, + }, + { + name: "invalid ack", + genState: types.GenesisState{ + Acknowledgements: []types.PacketState{ + types.NewPacketState(testPort2, testChannel2, 1, nil), + }, + }, + expPass: false, + }, + { + name: "invalid commitment", + genState: types.GenesisState{ + Commitments: []types.PacketState{ + types.NewPacketState(testPort1, testChannel1, 1, nil), + }, + }, + expPass: false, + }, + { + name: "invalid send seq", + genState: types.GenesisState{ + SendSequences: []types.PacketSequence{ + types.NewPacketSequence(testPort1, testChannel1, 0), + }, + }, + expPass: false, + }, + { + name: "invalid recv seq", + genState: types.GenesisState{ + RecvSequences: []types.PacketSequence{ + types.NewPacketSequence(testPort1, "(testChannel1)", 1), + }, + }, + expPass: 
false, + }, + { + name: "invalid recv seq 2", + genState: types.GenesisState{ + RecvSequences: []types.PacketSequence{ + types.NewPacketSequence("(testPort1)", testChannel1, 1), + }, + }, + expPass: false, + }, + { + name: "invalid ack seq", + genState: types.GenesisState{ + AckSequences: []types.PacketSequence{ + types.NewPacketSequence(testPort1, "(testChannel1)", 1), + }, + }, + expPass: false, + }, + { + name: "invalid channel identifier", + genState: types.NewGenesisState( + []types.IdentifiedChannel{ + types.NewIdentifiedChannel( + testPort1, "chan-0", types.NewChannel( + types.INIT, testChannelOrder, counterparty2, []string{testConnectionIDA}, testChannelVersion, + ), + ), + types.NewIdentifiedChannel( + testPort2, testChannel2, types.NewChannel( + types.INIT, testChannelOrder, counterparty1, []string{testConnectionIDA}, testChannelVersion, + ), + ), + }, + []types.PacketState{ + types.NewPacketState(testPort2, testChannel2, 1, []byte("ack")), + }, + []types.PacketState{ + types.NewPacketState(testPort2, testChannel2, 1, []byte("")), + }, + []types.PacketState{ + types.NewPacketState(testPort1, testChannel1, 1, []byte("commit_hash")), + }, + []types.PacketSequence{ + types.NewPacketSequence(testPort1, testChannel1, 1), + }, + []types.PacketSequence{ + types.NewPacketSequence(testPort2, testChannel2, 1), + }, + []types.PacketSequence{ + types.NewPacketSequence(testPort2, testChannel2, 1), + }, + 0, + ), + expPass: false, + }, + { + name: "next channel sequence is less than maximum channel identifier sequence used", + genState: types.NewGenesisState( + []types.IdentifiedChannel{ + types.NewIdentifiedChannel( + testPort1, "channel-10", types.NewChannel( + types.INIT, testChannelOrder, counterparty2, []string{testConnectionIDA}, testChannelVersion, + ), + ), + types.NewIdentifiedChannel( + testPort2, testChannel2, types.NewChannel( + types.INIT, testChannelOrder, counterparty1, []string{testConnectionIDA}, testChannelVersion, + ), + ), + }, + []types.PacketState{ + types.NewPacketState(testPort2, testChannel2, 1, []byte("ack")), + }, + []types.PacketState{ + types.NewPacketState(testPort2, testChannel2, 1, []byte("")), + }, + []types.PacketState{ + types.NewPacketState(testPort1, testChannel1, 1, []byte("commit_hash")), + }, + []types.PacketSequence{ + types.NewPacketSequence(testPort1, testChannel1, 1), + }, + []types.PacketSequence{ + types.NewPacketSequence(testPort2, testChannel2, 1), + }, + []types.PacketSequence{ + types.NewPacketSequence(testPort2, testChannel2, 1), + }, + 0, + ), + expPass: false, + }, + } + + for _, tc := range testCases { + tc := tc + err := tc.genState.Validate() + if tc.expPass { + require.NoError(t, err, tc.name) + } else { + require.Error(t, err, tc.name) + } + } +} diff --git a/core/04-channel/types/keys.go b/core/04-channel/types/keys.go new file mode 100644 index 0000000000..d3a6cde24d --- /dev/null +++ b/core/04-channel/types/keys.go @@ -0,0 +1,61 @@ +package types + +import ( + "fmt" + "regexp" + + sdkerrors "github.com/cosmos/cosmos-sdk/types/errors" + host "github.com/cosmos/cosmos-sdk/x/ibc/core/24-host" +) + +const ( + // SubModuleName defines the IBC channels name + SubModuleName = "channel" + + // StoreKey is the store key string for IBC channels + StoreKey = SubModuleName + + // RouterKey is the message route for IBC channels + RouterKey = SubModuleName + + // QuerierRoute is the querier route for IBC channels + QuerierRoute = SubModuleName + + // KeyNextChannelSequence is the key used to store the next channel sequence in + // the keeper. 
+ KeyNextChannelSequence = "nextChannelSequence" + + // ChannelPrefix is the prefix used when creating a channel identifier + ChannelPrefix = "channel-" +) + +// FormatChannelIdentifier returns the channel identifier with the sequence appended. +// This is a SDK specific format not enforced by IBC protocol. +func FormatChannelIdentifier(sequence uint64) string { + return fmt.Sprintf("%s%d", ChannelPrefix, sequence) +} + +// IsChannelIDFormat checks if a channelID is in the format required on the SDK for +// parsing channel identifiers. The channel identifier must be in the form: `channel-{N} +var IsChannelIDFormat = regexp.MustCompile(`^channel-[0-9]{1,20}$`).MatchString + +// IsValidChannelID checks if a channelID is valid and can be parsed to the channel +// identifier format. +func IsValidChannelID(channelID string) bool { + _, err := ParseChannelSequence(channelID) + return err == nil +} + +// ParseChannelSequence parses the channel sequence from the channel identifier. +func ParseChannelSequence(channelID string) (uint64, error) { + if !IsChannelIDFormat(channelID) { + return 0, sdkerrors.Wrap(host.ErrInvalidID, "channel identifier is not in the format: `channel-{N}`") + } + + sequence, err := host.ParseIdentifier(channelID, ChannelPrefix) + if err != nil { + return 0, sdkerrors.Wrap(err, "invalid channel identifier") + } + + return sequence, nil +} diff --git a/core/04-channel/types/keys_test.go b/core/04-channel/types/keys_test.go new file mode 100644 index 0000000000..9bc6500b9a --- /dev/null +++ b/core/04-channel/types/keys_test.go @@ -0,0 +1,47 @@ +package types_test + +import ( + "testing" + + "github.com/stretchr/testify/require" + + "github.com/cosmos/cosmos-sdk/x/ibc/core/04-channel/types" +) + +// tests ParseChannelSequence and IsValidChannelID +func TestParseChannelSequence(t *testing.T) { + testCases := []struct { + name string + channelID string + expSeq uint64 + expPass bool + }{ + {"valid 0", "channel-0", 0, true}, + {"valid 1", "channel-1", 1, true}, + {"valid large sequence", "channel-234568219356718293", 234568219356718293, true}, + // one above uint64 max + {"invalid uint64", "channel-18446744073709551616", 0, false}, + // uint64 == 20 characters + {"invalid large sequence", "channel-2345682193567182931243", 0, false}, + {"capital prefix", "Channel-0", 0, false}, + {"missing dash", "channel0", 0, false}, + {"blank id", " ", 0, false}, + {"empty id", "", 0, false}, + {"negative sequence", "channel--1", 0, false}, + } + + for _, tc := range testCases { + + seq, err := types.ParseChannelSequence(tc.channelID) + valid := types.IsValidChannelID(tc.channelID) + require.Equal(t, tc.expSeq, seq) + + if tc.expPass { + require.NoError(t, err, tc.name) + require.True(t, valid) + } else { + require.Error(t, err, tc.name) + require.False(t, valid) + } + } +} diff --git a/core/04-channel/types/msgs.go b/core/04-channel/types/msgs.go new file mode 100644 index 0000000000..da14a31030 --- /dev/null +++ b/core/04-channel/types/msgs.go @@ -0,0 +1,652 @@ +package types + +import ( + "encoding/base64" + + sdk "github.com/cosmos/cosmos-sdk/types" + sdkerrors "github.com/cosmos/cosmos-sdk/types/errors" + clienttypes "github.com/cosmos/cosmos-sdk/x/ibc/core/02-client/types" + commitmenttypes "github.com/cosmos/cosmos-sdk/x/ibc/core/23-commitment/types" + host "github.com/cosmos/cosmos-sdk/x/ibc/core/24-host" +) + +var _ sdk.Msg = &MsgChannelOpenInit{} + +// NewMsgChannelOpenInit creates a new MsgChannelOpenInit. It sets the counterparty channel +// identifier to be empty. 
+// nolint:interfacer +func NewMsgChannelOpenInit( + portID, version string, channelOrder Order, connectionHops []string, + counterpartyPortID string, signer sdk.AccAddress, +) *MsgChannelOpenInit { + counterparty := NewCounterparty(counterpartyPortID, "") + channel := NewChannel(INIT, channelOrder, counterparty, connectionHops, version) + return &MsgChannelOpenInit{ + PortId: portID, + Channel: channel, + Signer: signer.String(), + } +} + +// Route implements sdk.Msg +func (msg MsgChannelOpenInit) Route() string { + return host.RouterKey +} + +// Type implements sdk.Msg +func (msg MsgChannelOpenInit) Type() string { + return "channel_open_init" +} + +// ValidateBasic implements sdk.Msg +func (msg MsgChannelOpenInit) ValidateBasic() error { + if err := host.PortIdentifierValidator(msg.PortId); err != nil { + return sdkerrors.Wrap(err, "invalid port ID") + } + if msg.Channel.State != INIT { + return sdkerrors.Wrapf(ErrInvalidChannelState, + "channel state must be INIT in MsgChannelOpenInit. expected: %s, got: %s", + INIT, msg.Channel.State, + ) + } + if msg.Channel.Counterparty.ChannelId != "" { + return sdkerrors.Wrap(ErrInvalidCounterparty, "counterparty channel identifier must be empty") + } + _, err := sdk.AccAddressFromBech32(msg.Signer) + if err != nil { + return sdkerrors.Wrapf(sdkerrors.ErrInvalidAddress, "string could not be parsed as address: %v", err) + } + return msg.Channel.ValidateBasic() +} + +// GetSignBytes implements sdk.Msg. The function will panic since it is used +// for amino transaction verification which IBC does not support. +func (msg MsgChannelOpenInit) GetSignBytes() []byte { + panic("IBC messages do not support amino") +} + +// GetSigners implements sdk.Msg +func (msg MsgChannelOpenInit) GetSigners() []sdk.AccAddress { + signer, err := sdk.AccAddressFromBech32(msg.Signer) + if err != nil { + panic(err) + } + return []sdk.AccAddress{signer} +} + +var _ sdk.Msg = &MsgChannelOpenTry{} + +// NewMsgChannelOpenTry creates a new MsgChannelOpenTry instance +// nolint:interfacer +func NewMsgChannelOpenTry( + portID, previousChannelID, version string, channelOrder Order, connectionHops []string, + counterpartyPortID, counterpartyChannelID, counterpartyVersion string, + proofInit []byte, proofHeight clienttypes.Height, signer sdk.AccAddress, +) *MsgChannelOpenTry { + counterparty := NewCounterparty(counterpartyPortID, counterpartyChannelID) + channel := NewChannel(TRYOPEN, channelOrder, counterparty, connectionHops, version) + return &MsgChannelOpenTry{ + PortId: portID, + PreviousChannelId: previousChannelID, + Channel: channel, + CounterpartyVersion: counterpartyVersion, + ProofInit: proofInit, + ProofHeight: proofHeight, + Signer: signer.String(), + } +} + +// Route implements sdk.Msg +func (msg MsgChannelOpenTry) Route() string { + return host.RouterKey +} + +// Type implements sdk.Msg +func (msg MsgChannelOpenTry) Type() string { + return "channel_open_try" +} + +// ValidateBasic implements sdk.Msg +func (msg MsgChannelOpenTry) ValidateBasic() error { + if err := host.PortIdentifierValidator(msg.PortId); err != nil { + return sdkerrors.Wrap(err, "invalid port ID") + } + if msg.PreviousChannelId != "" { + if !IsValidChannelID(msg.PreviousChannelId) { + return sdkerrors.Wrap(ErrInvalidChannelIdentifier, "invalid previous channel ID") + } + } + if len(msg.ProofInit) == 0 { + return sdkerrors.Wrap(commitmenttypes.ErrInvalidProof, "cannot submit an empty proof init") + } + if msg.ProofHeight.IsZero() { + return sdkerrors.Wrap(sdkerrors.ErrInvalidHeight, "proof height must 
be non-zero") + } + if msg.Channel.State != TRYOPEN { + return sdkerrors.Wrapf(ErrInvalidChannelState, + "channel state must be TRYOPEN in MsgChannelOpenTry. expected: %s, got: %s", + TRYOPEN, msg.Channel.State, + ) + } + // counterparty validate basic allows empty counterparty channel identifiers + if err := host.ChannelIdentifierValidator(msg.Channel.Counterparty.ChannelId); err != nil { + return sdkerrors.Wrap(err, "invalid counterparty channel ID") + } + + _, err := sdk.AccAddressFromBech32(msg.Signer) + if err != nil { + return sdkerrors.Wrapf(sdkerrors.ErrInvalidAddress, "string could not be parsed as address: %v", err) + } + return msg.Channel.ValidateBasic() +} + +// GetSignBytes implements sdk.Msg. The function will panic since it is used +// for amino transaction verification which IBC does not support. +func (msg MsgChannelOpenTry) GetSignBytes() []byte { + panic("IBC messages do not support amino") +} + +// GetSigners implements sdk.Msg +func (msg MsgChannelOpenTry) GetSigners() []sdk.AccAddress { + signer, err := sdk.AccAddressFromBech32(msg.Signer) + if err != nil { + panic(err) + } + return []sdk.AccAddress{signer} +} + +var _ sdk.Msg = &MsgChannelOpenAck{} + +// NewMsgChannelOpenAck creates a new MsgChannelOpenAck instance +// nolint:interfacer +func NewMsgChannelOpenAck( + portID, channelID, counterpartyChannelID string, cpv string, proofTry []byte, proofHeight clienttypes.Height, + signer sdk.AccAddress, +) *MsgChannelOpenAck { + return &MsgChannelOpenAck{ + PortId: portID, + ChannelId: channelID, + CounterpartyChannelId: counterpartyChannelID, + CounterpartyVersion: cpv, + ProofTry: proofTry, + ProofHeight: proofHeight, + Signer: signer.String(), + } +} + +// Route implements sdk.Msg +func (msg MsgChannelOpenAck) Route() string { + return host.RouterKey +} + +// Type implements sdk.Msg +func (msg MsgChannelOpenAck) Type() string { + return "channel_open_ack" +} + +// ValidateBasic implements sdk.Msg +func (msg MsgChannelOpenAck) ValidateBasic() error { + if err := host.PortIdentifierValidator(msg.PortId); err != nil { + return sdkerrors.Wrap(err, "invalid port ID") + } + if !IsValidChannelID(msg.ChannelId) { + return ErrInvalidChannelIdentifier + } + if err := host.ChannelIdentifierValidator(msg.CounterpartyChannelId); err != nil { + return sdkerrors.Wrap(err, "invalid counterparty channel ID") + } + if len(msg.ProofTry) == 0 { + return sdkerrors.Wrap(commitmenttypes.ErrInvalidProof, "cannot submit an empty proof try") + } + if msg.ProofHeight.IsZero() { + return sdkerrors.Wrap(sdkerrors.ErrInvalidHeight, "proof height must be non-zero") + } + _, err := sdk.AccAddressFromBech32(msg.Signer) + if err != nil { + return sdkerrors.Wrapf(sdkerrors.ErrInvalidAddress, "string could not be parsed as address: %v", err) + } + return nil +} + +// GetSignBytes implements sdk.Msg. The function will panic since it is used +// for amino transaction verification which IBC does not support. 
+func (msg MsgChannelOpenAck) GetSignBytes() []byte { + panic("IBC messages do not support amino") +} + +// GetSigners implements sdk.Msg +func (msg MsgChannelOpenAck) GetSigners() []sdk.AccAddress { + signer, err := sdk.AccAddressFromBech32(msg.Signer) + if err != nil { + panic(err) + } + return []sdk.AccAddress{signer} +} + +var _ sdk.Msg = &MsgChannelOpenConfirm{} + +// NewMsgChannelOpenConfirm creates a new MsgChannelOpenConfirm instance +// nolint:interfacer +func NewMsgChannelOpenConfirm( + portID, channelID string, proofAck []byte, proofHeight clienttypes.Height, + signer sdk.AccAddress, +) *MsgChannelOpenConfirm { + return &MsgChannelOpenConfirm{ + PortId: portID, + ChannelId: channelID, + ProofAck: proofAck, + ProofHeight: proofHeight, + Signer: signer.String(), + } +} + +// Route implements sdk.Msg +func (msg MsgChannelOpenConfirm) Route() string { + return host.RouterKey +} + +// Type implements sdk.Msg +func (msg MsgChannelOpenConfirm) Type() string { + return "channel_open_confirm" +} + +// ValidateBasic implements sdk.Msg +func (msg MsgChannelOpenConfirm) ValidateBasic() error { + if err := host.PortIdentifierValidator(msg.PortId); err != nil { + return sdkerrors.Wrap(err, "invalid port ID") + } + if !IsValidChannelID(msg.ChannelId) { + return ErrInvalidChannelIdentifier + } + if len(msg.ProofAck) == 0 { + return sdkerrors.Wrap(commitmenttypes.ErrInvalidProof, "cannot submit an empty proof ack") + } + if msg.ProofHeight.IsZero() { + return sdkerrors.Wrap(sdkerrors.ErrInvalidHeight, "proof height must be non-zero") + } + _, err := sdk.AccAddressFromBech32(msg.Signer) + if err != nil { + return sdkerrors.Wrapf(sdkerrors.ErrInvalidAddress, "string could not be parsed as address: %v", err) + } + return nil +} + +// GetSignBytes implements sdk.Msg. The function will panic since it is used +// for amino transaction verification which IBC does not support. +func (msg MsgChannelOpenConfirm) GetSignBytes() []byte { + panic("IBC messages do not support amino") +} + +// GetSigners implements sdk.Msg +func (msg MsgChannelOpenConfirm) GetSigners() []sdk.AccAddress { + signer, err := sdk.AccAddressFromBech32(msg.Signer) + if err != nil { + panic(err) + } + return []sdk.AccAddress{signer} +} + +var _ sdk.Msg = &MsgChannelCloseInit{} + +// NewMsgChannelCloseInit creates a new MsgChannelCloseInit instance +// nolint:interfacer +func NewMsgChannelCloseInit( + portID string, channelID string, signer sdk.AccAddress, +) *MsgChannelCloseInit { + return &MsgChannelCloseInit{ + PortId: portID, + ChannelId: channelID, + Signer: signer.String(), + } +} + +// Route implements sdk.Msg +func (msg MsgChannelCloseInit) Route() string { + return host.RouterKey +} + +// Type implements sdk.Msg +func (msg MsgChannelCloseInit) Type() string { + return "channel_close_init" +} + +// ValidateBasic implements sdk.Msg +func (msg MsgChannelCloseInit) ValidateBasic() error { + if err := host.PortIdentifierValidator(msg.PortId); err != nil { + return sdkerrors.Wrap(err, "invalid port ID") + } + if !IsValidChannelID(msg.ChannelId) { + return ErrInvalidChannelIdentifier + } + _, err := sdk.AccAddressFromBech32(msg.Signer) + if err != nil { + return sdkerrors.Wrapf(sdkerrors.ErrInvalidAddress, "string could not be parsed as address: %v", err) + } + return nil +} + +// GetSignBytes implements sdk.Msg. The function will panic since it is used +// for amino transaction verification which IBC does not support. 
+func (msg MsgChannelCloseInit) GetSignBytes() []byte { + panic("IBC messages do not support amino") +} + +// GetSigners implements sdk.Msg +func (msg MsgChannelCloseInit) GetSigners() []sdk.AccAddress { + signer, err := sdk.AccAddressFromBech32(msg.Signer) + if err != nil { + panic(err) + } + return []sdk.AccAddress{signer} +} + +var _ sdk.Msg = &MsgChannelCloseConfirm{} + +// NewMsgChannelCloseConfirm creates a new MsgChannelCloseConfirm instance +// nolint:interfacer +func NewMsgChannelCloseConfirm( + portID, channelID string, proofInit []byte, proofHeight clienttypes.Height, + signer sdk.AccAddress, +) *MsgChannelCloseConfirm { + return &MsgChannelCloseConfirm{ + PortId: portID, + ChannelId: channelID, + ProofInit: proofInit, + ProofHeight: proofHeight, + Signer: signer.String(), + } +} + +// Route implements sdk.Msg +func (msg MsgChannelCloseConfirm) Route() string { + return host.RouterKey +} + +// Type implements sdk.Msg +func (msg MsgChannelCloseConfirm) Type() string { + return "channel_close_confirm" +} + +// ValidateBasic implements sdk.Msg +func (msg MsgChannelCloseConfirm) ValidateBasic() error { + if err := host.PortIdentifierValidator(msg.PortId); err != nil { + return sdkerrors.Wrap(err, "invalid port ID") + } + if !IsValidChannelID(msg.ChannelId) { + return ErrInvalidChannelIdentifier + } + if len(msg.ProofInit) == 0 { + return sdkerrors.Wrap(commitmenttypes.ErrInvalidProof, "cannot submit an empty proof init") + } + if msg.ProofHeight.IsZero() { + return sdkerrors.Wrap(sdkerrors.ErrInvalidHeight, "proof height must be non-zero") + } + _, err := sdk.AccAddressFromBech32(msg.Signer) + if err != nil { + return sdkerrors.Wrapf(sdkerrors.ErrInvalidAddress, "string could not be parsed as address: %v", err) + } + return nil +} + +// GetSignBytes implements sdk.Msg. The function will panic since it is used +// for amino transaction verification which IBC does not support. +func (msg MsgChannelCloseConfirm) GetSignBytes() []byte { + panic("IBC messages do not support amino") +} + +// GetSigners implements sdk.Msg +func (msg MsgChannelCloseConfirm) GetSigners() []sdk.AccAddress { + signer, err := sdk.AccAddressFromBech32(msg.Signer) + if err != nil { + panic(err) + } + return []sdk.AccAddress{signer} +} + +var _ sdk.Msg = &MsgRecvPacket{} + +// NewMsgRecvPacket constructs new MsgRecvPacket +// nolint:interfacer +func NewMsgRecvPacket( + packet Packet, proofCommitment []byte, proofHeight clienttypes.Height, + signer sdk.AccAddress, +) *MsgRecvPacket { + return &MsgRecvPacket{ + Packet: packet, + ProofCommitment: proofCommitment, + ProofHeight: proofHeight, + Signer: signer.String(), + } +} + +// Route implements sdk.Msg +func (msg MsgRecvPacket) Route() string { + return host.RouterKey +} + +// ValidateBasic implements sdk.Msg +func (msg MsgRecvPacket) ValidateBasic() error { + if len(msg.ProofCommitment) == 0 { + return sdkerrors.Wrap(commitmenttypes.ErrInvalidProof, "cannot submit an empty proof") + } + if msg.ProofHeight.IsZero() { + return sdkerrors.Wrap(sdkerrors.ErrInvalidHeight, "proof height must be non-zero") + } + _, err := sdk.AccAddressFromBech32(msg.Signer) + if err != nil { + return sdkerrors.Wrapf(sdkerrors.ErrInvalidAddress, "string could not be parsed as address: %v", err) + } + return msg.Packet.ValidateBasic() +} + +// GetSignBytes implements sdk.Msg. The function will panic since it is used +// for amino transaction verification which IBC does not support. 
+func (msg MsgRecvPacket) GetSignBytes() []byte { + panic("IBC messages do not support amino") +} + +// GetDataSignBytes returns the base64-encoded bytes used for the +// data field when signing the packet. +func (msg MsgRecvPacket) GetDataSignBytes() []byte { + s := "\"" + base64.StdEncoding.EncodeToString(msg.Packet.Data) + "\"" + return []byte(s) +} + +// GetSigners implements sdk.Msg +func (msg MsgRecvPacket) GetSigners() []sdk.AccAddress { + signer, err := sdk.AccAddressFromBech32(msg.Signer) + if err != nil { + panic(err) + } + return []sdk.AccAddress{signer} +} + +// Type implements sdk.Msg +func (msg MsgRecvPacket) Type() string { + return "recv_packet" +} + +var _ sdk.Msg = &MsgTimeout{} + +// NewMsgTimeout constructs new MsgTimeout +// nolint:interfacer +func NewMsgTimeout( + packet Packet, nextSequenceRecv uint64, proofUnreceived []byte, + proofHeight clienttypes.Height, signer sdk.AccAddress, +) *MsgTimeout { + return &MsgTimeout{ + Packet: packet, + NextSequenceRecv: nextSequenceRecv, + ProofUnreceived: proofUnreceived, + ProofHeight: proofHeight, + Signer: signer.String(), + } +} + +// Route implements sdk.Msg +func (msg MsgTimeout) Route() string { + return host.RouterKey +} + +// ValidateBasic implements sdk.Msg +func (msg MsgTimeout) ValidateBasic() error { + if len(msg.ProofUnreceived) == 0 { + return sdkerrors.Wrap(commitmenttypes.ErrInvalidProof, "cannot submit an empty unreceived proof") + } + if msg.ProofHeight.IsZero() { + return sdkerrors.Wrap(sdkerrors.ErrInvalidHeight, "proof height must be non-zero") + } + if msg.NextSequenceRecv == 0 { + return sdkerrors.Wrap(sdkerrors.ErrInvalidSequence, "next sequence receive cannot be 0") + } + _, err := sdk.AccAddressFromBech32(msg.Signer) + if err != nil { + return sdkerrors.Wrapf(sdkerrors.ErrInvalidAddress, "string could not be parsed as address: %v", err) + } + return msg.Packet.ValidateBasic() +} + +// GetSignBytes implements sdk.Msg. The function will panic since it is used +// for amino transaction verification which IBC does not support. 
+func (msg MsgTimeout) GetSignBytes() []byte { + panic("IBC messages do not support amino") +} + +// GetSigners implements sdk.Msg +func (msg MsgTimeout) GetSigners() []sdk.AccAddress { + signer, err := sdk.AccAddressFromBech32(msg.Signer) + if err != nil { + panic(err) + } + return []sdk.AccAddress{signer} +} + +// Type implements sdk.Msg +func (msg MsgTimeout) Type() string { + return "timeout_packet" +} + +// NewMsgTimeoutOnClose constructs new MsgTimeoutOnClose +// nolint:interfacer +func NewMsgTimeoutOnClose( + packet Packet, nextSequenceRecv uint64, + proofUnreceived, proofClose []byte, + proofHeight clienttypes.Height, signer sdk.AccAddress, +) *MsgTimeoutOnClose { + return &MsgTimeoutOnClose{ + Packet: packet, + NextSequenceRecv: nextSequenceRecv, + ProofUnreceived: proofUnreceived, + ProofClose: proofClose, + ProofHeight: proofHeight, + Signer: signer.String(), + } +} + +// Route implements sdk.Msg +func (msg MsgTimeoutOnClose) Route() string { + return host.RouterKey +} + +// ValidateBasic implements sdk.Msg +func (msg MsgTimeoutOnClose) ValidateBasic() error { + if msg.NextSequenceRecv == 0 { + return sdkerrors.Wrap(sdkerrors.ErrInvalidSequence, "next sequence receive cannot be 0") + } + if len(msg.ProofUnreceived) == 0 { + return sdkerrors.Wrap(commitmenttypes.ErrInvalidProof, "cannot submit an empty proof") + } + if len(msg.ProofClose) == 0 { + return sdkerrors.Wrap(commitmenttypes.ErrInvalidProof, "cannot submit an empty proof of closed counterparty channel end") + } + if msg.ProofHeight.IsZero() { + return sdkerrors.Wrap(sdkerrors.ErrInvalidHeight, "proof height must be non-zero") + } + _, err := sdk.AccAddressFromBech32(msg.Signer) + if err != nil { + return sdkerrors.Wrapf(sdkerrors.ErrInvalidAddress, "string could not be parsed as address: %v", err) + } + return msg.Packet.ValidateBasic() +} + +// GetSignBytes implements sdk.Msg. The function will panic since it is used +// for amino transaction verification which IBC does not support. 
+func (msg MsgTimeoutOnClose) GetSignBytes() []byte { + panic("IBC messages do not support amino") +} + +// GetSigners implements sdk.Msg +func (msg MsgTimeoutOnClose) GetSigners() []sdk.AccAddress { + signer, err := sdk.AccAddressFromBech32(msg.Signer) + if err != nil { + panic(err) + } + return []sdk.AccAddress{signer} +} + +// Type implements sdk.Msg +func (msg MsgTimeoutOnClose) Type() string { + return "timeout_on_close_packet" +} + +var _ sdk.Msg = &MsgAcknowledgement{} + +// NewMsgAcknowledgement constructs a new MsgAcknowledgement +// nolint:interfacer +func NewMsgAcknowledgement( + packet Packet, + ack, proofAcked []byte, + proofHeight clienttypes.Height, + signer sdk.AccAddress, +) *MsgAcknowledgement { + return &MsgAcknowledgement{ + Packet: packet, + Acknowledgement: ack, + ProofAcked: proofAcked, + ProofHeight: proofHeight, + Signer: signer.String(), + } +} + +// Route implements sdk.Msg +func (msg MsgAcknowledgement) Route() string { + return host.RouterKey +} + +// ValidateBasic implements sdk.Msg +func (msg MsgAcknowledgement) ValidateBasic() error { + if len(msg.ProofAcked) == 0 { + return sdkerrors.Wrap(commitmenttypes.ErrInvalidProof, "cannot submit an empty proof") + } + if msg.ProofHeight.IsZero() { + return sdkerrors.Wrap(sdkerrors.ErrInvalidHeight, "proof height must be non-zero") + } + if len(msg.Acknowledgement) == 0 { + return sdkerrors.Wrap(ErrInvalidAcknowledgement, "ack bytes cannot be empty") + } + _, err := sdk.AccAddressFromBech32(msg.Signer) + if err != nil { + return sdkerrors.Wrapf(sdkerrors.ErrInvalidAddress, "string could not be parsed as address: %v", err) + } + return msg.Packet.ValidateBasic() +} + +// GetSignBytes implements sdk.Msg. The function will panic since it is used +// for amino transaction verification which IBC does not support. 
+func (msg MsgAcknowledgement) GetSignBytes() []byte { + panic("IBC messages do not support amino") +} + +// GetSigners implements sdk.Msg +func (msg MsgAcknowledgement) GetSigners() []sdk.AccAddress { + signer, err := sdk.AccAddressFromBech32(msg.Signer) + if err != nil { + panic(err) + } + return []sdk.AccAddress{signer} +} + +// Type implements sdk.Msg +func (msg MsgAcknowledgement) Type() string { + return "acknowledge_packet" +} diff --git a/core/04-channel/types/msgs_test.go b/core/04-channel/types/msgs_test.go new file mode 100644 index 0000000000..9c27fd69ef --- /dev/null +++ b/core/04-channel/types/msgs_test.go @@ -0,0 +1,446 @@ +package types_test + +import ( + "fmt" + "testing" + + "github.com/stretchr/testify/suite" + + abci "github.com/tendermint/tendermint/abci/types" + dbm "github.com/tendermint/tm-db" + + "github.com/cosmos/cosmos-sdk/simapp" + "github.com/cosmos/cosmos-sdk/store/iavl" + "github.com/cosmos/cosmos-sdk/store/rootmulti" + storetypes "github.com/cosmos/cosmos-sdk/store/types" + sdk "github.com/cosmos/cosmos-sdk/types" + clienttypes "github.com/cosmos/cosmos-sdk/x/ibc/core/02-client/types" + "github.com/cosmos/cosmos-sdk/x/ibc/core/04-channel/types" + commitmenttypes "github.com/cosmos/cosmos-sdk/x/ibc/core/23-commitment/types" + "github.com/cosmos/cosmos-sdk/x/ibc/core/exported" +) + +const ( + // valid constatns used for testing + portid = "testportid" + chanid = "channel-0" + cpportid = "testcpport" + cpchanid = "testcpchannel" + + version = "1.0" + + // invalid constants used for testing + invalidPort = "(invalidport1)" + invalidShortPort = "p" + invalidLongPort = "invalidlongportinvalidlongportinvalidlongportidinvalidlongportidinvalid" + + invalidChannel = "(invalidchannel1)" + invalidShortChannel = "invalid" + invalidLongChannel = "invalidlongchannelinvalidlongchannelinvalidlongchannelinvalidlongchannel" + + invalidConnection = "(invalidconnection1)" + invalidShortConnection = "invalidcn" + invalidLongConnection = "invalidlongconnectioninvalidlongconnectioninvalidlongconnectioninvalid" +) + +// define variables used for testing +var ( + height = clienttypes.NewHeight(0, 1) + timeoutHeight = clienttypes.NewHeight(0, 100) + timeoutTimestamp = uint64(100) + disabledTimeout = clienttypes.ZeroHeight() + validPacketData = []byte("testdata") + unknownPacketData = []byte("unknown") + + packet = types.NewPacket(validPacketData, 1, portid, chanid, cpportid, cpchanid, timeoutHeight, timeoutTimestamp) + invalidPacket = types.NewPacket(unknownPacketData, 0, portid, chanid, cpportid, cpchanid, timeoutHeight, timeoutTimestamp) + + emptyProof = []byte{} + invalidProofs1 = exported.Proof(nil) + invalidProofs2 = emptyProof + + addr = sdk.AccAddress("testaddr111111111111") + emptyAddr sdk.AccAddress + + connHops = []string{"testconnection"} + invalidConnHops = []string{"testconnection", "testconnection"} + invalidShortConnHops = []string{invalidShortConnection} + invalidLongConnHops = []string{invalidLongConnection} +) + +type TypesTestSuite struct { + suite.Suite + + proof []byte +} + +func (suite *TypesTestSuite) SetupTest() { + app := simapp.Setup(false) + db := dbm.NewMemDB() + store := rootmulti.NewStore(db) + storeKey := storetypes.NewKVStoreKey("iavlStoreKey") + + store.MountStoreWithDB(storeKey, storetypes.StoreTypeIAVL, nil) + store.LoadVersion(0) + iavlStore := store.GetCommitStore(storeKey).(*iavl.Store) + + iavlStore.Set([]byte("KEY"), []byte("VALUE")) + _ = store.Commit() + + res := store.Query(abci.RequestQuery{ + Path: fmt.Sprintf("/%s/key", storeKey.Name()), 
// required path to get key/value+proof + Data: []byte("KEY"), + Prove: true, + }) + + merkleProof, err := commitmenttypes.ConvertProofs(res.ProofOps) + suite.Require().NoError(err) + proof, err := app.AppCodec().MarshalBinaryBare(&merkleProof) + suite.Require().NoError(err) + + suite.proof = proof +} + +func TestTypesTestSuite(t *testing.T) { + suite.Run(t, new(TypesTestSuite)) +} + +func (suite *TypesTestSuite) TestMsgChannelOpenInitValidateBasic() { + counterparty := types.NewCounterparty(cpportid, cpchanid) + tryOpenChannel := types.NewChannel(types.TRYOPEN, types.ORDERED, counterparty, connHops, version) + + testCases := []struct { + name string + msg *types.MsgChannelOpenInit + expPass bool + }{ + {"", types.NewMsgChannelOpenInit(portid, version, types.ORDERED, connHops, cpportid, addr), true}, + {"too short port id", types.NewMsgChannelOpenInit(invalidShortPort, version, types.ORDERED, connHops, cpportid, addr), false}, + {"too long port id", types.NewMsgChannelOpenInit(invalidLongPort, version, types.ORDERED, connHops, cpportid, addr), false}, + {"port id contains non-alpha", types.NewMsgChannelOpenInit(invalidPort, version, types.ORDERED, connHops, cpportid, addr), false}, + {"invalid channel order", types.NewMsgChannelOpenInit(portid, version, types.Order(3), connHops, cpportid, addr), false}, + {"connection hops more than 1 ", types.NewMsgChannelOpenInit(portid, version, types.ORDERED, invalidConnHops, cpportid, addr), false}, + {"too short connection id", types.NewMsgChannelOpenInit(portid, version, types.UNORDERED, invalidShortConnHops, cpportid, addr), false}, + {"too long connection id", types.NewMsgChannelOpenInit(portid, version, types.UNORDERED, invalidLongConnHops, cpportid, addr), false}, + {"connection id contains non-alpha", types.NewMsgChannelOpenInit(portid, version, types.UNORDERED, []string{invalidConnection}, cpportid, addr), false}, + {"", types.NewMsgChannelOpenInit(portid, "", types.UNORDERED, connHops, cpportid, addr), true}, + {"invalid counterparty port id", types.NewMsgChannelOpenInit(portid, version, types.UNORDERED, connHops, invalidPort, addr), false}, + {"channel not in INIT state", &types.MsgChannelOpenInit{portid, tryOpenChannel, addr.String()}, false}, + } + + for _, tc := range testCases { + tc := tc + + suite.Run(tc.name, func() { + err := tc.msg.ValidateBasic() + if tc.expPass { + suite.Require().NoError(err) + } else { + suite.Require().Error(err) + } + }) + } +} + +func (suite *TypesTestSuite) TestMsgChannelOpenTryValidateBasic() { + counterparty := types.NewCounterparty(cpportid, cpchanid) + initChannel := types.NewChannel(types.INIT, types.ORDERED, counterparty, connHops, version) + + testCases := []struct { + name string + msg *types.MsgChannelOpenTry + expPass bool + }{ + {"", types.NewMsgChannelOpenTry(portid, chanid, version, types.ORDERED, connHops, cpportid, cpchanid, version, suite.proof, height, addr), true}, + {"too short port id", types.NewMsgChannelOpenTry(invalidShortPort, chanid, version, types.ORDERED, connHops, cpportid, cpchanid, version, suite.proof, height, addr), false}, + {"too long port id", types.NewMsgChannelOpenTry(invalidLongPort, chanid, version, types.ORDERED, connHops, cpportid, cpchanid, version, suite.proof, height, addr), false}, + {"port id contains non-alpha", types.NewMsgChannelOpenTry(invalidPort, chanid, version, types.ORDERED, connHops, cpportid, cpchanid, version, suite.proof, height, addr), false}, + {"too short channel id", types.NewMsgChannelOpenTry(portid, invalidShortChannel, version, types.ORDERED, 
connHops, cpportid, cpchanid, version, suite.proof, height, addr), false}, + {"too long channel id", types.NewMsgChannelOpenTry(portid, invalidLongChannel, version, types.ORDERED, connHops, cpportid, cpchanid, version, suite.proof, height, addr), false}, + {"channel id contains non-alpha", types.NewMsgChannelOpenTry(portid, invalidChannel, version, types.ORDERED, connHops, cpportid, cpchanid, version, suite.proof, height, addr), false}, + {"", types.NewMsgChannelOpenTry(portid, chanid, version, types.ORDERED, connHops, cpportid, cpchanid, "", suite.proof, height, addr), true}, + {"proof height is zero", types.NewMsgChannelOpenTry(portid, chanid, version, types.ORDERED, connHops, cpportid, cpchanid, version, suite.proof, clienttypes.ZeroHeight(), addr), false}, + {"invalid channel order", types.NewMsgChannelOpenTry(portid, chanid, version, types.Order(4), connHops, cpportid, cpchanid, version, suite.proof, height, addr), false}, + {"connection hops more than 1 ", types.NewMsgChannelOpenTry(portid, chanid, version, types.UNORDERED, invalidConnHops, cpportid, cpchanid, version, suite.proof, height, addr), false}, + {"too short connection id", types.NewMsgChannelOpenTry(portid, chanid, version, types.UNORDERED, invalidShortConnHops, cpportid, cpchanid, version, suite.proof, height, addr), false}, + {"too long connection id", types.NewMsgChannelOpenTry(portid, chanid, version, types.UNORDERED, invalidLongConnHops, cpportid, cpchanid, version, suite.proof, height, addr), false}, + {"connection id contains non-alpha", types.NewMsgChannelOpenTry(portid, chanid, version, types.UNORDERED, []string{invalidConnection}, cpportid, cpchanid, version, suite.proof, height, addr), false}, + {"", types.NewMsgChannelOpenTry(portid, chanid, "", types.UNORDERED, connHops, cpportid, cpchanid, version, suite.proof, height, addr), true}, + {"invalid counterparty port id", types.NewMsgChannelOpenTry(portid, chanid, version, types.UNORDERED, connHops, invalidPort, cpchanid, version, suite.proof, height, addr), false}, + {"invalid counterparty channel id", types.NewMsgChannelOpenTry(portid, chanid, version, types.UNORDERED, connHops, cpportid, invalidChannel, version, suite.proof, height, addr), false}, + {"empty proof", types.NewMsgChannelOpenTry(portid, chanid, version, types.UNORDERED, connHops, cpportid, cpchanid, version, emptyProof, height, addr), false}, + {"channel not in TRYOPEN state", &types.MsgChannelOpenTry{portid, chanid, initChannel, version, suite.proof, height, addr.String()}, false}, + } + + for _, tc := range testCases { + tc := tc + + suite.Run(tc.name, func() { + err := tc.msg.ValidateBasic() + + if tc.expPass { + suite.Require().NoError(err) + } else { + suite.Require().Error(err) + } + }) + } +} + +func (suite *TypesTestSuite) TestMsgChannelOpenAckValidateBasic() { + testCases := []struct { + name string + msg *types.MsgChannelOpenAck + expPass bool + }{ + {"", types.NewMsgChannelOpenAck(portid, chanid, chanid, version, suite.proof, height, addr), true}, + {"too short port id", types.NewMsgChannelOpenAck(invalidShortPort, chanid, chanid, version, suite.proof, height, addr), false}, + {"too long port id", types.NewMsgChannelOpenAck(invalidLongPort, chanid, chanid, version, suite.proof, height, addr), false}, + {"port id contains non-alpha", types.NewMsgChannelOpenAck(invalidPort, chanid, chanid, version, suite.proof, height, addr), false}, + {"too short channel id", types.NewMsgChannelOpenAck(portid, invalidShortChannel, chanid, version, suite.proof, height, addr), false}, + {"too long channel 
id", types.NewMsgChannelOpenAck(portid, invalidLongChannel, chanid, version, suite.proof, height, addr), false}, + {"channel id contains non-alpha", types.NewMsgChannelOpenAck(portid, invalidChannel, chanid, version, suite.proof, height, addr), false}, + {"", types.NewMsgChannelOpenAck(portid, chanid, chanid, "", suite.proof, height, addr), true}, + {"empty proof", types.NewMsgChannelOpenAck(portid, chanid, chanid, version, emptyProof, height, addr), false}, + {"proof height is zero", types.NewMsgChannelOpenAck(portid, chanid, chanid, version, suite.proof, clienttypes.ZeroHeight(), addr), false}, + {"invalid counterparty channel id", types.NewMsgChannelOpenAck(portid, chanid, invalidShortChannel, version, suite.proof, height, addr), false}, + } + + for _, tc := range testCases { + tc := tc + + suite.Run(tc.name, func() { + err := tc.msg.ValidateBasic() + + if tc.expPass { + suite.Require().NoError(err) + } else { + suite.Require().Error(err) + } + }) + } +} + +func (suite *TypesTestSuite) TestMsgChannelOpenConfirmValidateBasic() { + testCases := []struct { + name string + msg *types.MsgChannelOpenConfirm + expPass bool + }{ + {"", types.NewMsgChannelOpenConfirm(portid, chanid, suite.proof, height, addr), true}, + {"too short port id", types.NewMsgChannelOpenConfirm(invalidShortPort, chanid, suite.proof, height, addr), false}, + {"too long port id", types.NewMsgChannelOpenConfirm(invalidLongPort, chanid, suite.proof, height, addr), false}, + {"port id contains non-alpha", types.NewMsgChannelOpenConfirm(invalidPort, chanid, suite.proof, height, addr), false}, + {"too short channel id", types.NewMsgChannelOpenConfirm(portid, invalidShortChannel, suite.proof, height, addr), false}, + {"too long channel id", types.NewMsgChannelOpenConfirm(portid, invalidLongChannel, suite.proof, height, addr), false}, + {"channel id contains non-alpha", types.NewMsgChannelOpenConfirm(portid, invalidChannel, suite.proof, height, addr), false}, + {"empty proof", types.NewMsgChannelOpenConfirm(portid, chanid, emptyProof, height, addr), false}, + {"proof height is zero", types.NewMsgChannelOpenConfirm(portid, chanid, suite.proof, clienttypes.ZeroHeight(), addr), false}, + } + + for _, tc := range testCases { + tc := tc + + suite.Run(tc.name, func() { + err := tc.msg.ValidateBasic() + + if tc.expPass { + suite.Require().NoError(err) + } else { + suite.Require().Error(err) + } + }) + } +} + +func (suite *TypesTestSuite) TestMsgChannelCloseInitValidateBasic() { + testCases := []struct { + name string + msg *types.MsgChannelCloseInit + expPass bool + }{ + {"", types.NewMsgChannelCloseInit(portid, chanid, addr), true}, + {"too short port id", types.NewMsgChannelCloseInit(invalidShortPort, chanid, addr), false}, + {"too long port id", types.NewMsgChannelCloseInit(invalidLongPort, chanid, addr), false}, + {"port id contains non-alpha", types.NewMsgChannelCloseInit(invalidPort, chanid, addr), false}, + {"too short channel id", types.NewMsgChannelCloseInit(portid, invalidShortChannel, addr), false}, + {"too long channel id", types.NewMsgChannelCloseInit(portid, invalidLongChannel, addr), false}, + {"channel id contains non-alpha", types.NewMsgChannelCloseInit(portid, invalidChannel, addr), false}, + } + + for _, tc := range testCases { + tc := tc + + suite.Run(tc.name, func() { + err := tc.msg.ValidateBasic() + + if tc.expPass { + suite.Require().NoError(err) + } else { + suite.Require().Error(err) + } + }) + } +} + +func (suite *TypesTestSuite) TestMsgChannelCloseConfirmValidateBasic() { + testCases := []struct { + name 
string + msg *types.MsgChannelCloseConfirm + expPass bool + }{ + {"", types.NewMsgChannelCloseConfirm(portid, chanid, suite.proof, height, addr), true}, + {"too short port id", types.NewMsgChannelCloseConfirm(invalidShortPort, chanid, suite.proof, height, addr), false}, + {"too long port id", types.NewMsgChannelCloseConfirm(invalidLongPort, chanid, suite.proof, height, addr), false}, + {"port id contains non-alpha", types.NewMsgChannelCloseConfirm(invalidPort, chanid, suite.proof, height, addr), false}, + {"too short channel id", types.NewMsgChannelCloseConfirm(portid, invalidShortChannel, suite.proof, height, addr), false}, + {"too long channel id", types.NewMsgChannelCloseConfirm(portid, invalidLongChannel, suite.proof, height, addr), false}, + {"channel id contains non-alpha", types.NewMsgChannelCloseConfirm(portid, invalidChannel, suite.proof, height, addr), false}, + {"empty proof", types.NewMsgChannelCloseConfirm(portid, chanid, emptyProof, height, addr), false}, + {"proof height is zero", types.NewMsgChannelCloseConfirm(portid, chanid, suite.proof, clienttypes.ZeroHeight(), addr), false}, + } + + for _, tc := range testCases { + tc := tc + + suite.Run(tc.name, func() { + err := tc.msg.ValidateBasic() + + if tc.expPass { + suite.Require().NoError(err) + } else { + suite.Require().Error(err) + } + }) + } +} + +func (suite *TypesTestSuite) TestMsgRecvPacketType() { + msg := types.NewMsgRecvPacket(packet, suite.proof, height, addr) + + suite.Equal("recv_packet", msg.Type()) +} + +func (suite *TypesTestSuite) TestMsgRecvPacketValidateBasic() { + testCases := []struct { + name string + msg *types.MsgRecvPacket + expPass bool + }{ + {"success", types.NewMsgRecvPacket(packet, suite.proof, height, addr), true}, + {"proof height is zero", types.NewMsgRecvPacket(packet, suite.proof, clienttypes.ZeroHeight(), addr), false}, + {"proof contain empty proof", types.NewMsgRecvPacket(packet, emptyProof, height, addr), false}, + {"missing signer address", types.NewMsgRecvPacket(packet, suite.proof, height, emptyAddr), false}, + {"invalid packet", types.NewMsgRecvPacket(invalidPacket, suite.proof, height, addr), false}, + } + + for _, tc := range testCases { + tc := tc + + suite.Run(tc.name, func() { + err := tc.msg.ValidateBasic() + + if tc.expPass { + suite.NoError(err) + } else { + suite.Error(err) + } + }) + } +} + +func (suite *TypesTestSuite) TestMsgRecvPacketGetSigners() { + msg := types.NewMsgRecvPacket(packet, suite.proof, height, addr) + res := msg.GetSigners() + + expected := "[7465737461646472313131313131313131313131]" + suite.Equal(expected, fmt.Sprintf("%v", res)) +} + +func (suite *TypesTestSuite) TestMsgTimeoutValidateBasic() { + testCases := []struct { + name string + msg *types.MsgTimeout + expPass bool + }{ + {"success", types.NewMsgTimeout(packet, 1, suite.proof, height, addr), true}, + {"proof height must be > 0", types.NewMsgTimeout(packet, 1, suite.proof, clienttypes.ZeroHeight(), addr), false}, + {"seq 0", types.NewMsgTimeout(packet, 0, suite.proof, height, addr), false}, + {"missing signer address", types.NewMsgTimeout(packet, 1, suite.proof, height, emptyAddr), false}, + {"cannot submit an empty proof", types.NewMsgTimeout(packet, 1, emptyProof, height, addr), false}, + {"invalid packet", types.NewMsgTimeout(invalidPacket, 1, suite.proof, height, addr), false}, + } + + for _, tc := range testCases { + tc := tc + + suite.Run(tc.name, func() { + err := tc.msg.ValidateBasic() + + if tc.expPass { + suite.Require().NoError(err) + } else { + suite.Require().Error(err) + } + }) + } 
+} + +func (suite *TypesTestSuite) TestMsgTimeoutOnCloseValidateBasic() { + testCases := []struct { + name string + msg sdk.Msg + expPass bool + }{ + {"success", types.NewMsgTimeoutOnClose(packet, 1, suite.proof, suite.proof, height, addr), true}, + {"seq 0", types.NewMsgTimeoutOnClose(packet, 0, suite.proof, suite.proof, height, addr), false}, + {"empty proof", types.NewMsgTimeoutOnClose(packet, 1, emptyProof, suite.proof, height, addr), false}, + {"empty proof close", types.NewMsgTimeoutOnClose(packet, 1, suite.proof, emptyProof, height, addr), false}, + {"proof height is zero", types.NewMsgTimeoutOnClose(packet, 1, suite.proof, suite.proof, clienttypes.ZeroHeight(), addr), false}, + {"signer address is empty", types.NewMsgTimeoutOnClose(packet, 1, suite.proof, suite.proof, height, emptyAddr), false}, + {"invalid packet", types.NewMsgTimeoutOnClose(invalidPacket, 1, suite.proof, suite.proof, height, addr), false}, + } + + for _, tc := range testCases { + tc := tc + + suite.Run(tc.name, func() { + err := tc.msg.ValidateBasic() + + if tc.expPass { + suite.Require().NoError(err) + } else { + suite.Require().Error(err) + } + }) + } +} + +func (suite *TypesTestSuite) TestMsgAcknowledgementValidateBasic() { + testCases := []struct { + name string + msg *types.MsgAcknowledgement + expPass bool + }{ + {"success", types.NewMsgAcknowledgement(packet, packet.GetData(), suite.proof, height, addr), true}, + {"proof height must be > 0", types.NewMsgAcknowledgement(packet, packet.GetData(), suite.proof, clienttypes.ZeroHeight(), addr), false}, + {"empty ack", types.NewMsgAcknowledgement(packet, nil, suite.proof, height, addr), false}, + {"missing signer address", types.NewMsgAcknowledgement(packet, packet.GetData(), suite.proof, height, emptyAddr), false}, + {"cannot submit an empty proof", types.NewMsgAcknowledgement(packet, packet.GetData(), emptyProof, height, addr), false}, + {"invalid packet", types.NewMsgAcknowledgement(invalidPacket, packet.GetData(), suite.proof, height, addr), false}, + } + + for _, tc := range testCases { + tc := tc + + suite.Run(tc.name, func() { + err := tc.msg.ValidateBasic() + + if tc.expPass { + suite.Require().NoError(err) + } else { + suite.Require().Error(err) + } + }) + } +} diff --git a/core/04-channel/types/packet.go b/core/04-channel/types/packet.go new file mode 100644 index 0000000000..b5c8d18043 --- /dev/null +++ b/core/04-channel/types/packet.go @@ -0,0 +1,112 @@ +package types + +import ( + "crypto/sha256" + + "github.com/cosmos/cosmos-sdk/codec" + sdk "github.com/cosmos/cosmos-sdk/types" + sdkerrors "github.com/cosmos/cosmos-sdk/types/errors" + clienttypes "github.com/cosmos/cosmos-sdk/x/ibc/core/02-client/types" + host "github.com/cosmos/cosmos-sdk/x/ibc/core/24-host" + "github.com/cosmos/cosmos-sdk/x/ibc/core/exported" +) + +// CommitPacket returns the packet commitment bytes. The commitment consists of: +// sha256_hash(timeout_timestamp + timeout_height.RevisionNumber + timeout_height.RevisionHeight + sha256_hash(data)) +// from a given packet. This results in a fixed length preimage. +// NOTE: sdk.Uint64ToBigEndian sets the uint64 to a slice of length 8. +func CommitPacket(cdc codec.BinaryMarshaler, packet exported.PacketI) []byte { + timeoutHeight := packet.GetTimeoutHeight() + + buf := sdk.Uint64ToBigEndian(packet.GetTimeoutTimestamp()) + + revisionNumber := sdk.Uint64ToBigEndian(timeoutHeight.GetRevisionNumber()) + buf = append(buf, revisionNumber...) 
+ + revisionHeight := sdk.Uint64ToBigEndian(timeoutHeight.GetRevisionHeight()) + buf = append(buf, revisionHeight...) + + dataHash := sha256.Sum256(packet.GetData()) + buf = append(buf, dataHash[:]...) + + hash := sha256.Sum256(buf) + return hash[:] +} + +// CommitAcknowledgement returns the hash of commitment bytes +func CommitAcknowledgement(data []byte) []byte { + hash := sha256.Sum256(data) + return hash[:] +} + +var _ exported.PacketI = (*Packet)(nil) + +// NewPacket creates a new Packet instance. It panics if the provided +// packet data interface is not registered. +func NewPacket( + data []byte, + sequence uint64, sourcePort, sourceChannel, + destinationPort, destinationChannel string, + timeoutHeight clienttypes.Height, timeoutTimestamp uint64, +) Packet { + return Packet{ + Data: data, + Sequence: sequence, + SourcePort: sourcePort, + SourceChannel: sourceChannel, + DestinationPort: destinationPort, + DestinationChannel: destinationChannel, + TimeoutHeight: timeoutHeight, + TimeoutTimestamp: timeoutTimestamp, + } +} + +// GetSequence implements PacketI interface +func (p Packet) GetSequence() uint64 { return p.Sequence } + +// GetSourcePort implements PacketI interface +func (p Packet) GetSourcePort() string { return p.SourcePort } + +// GetSourceChannel implements PacketI interface +func (p Packet) GetSourceChannel() string { return p.SourceChannel } + +// GetDestPort implements PacketI interface +func (p Packet) GetDestPort() string { return p.DestinationPort } + +// GetDestChannel implements PacketI interface +func (p Packet) GetDestChannel() string { return p.DestinationChannel } + +// GetData implements PacketI interface +func (p Packet) GetData() []byte { return p.Data } + +// GetTimeoutHeight implements PacketI interface +func (p Packet) GetTimeoutHeight() exported.Height { return p.TimeoutHeight } + +// GetTimeoutTimestamp implements PacketI interface +func (p Packet) GetTimeoutTimestamp() uint64 { return p.TimeoutTimestamp } + +// ValidateBasic implements PacketI interface +func (p Packet) ValidateBasic() error { + if err := host.PortIdentifierValidator(p.SourcePort); err != nil { + return sdkerrors.Wrap(err, "invalid source port ID") + } + if err := host.PortIdentifierValidator(p.DestinationPort); err != nil { + return sdkerrors.Wrap(err, "invalid destination port ID") + } + if err := host.ChannelIdentifierValidator(p.SourceChannel); err != nil { + return sdkerrors.Wrap(err, "invalid source channel ID") + } + if err := host.ChannelIdentifierValidator(p.DestinationChannel); err != nil { + return sdkerrors.Wrap(err, "invalid destination channel ID") + } + if p.Sequence == 0 { + return sdkerrors.Wrap(ErrInvalidPacket, "packet sequence cannot be 0") + } + if p.TimeoutHeight.IsZero() && p.TimeoutTimestamp == 0 { + return sdkerrors.Wrap(ErrInvalidPacket, "packet timeout height and packet timeout timestamp cannot both be 0") + } + if len(p.Data) == 0 { + return sdkerrors.Wrap(ErrInvalidPacket, "packet data bytes cannot be empty") + } + return nil +} diff --git a/core/04-channel/types/packet_test.go b/core/04-channel/types/packet_test.go new file mode 100644 index 0000000000..12ed828e66 --- /dev/null +++ b/core/04-channel/types/packet_test.go @@ -0,0 +1,53 @@ +package types_test + +import ( + "testing" + + "github.com/stretchr/testify/require" + + "github.com/cosmos/cosmos-sdk/codec" + codectypes "github.com/cosmos/cosmos-sdk/codec/types" + clienttypes "github.com/cosmos/cosmos-sdk/x/ibc/core/02-client/types" + "github.com/cosmos/cosmos-sdk/x/ibc/core/04-channel/types" +) + 
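The CommitPacket doc comment above describes the commitment preimage as the big-endian timeout timestamp, revision number, and revision height, each 8 bytes, followed by the SHA-256 hash of the packet data, with the whole preimage hashed again. The following is a minimal standalone sketch of that layout using only the Go standard library; the function name and example values are made up for illustration and are not part of this patch.

package main

import (
	"crypto/sha256"
	"encoding/binary"
	"fmt"
)

// commitPacketSketch mirrors the documented preimage layout:
// sha256(timeout_timestamp || revision_number || revision_height || sha256(data)),
// with every integer encoded as an 8-byte big-endian value, yielding a
// fixed-length 32-byte commitment.
func commitPacketSketch(timeoutTimestamp, revisionNumber, revisionHeight uint64, data []byte) []byte {
	buf := make([]byte, 0, 3*8+sha256.Size)

	tmp := make([]byte, 8)

	binary.BigEndian.PutUint64(tmp, timeoutTimestamp)
	buf = append(buf, tmp...)

	binary.BigEndian.PutUint64(tmp, revisionNumber)
	buf = append(buf, tmp...)

	binary.BigEndian.PutUint64(tmp, revisionHeight)
	buf = append(buf, tmp...)

	// Hash the packet data first so arbitrarily sized payloads still
	// produce a fixed-length preimage.
	dataHash := sha256.Sum256(data)
	buf = append(buf, dataHash[:]...)

	hash := sha256.Sum256(buf)
	return hash[:]
}

func main() {
	// Example values only: any packet with the same timeout fields and data
	// hashes to the same 32-byte commitment.
	commitment := commitPacketSketch(100, 0, 100, []byte("testdata"))
	fmt.Printf("%x\n", commitment)
}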
+func TestCommitPacket(t *testing.T) { + packet := types.NewPacket(validPacketData, 1, portid, chanid, cpportid, cpchanid, timeoutHeight, timeoutTimestamp) + + registry := codectypes.NewInterfaceRegistry() + clienttypes.RegisterInterfaces(registry) + types.RegisterInterfaces(registry) + + cdc := codec.NewProtoCodec(registry) + + commitment := types.CommitPacket(cdc, &packet) + require.NotNil(t, commitment) +} + +func TestPacketValidateBasic(t *testing.T) { + testCases := []struct { + packet types.Packet + expPass bool + errMsg string + }{ + {types.NewPacket(validPacketData, 1, portid, chanid, cpportid, cpchanid, timeoutHeight, timeoutTimestamp), true, ""}, + {types.NewPacket(validPacketData, 0, portid, chanid, cpportid, cpchanid, timeoutHeight, timeoutTimestamp), false, "invalid sequence"}, + {types.NewPacket(validPacketData, 1, invalidPort, chanid, cpportid, cpchanid, timeoutHeight, timeoutTimestamp), false, "invalid source port"}, + {types.NewPacket(validPacketData, 1, portid, invalidChannel, cpportid, cpchanid, timeoutHeight, timeoutTimestamp), false, "invalid source channel"}, + {types.NewPacket(validPacketData, 1, portid, chanid, invalidPort, cpchanid, timeoutHeight, timeoutTimestamp), false, "invalid destination port"}, + {types.NewPacket(validPacketData, 1, portid, chanid, cpportid, invalidChannel, timeoutHeight, timeoutTimestamp), false, "invalid destination channel"}, + {types.NewPacket(validPacketData, 1, portid, chanid, cpportid, cpchanid, disabledTimeout, 0), false, "disabled both timeout height and timestamp"}, + {types.NewPacket(validPacketData, 1, portid, chanid, cpportid, cpchanid, disabledTimeout, timeoutTimestamp), true, "disabled timeout height, valid timeout timestamp"}, + {types.NewPacket(validPacketData, 1, portid, chanid, cpportid, cpchanid, timeoutHeight, 0), true, "disabled timeout timestamp, valid timeout height"}, + {types.NewPacket(unknownPacketData, 1, portid, chanid, cpportid, cpchanid, timeoutHeight, timeoutTimestamp), true, ""}, + } + + for i, tc := range testCases { + err := tc.packet.ValidateBasic() + if tc.expPass { + require.NoError(t, err, "Msg %d failed: %s", i, tc.errMsg) + } else { + require.Error(t, err, "Invalid Msg %d passed: %s", i, tc.errMsg) + } + } +} diff --git a/core/04-channel/types/query.go b/core/04-channel/types/query.go new file mode 100644 index 0000000000..d1536dfc05 --- /dev/null +++ b/core/04-channel/types/query.go @@ -0,0 +1,94 @@ +package types + +import ( + codectypes "github.com/cosmos/cosmos-sdk/codec/types" + clienttypes "github.com/cosmos/cosmos-sdk/x/ibc/core/02-client/types" + "github.com/cosmos/cosmos-sdk/x/ibc/core/exported" +) + +var ( + _ codectypes.UnpackInterfacesMessage = QueryChannelClientStateResponse{} + _ codectypes.UnpackInterfacesMessage = QueryChannelConsensusStateResponse{} +) + +// NewQueryChannelResponse creates a new QueryChannelResponse instance +func NewQueryChannelResponse(channel Channel, proof []byte, height clienttypes.Height) *QueryChannelResponse { + return &QueryChannelResponse{ + Channel: &channel, + Proof: proof, + ProofHeight: height, + } +} + +// NewQueryChannelClientStateResponse creates a newQueryChannelClientStateResponse instance +func NewQueryChannelClientStateResponse(identifiedClientState clienttypes.IdentifiedClientState, proof []byte, height clienttypes.Height) *QueryChannelClientStateResponse { + return &QueryChannelClientStateResponse{ + IdentifiedClientState: &identifiedClientState, + Proof: proof, + ProofHeight: height, + } +} + +// UnpackInterfaces implements 
UnpackInterfacesMesssage.UnpackInterfaces +func (qccsr QueryChannelClientStateResponse) UnpackInterfaces(unpacker codectypes.AnyUnpacker) error { + return qccsr.IdentifiedClientState.UnpackInterfaces(unpacker) +} + +// NewQueryChannelConsensusStateResponse creates a newQueryChannelConsensusStateResponse instance +func NewQueryChannelConsensusStateResponse(clientID string, anyConsensusState *codectypes.Any, consensusStateHeight exported.Height, proof []byte, height clienttypes.Height) *QueryChannelConsensusStateResponse { + return &QueryChannelConsensusStateResponse{ + ConsensusState: anyConsensusState, + ClientId: clientID, + Proof: proof, + ProofHeight: height, + } +} + +// UnpackInterfaces implements UnpackInterfacesMesssage.UnpackInterfaces +func (qccsr QueryChannelConsensusStateResponse) UnpackInterfaces(unpacker codectypes.AnyUnpacker) error { + return unpacker.UnpackAny(qccsr.ConsensusState, new(exported.ConsensusState)) +} + +// NewQueryPacketCommitmentResponse creates a new QueryPacketCommitmentResponse instance +func NewQueryPacketCommitmentResponse( + commitment []byte, proof []byte, height clienttypes.Height, +) *QueryPacketCommitmentResponse { + return &QueryPacketCommitmentResponse{ + Commitment: commitment, + Proof: proof, + ProofHeight: height, + } +} + +// NewQueryPacketReceiptResponse creates a new QueryPacketReceiptResponse instance +func NewQueryPacketReceiptResponse( + recvd bool, proof []byte, height clienttypes.Height, +) *QueryPacketReceiptResponse { + return &QueryPacketReceiptResponse{ + Received: recvd, + Proof: proof, + ProofHeight: height, + } +} + +// NewQueryPacketAcknowledgementResponse creates a new QueryPacketAcknowledgementResponse instance +func NewQueryPacketAcknowledgementResponse( + acknowledgement []byte, proof []byte, height clienttypes.Height, +) *QueryPacketAcknowledgementResponse { + return &QueryPacketAcknowledgementResponse{ + Acknowledgement: acknowledgement, + Proof: proof, + ProofHeight: height, + } +} + +// NewQueryNextSequenceReceiveResponse creates a new QueryNextSequenceReceiveResponse instance +func NewQueryNextSequenceReceiveResponse( + sequence uint64, proof []byte, height clienttypes.Height, +) *QueryNextSequenceReceiveResponse { + return &QueryNextSequenceReceiveResponse{ + NextSequenceReceive: sequence, + Proof: proof, + ProofHeight: height, + } +} diff --git a/core/04-channel/types/query.pb.go b/core/04-channel/types/query.pb.go new file mode 100644 index 0000000000..7330eaf243 --- /dev/null +++ b/core/04-channel/types/query.pb.go @@ -0,0 +1,7993 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: ibcgo/core/channel/v1/query.proto + +package types + +import ( + context "context" + fmt "fmt" + types1 "github.com/cosmos/cosmos-sdk/codec/types" + query "github.com/cosmos/cosmos-sdk/types/query" + types "github.com/cosmos/ibc-go/core/02-client/types" + _ "github.com/gogo/protobuf/gogoproto" + grpc1 "github.com/gogo/protobuf/grpc" + proto "github.com/gogo/protobuf/proto" + _ "google.golang.org/genproto/googleapis/api/annotations" + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" + io "io" + math "math" + math_bits "math/bits" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. 
+// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +// QueryChannelRequest is the request type for the Query/Channel RPC method +type QueryChannelRequest struct { + // port unique identifier + PortId string `protobuf:"bytes,1,opt,name=port_id,json=portId,proto3" json:"port_id,omitempty"` + // channel unique identifier + ChannelId string `protobuf:"bytes,2,opt,name=channel_id,json=channelId,proto3" json:"channel_id,omitempty"` +} + +func (m *QueryChannelRequest) Reset() { *m = QueryChannelRequest{} } +func (m *QueryChannelRequest) String() string { return proto.CompactTextString(m) } +func (*QueryChannelRequest) ProtoMessage() {} +func (*QueryChannelRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_3acdacc9aeb4fa50, []int{0} +} +func (m *QueryChannelRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *QueryChannelRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_QueryChannelRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *QueryChannelRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_QueryChannelRequest.Merge(m, src) +} +func (m *QueryChannelRequest) XXX_Size() int { + return m.Size() +} +func (m *QueryChannelRequest) XXX_DiscardUnknown() { + xxx_messageInfo_QueryChannelRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_QueryChannelRequest proto.InternalMessageInfo + +func (m *QueryChannelRequest) GetPortId() string { + if m != nil { + return m.PortId + } + return "" +} + +func (m *QueryChannelRequest) GetChannelId() string { + if m != nil { + return m.ChannelId + } + return "" +} + +// QueryChannelResponse is the response type for the Query/Channel RPC method. +// Besides the Channel end, it includes a proof and the height from which the +// proof was retrieved. 
+type QueryChannelResponse struct { + // channel associated with the request identifiers + Channel *Channel `protobuf:"bytes,1,opt,name=channel,proto3" json:"channel,omitempty"` + // merkle proof of existence + Proof []byte `protobuf:"bytes,2,opt,name=proof,proto3" json:"proof,omitempty"` + // height at which the proof was retrieved + ProofHeight types.Height `protobuf:"bytes,3,opt,name=proof_height,json=proofHeight,proto3" json:"proof_height"` +} + +func (m *QueryChannelResponse) Reset() { *m = QueryChannelResponse{} } +func (m *QueryChannelResponse) String() string { return proto.CompactTextString(m) } +func (*QueryChannelResponse) ProtoMessage() {} +func (*QueryChannelResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_3acdacc9aeb4fa50, []int{1} +} +func (m *QueryChannelResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *QueryChannelResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_QueryChannelResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *QueryChannelResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_QueryChannelResponse.Merge(m, src) +} +func (m *QueryChannelResponse) XXX_Size() int { + return m.Size() +} +func (m *QueryChannelResponse) XXX_DiscardUnknown() { + xxx_messageInfo_QueryChannelResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_QueryChannelResponse proto.InternalMessageInfo + +func (m *QueryChannelResponse) GetChannel() *Channel { + if m != nil { + return m.Channel + } + return nil +} + +func (m *QueryChannelResponse) GetProof() []byte { + if m != nil { + return m.Proof + } + return nil +} + +func (m *QueryChannelResponse) GetProofHeight() types.Height { + if m != nil { + return m.ProofHeight + } + return types.Height{} +} + +// QueryChannelsRequest is the request type for the Query/Channels RPC method +type QueryChannelsRequest struct { + // pagination request + Pagination *query.PageRequest `protobuf:"bytes,1,opt,name=pagination,proto3" json:"pagination,omitempty"` +} + +func (m *QueryChannelsRequest) Reset() { *m = QueryChannelsRequest{} } +func (m *QueryChannelsRequest) String() string { return proto.CompactTextString(m) } +func (*QueryChannelsRequest) ProtoMessage() {} +func (*QueryChannelsRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_3acdacc9aeb4fa50, []int{2} +} +func (m *QueryChannelsRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *QueryChannelsRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_QueryChannelsRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *QueryChannelsRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_QueryChannelsRequest.Merge(m, src) +} +func (m *QueryChannelsRequest) XXX_Size() int { + return m.Size() +} +func (m *QueryChannelsRequest) XXX_DiscardUnknown() { + xxx_messageInfo_QueryChannelsRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_QueryChannelsRequest proto.InternalMessageInfo + +func (m *QueryChannelsRequest) GetPagination() *query.PageRequest { + if m != nil { + return m.Pagination + } + return nil +} + +// QueryChannelsResponse is the response type for the Query/Channels RPC method. 
+type QueryChannelsResponse struct { + // list of stored channels of the chain. + Channels []*IdentifiedChannel `protobuf:"bytes,1,rep,name=channels,proto3" json:"channels,omitempty"` + // pagination response + Pagination *query.PageResponse `protobuf:"bytes,2,opt,name=pagination,proto3" json:"pagination,omitempty"` + // query block height + Height types.Height `protobuf:"bytes,3,opt,name=height,proto3" json:"height"` +} + +func (m *QueryChannelsResponse) Reset() { *m = QueryChannelsResponse{} } +func (m *QueryChannelsResponse) String() string { return proto.CompactTextString(m) } +func (*QueryChannelsResponse) ProtoMessage() {} +func (*QueryChannelsResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_3acdacc9aeb4fa50, []int{3} +} +func (m *QueryChannelsResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *QueryChannelsResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_QueryChannelsResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *QueryChannelsResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_QueryChannelsResponse.Merge(m, src) +} +func (m *QueryChannelsResponse) XXX_Size() int { + return m.Size() +} +func (m *QueryChannelsResponse) XXX_DiscardUnknown() { + xxx_messageInfo_QueryChannelsResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_QueryChannelsResponse proto.InternalMessageInfo + +func (m *QueryChannelsResponse) GetChannels() []*IdentifiedChannel { + if m != nil { + return m.Channels + } + return nil +} + +func (m *QueryChannelsResponse) GetPagination() *query.PageResponse { + if m != nil { + return m.Pagination + } + return nil +} + +func (m *QueryChannelsResponse) GetHeight() types.Height { + if m != nil { + return m.Height + } + return types.Height{} +} + +// QueryConnectionChannelsRequest is the request type for the +// Query/QueryConnectionChannels RPC method +type QueryConnectionChannelsRequest struct { + // connection unique identifier + Connection string `protobuf:"bytes,1,opt,name=connection,proto3" json:"connection,omitempty"` + // pagination request + Pagination *query.PageRequest `protobuf:"bytes,2,opt,name=pagination,proto3" json:"pagination,omitempty"` +} + +func (m *QueryConnectionChannelsRequest) Reset() { *m = QueryConnectionChannelsRequest{} } +func (m *QueryConnectionChannelsRequest) String() string { return proto.CompactTextString(m) } +func (*QueryConnectionChannelsRequest) ProtoMessage() {} +func (*QueryConnectionChannelsRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_3acdacc9aeb4fa50, []int{4} +} +func (m *QueryConnectionChannelsRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *QueryConnectionChannelsRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_QueryConnectionChannelsRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *QueryConnectionChannelsRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_QueryConnectionChannelsRequest.Merge(m, src) +} +func (m *QueryConnectionChannelsRequest) XXX_Size() int { + return m.Size() +} +func (m *QueryConnectionChannelsRequest) XXX_DiscardUnknown() { + xxx_messageInfo_QueryConnectionChannelsRequest.DiscardUnknown(m) +} + +var 
xxx_messageInfo_QueryConnectionChannelsRequest proto.InternalMessageInfo + +func (m *QueryConnectionChannelsRequest) GetConnection() string { + if m != nil { + return m.Connection + } + return "" +} + +func (m *QueryConnectionChannelsRequest) GetPagination() *query.PageRequest { + if m != nil { + return m.Pagination + } + return nil +} + +// QueryConnectionChannelsResponse is the Response type for the +// Query/QueryConnectionChannels RPC method +type QueryConnectionChannelsResponse struct { + // list of channels associated with a connection. + Channels []*IdentifiedChannel `protobuf:"bytes,1,rep,name=channels,proto3" json:"channels,omitempty"` + // pagination response + Pagination *query.PageResponse `protobuf:"bytes,2,opt,name=pagination,proto3" json:"pagination,omitempty"` + // query block height + Height types.Height `protobuf:"bytes,3,opt,name=height,proto3" json:"height"` +} + +func (m *QueryConnectionChannelsResponse) Reset() { *m = QueryConnectionChannelsResponse{} } +func (m *QueryConnectionChannelsResponse) String() string { return proto.CompactTextString(m) } +func (*QueryConnectionChannelsResponse) ProtoMessage() {} +func (*QueryConnectionChannelsResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_3acdacc9aeb4fa50, []int{5} +} +func (m *QueryConnectionChannelsResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *QueryConnectionChannelsResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_QueryConnectionChannelsResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *QueryConnectionChannelsResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_QueryConnectionChannelsResponse.Merge(m, src) +} +func (m *QueryConnectionChannelsResponse) XXX_Size() int { + return m.Size() +} +func (m *QueryConnectionChannelsResponse) XXX_DiscardUnknown() { + xxx_messageInfo_QueryConnectionChannelsResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_QueryConnectionChannelsResponse proto.InternalMessageInfo + +func (m *QueryConnectionChannelsResponse) GetChannels() []*IdentifiedChannel { + if m != nil { + return m.Channels + } + return nil +} + +func (m *QueryConnectionChannelsResponse) GetPagination() *query.PageResponse { + if m != nil { + return m.Pagination + } + return nil +} + +func (m *QueryConnectionChannelsResponse) GetHeight() types.Height { + if m != nil { + return m.Height + } + return types.Height{} +} + +// QueryChannelClientStateRequest is the request type for the Query/ClientState +// RPC method +type QueryChannelClientStateRequest struct { + // port unique identifier + PortId string `protobuf:"bytes,1,opt,name=port_id,json=portId,proto3" json:"port_id,omitempty"` + // channel unique identifier + ChannelId string `protobuf:"bytes,2,opt,name=channel_id,json=channelId,proto3" json:"channel_id,omitempty"` +} + +func (m *QueryChannelClientStateRequest) Reset() { *m = QueryChannelClientStateRequest{} } +func (m *QueryChannelClientStateRequest) String() string { return proto.CompactTextString(m) } +func (*QueryChannelClientStateRequest) ProtoMessage() {} +func (*QueryChannelClientStateRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_3acdacc9aeb4fa50, []int{6} +} +func (m *QueryChannelClientStateRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *QueryChannelClientStateRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, 
error) { + if deterministic { + return xxx_messageInfo_QueryChannelClientStateRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *QueryChannelClientStateRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_QueryChannelClientStateRequest.Merge(m, src) +} +func (m *QueryChannelClientStateRequest) XXX_Size() int { + return m.Size() +} +func (m *QueryChannelClientStateRequest) XXX_DiscardUnknown() { + xxx_messageInfo_QueryChannelClientStateRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_QueryChannelClientStateRequest proto.InternalMessageInfo + +func (m *QueryChannelClientStateRequest) GetPortId() string { + if m != nil { + return m.PortId + } + return "" +} + +func (m *QueryChannelClientStateRequest) GetChannelId() string { + if m != nil { + return m.ChannelId + } + return "" +} + +// QueryChannelClientStateResponse is the Response type for the +// Query/QueryChannelClientState RPC method +type QueryChannelClientStateResponse struct { + // client state associated with the channel + IdentifiedClientState *types.IdentifiedClientState `protobuf:"bytes,1,opt,name=identified_client_state,json=identifiedClientState,proto3" json:"identified_client_state,omitempty"` + // merkle proof of existence + Proof []byte `protobuf:"bytes,2,opt,name=proof,proto3" json:"proof,omitempty"` + // height at which the proof was retrieved + ProofHeight types.Height `protobuf:"bytes,3,opt,name=proof_height,json=proofHeight,proto3" json:"proof_height"` +} + +func (m *QueryChannelClientStateResponse) Reset() { *m = QueryChannelClientStateResponse{} } +func (m *QueryChannelClientStateResponse) String() string { return proto.CompactTextString(m) } +func (*QueryChannelClientStateResponse) ProtoMessage() {} +func (*QueryChannelClientStateResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_3acdacc9aeb4fa50, []int{7} +} +func (m *QueryChannelClientStateResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *QueryChannelClientStateResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_QueryChannelClientStateResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *QueryChannelClientStateResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_QueryChannelClientStateResponse.Merge(m, src) +} +func (m *QueryChannelClientStateResponse) XXX_Size() int { + return m.Size() +} +func (m *QueryChannelClientStateResponse) XXX_DiscardUnknown() { + xxx_messageInfo_QueryChannelClientStateResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_QueryChannelClientStateResponse proto.InternalMessageInfo + +func (m *QueryChannelClientStateResponse) GetIdentifiedClientState() *types.IdentifiedClientState { + if m != nil { + return m.IdentifiedClientState + } + return nil +} + +func (m *QueryChannelClientStateResponse) GetProof() []byte { + if m != nil { + return m.Proof + } + return nil +} + +func (m *QueryChannelClientStateResponse) GetProofHeight() types.Height { + if m != nil { + return m.ProofHeight + } + return types.Height{} +} + +// QueryChannelConsensusStateRequest is the request type for the +// Query/ConsensusState RPC method +type QueryChannelConsensusStateRequest struct { + // port unique identifier + PortId string `protobuf:"bytes,1,opt,name=port_id,json=portId,proto3" 
json:"port_id,omitempty"` + // channel unique identifier + ChannelId string `protobuf:"bytes,2,opt,name=channel_id,json=channelId,proto3" json:"channel_id,omitempty"` + // revision number of the consensus state + RevisionNumber uint64 `protobuf:"varint,3,opt,name=revision_number,json=revisionNumber,proto3" json:"revision_number,omitempty"` + // revision height of the consensus state + RevisionHeight uint64 `protobuf:"varint,4,opt,name=revision_height,json=revisionHeight,proto3" json:"revision_height,omitempty"` +} + +func (m *QueryChannelConsensusStateRequest) Reset() { *m = QueryChannelConsensusStateRequest{} } +func (m *QueryChannelConsensusStateRequest) String() string { return proto.CompactTextString(m) } +func (*QueryChannelConsensusStateRequest) ProtoMessage() {} +func (*QueryChannelConsensusStateRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_3acdacc9aeb4fa50, []int{8} +} +func (m *QueryChannelConsensusStateRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *QueryChannelConsensusStateRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_QueryChannelConsensusStateRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *QueryChannelConsensusStateRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_QueryChannelConsensusStateRequest.Merge(m, src) +} +func (m *QueryChannelConsensusStateRequest) XXX_Size() int { + return m.Size() +} +func (m *QueryChannelConsensusStateRequest) XXX_DiscardUnknown() { + xxx_messageInfo_QueryChannelConsensusStateRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_QueryChannelConsensusStateRequest proto.InternalMessageInfo + +func (m *QueryChannelConsensusStateRequest) GetPortId() string { + if m != nil { + return m.PortId + } + return "" +} + +func (m *QueryChannelConsensusStateRequest) GetChannelId() string { + if m != nil { + return m.ChannelId + } + return "" +} + +func (m *QueryChannelConsensusStateRequest) GetRevisionNumber() uint64 { + if m != nil { + return m.RevisionNumber + } + return 0 +} + +func (m *QueryChannelConsensusStateRequest) GetRevisionHeight() uint64 { + if m != nil { + return m.RevisionHeight + } + return 0 +} + +// QueryChannelClientStateResponse is the Response type for the +// Query/QueryChannelClientState RPC method +type QueryChannelConsensusStateResponse struct { + // consensus state associated with the channel + ConsensusState *types1.Any `protobuf:"bytes,1,opt,name=consensus_state,json=consensusState,proto3" json:"consensus_state,omitempty"` + // client ID associated with the consensus state + ClientId string `protobuf:"bytes,2,opt,name=client_id,json=clientId,proto3" json:"client_id,omitempty"` + // merkle proof of existence + Proof []byte `protobuf:"bytes,3,opt,name=proof,proto3" json:"proof,omitempty"` + // height at which the proof was retrieved + ProofHeight types.Height `protobuf:"bytes,4,opt,name=proof_height,json=proofHeight,proto3" json:"proof_height"` +} + +func (m *QueryChannelConsensusStateResponse) Reset() { *m = QueryChannelConsensusStateResponse{} } +func (m *QueryChannelConsensusStateResponse) String() string { return proto.CompactTextString(m) } +func (*QueryChannelConsensusStateResponse) ProtoMessage() {} +func (*QueryChannelConsensusStateResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_3acdacc9aeb4fa50, []int{9} +} +func (m *QueryChannelConsensusStateResponse) 
XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *QueryChannelConsensusStateResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_QueryChannelConsensusStateResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *QueryChannelConsensusStateResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_QueryChannelConsensusStateResponse.Merge(m, src) +} +func (m *QueryChannelConsensusStateResponse) XXX_Size() int { + return m.Size() +} +func (m *QueryChannelConsensusStateResponse) XXX_DiscardUnknown() { + xxx_messageInfo_QueryChannelConsensusStateResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_QueryChannelConsensusStateResponse proto.InternalMessageInfo + +func (m *QueryChannelConsensusStateResponse) GetConsensusState() *types1.Any { + if m != nil { + return m.ConsensusState + } + return nil +} + +func (m *QueryChannelConsensusStateResponse) GetClientId() string { + if m != nil { + return m.ClientId + } + return "" +} + +func (m *QueryChannelConsensusStateResponse) GetProof() []byte { + if m != nil { + return m.Proof + } + return nil +} + +func (m *QueryChannelConsensusStateResponse) GetProofHeight() types.Height { + if m != nil { + return m.ProofHeight + } + return types.Height{} +} + +// QueryPacketCommitmentRequest is the request type for the +// Query/PacketCommitment RPC method +type QueryPacketCommitmentRequest struct { + // port unique identifier + PortId string `protobuf:"bytes,1,opt,name=port_id,json=portId,proto3" json:"port_id,omitempty"` + // channel unique identifier + ChannelId string `protobuf:"bytes,2,opt,name=channel_id,json=channelId,proto3" json:"channel_id,omitempty"` + // packet sequence + Sequence uint64 `protobuf:"varint,3,opt,name=sequence,proto3" json:"sequence,omitempty"` +} + +func (m *QueryPacketCommitmentRequest) Reset() { *m = QueryPacketCommitmentRequest{} } +func (m *QueryPacketCommitmentRequest) String() string { return proto.CompactTextString(m) } +func (*QueryPacketCommitmentRequest) ProtoMessage() {} +func (*QueryPacketCommitmentRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_3acdacc9aeb4fa50, []int{10} +} +func (m *QueryPacketCommitmentRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *QueryPacketCommitmentRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_QueryPacketCommitmentRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *QueryPacketCommitmentRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_QueryPacketCommitmentRequest.Merge(m, src) +} +func (m *QueryPacketCommitmentRequest) XXX_Size() int { + return m.Size() +} +func (m *QueryPacketCommitmentRequest) XXX_DiscardUnknown() { + xxx_messageInfo_QueryPacketCommitmentRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_QueryPacketCommitmentRequest proto.InternalMessageInfo + +func (m *QueryPacketCommitmentRequest) GetPortId() string { + if m != nil { + return m.PortId + } + return "" +} + +func (m *QueryPacketCommitmentRequest) GetChannelId() string { + if m != nil { + return m.ChannelId + } + return "" +} + +func (m *QueryPacketCommitmentRequest) GetSequence() uint64 { + if m != nil { + return m.Sequence + } + return 0 +} + +// QueryPacketCommitmentResponse 
defines the client query response for a packet +// which also includes a proof and the height from which the proof was +// retrieved +type QueryPacketCommitmentResponse struct { + // packet associated with the request fields + Commitment []byte `protobuf:"bytes,1,opt,name=commitment,proto3" json:"commitment,omitempty"` + // merkle proof of existence + Proof []byte `protobuf:"bytes,2,opt,name=proof,proto3" json:"proof,omitempty"` + // height at which the proof was retrieved + ProofHeight types.Height `protobuf:"bytes,3,opt,name=proof_height,json=proofHeight,proto3" json:"proof_height"` +} + +func (m *QueryPacketCommitmentResponse) Reset() { *m = QueryPacketCommitmentResponse{} } +func (m *QueryPacketCommitmentResponse) String() string { return proto.CompactTextString(m) } +func (*QueryPacketCommitmentResponse) ProtoMessage() {} +func (*QueryPacketCommitmentResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_3acdacc9aeb4fa50, []int{11} +} +func (m *QueryPacketCommitmentResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *QueryPacketCommitmentResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_QueryPacketCommitmentResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *QueryPacketCommitmentResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_QueryPacketCommitmentResponse.Merge(m, src) +} +func (m *QueryPacketCommitmentResponse) XXX_Size() int { + return m.Size() +} +func (m *QueryPacketCommitmentResponse) XXX_DiscardUnknown() { + xxx_messageInfo_QueryPacketCommitmentResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_QueryPacketCommitmentResponse proto.InternalMessageInfo + +func (m *QueryPacketCommitmentResponse) GetCommitment() []byte { + if m != nil { + return m.Commitment + } + return nil +} + +func (m *QueryPacketCommitmentResponse) GetProof() []byte { + if m != nil { + return m.Proof + } + return nil +} + +func (m *QueryPacketCommitmentResponse) GetProofHeight() types.Height { + if m != nil { + return m.ProofHeight + } + return types.Height{} +} + +// QueryPacketCommitmentsRequest is the request type for the +// Query/QueryPacketCommitments RPC method +type QueryPacketCommitmentsRequest struct { + // port unique identifier + PortId string `protobuf:"bytes,1,opt,name=port_id,json=portId,proto3" json:"port_id,omitempty"` + // channel unique identifier + ChannelId string `protobuf:"bytes,2,opt,name=channel_id,json=channelId,proto3" json:"channel_id,omitempty"` + // pagination request + Pagination *query.PageRequest `protobuf:"bytes,3,opt,name=pagination,proto3" json:"pagination,omitempty"` +} + +func (m *QueryPacketCommitmentsRequest) Reset() { *m = QueryPacketCommitmentsRequest{} } +func (m *QueryPacketCommitmentsRequest) String() string { return proto.CompactTextString(m) } +func (*QueryPacketCommitmentsRequest) ProtoMessage() {} +func (*QueryPacketCommitmentsRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_3acdacc9aeb4fa50, []int{12} +} +func (m *QueryPacketCommitmentsRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *QueryPacketCommitmentsRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_QueryPacketCommitmentsRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } 
+ return b[:n], nil + } +} +func (m *QueryPacketCommitmentsRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_QueryPacketCommitmentsRequest.Merge(m, src) +} +func (m *QueryPacketCommitmentsRequest) XXX_Size() int { + return m.Size() +} +func (m *QueryPacketCommitmentsRequest) XXX_DiscardUnknown() { + xxx_messageInfo_QueryPacketCommitmentsRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_QueryPacketCommitmentsRequest proto.InternalMessageInfo + +func (m *QueryPacketCommitmentsRequest) GetPortId() string { + if m != nil { + return m.PortId + } + return "" +} + +func (m *QueryPacketCommitmentsRequest) GetChannelId() string { + if m != nil { + return m.ChannelId + } + return "" +} + +func (m *QueryPacketCommitmentsRequest) GetPagination() *query.PageRequest { + if m != nil { + return m.Pagination + } + return nil +} + +// QueryPacketCommitmentsResponse is the request type for the +// Query/QueryPacketCommitments RPC method +type QueryPacketCommitmentsResponse struct { + Commitments []*PacketState `protobuf:"bytes,1,rep,name=commitments,proto3" json:"commitments,omitempty"` + // pagination response + Pagination *query.PageResponse `protobuf:"bytes,2,opt,name=pagination,proto3" json:"pagination,omitempty"` + // query block height + Height types.Height `protobuf:"bytes,3,opt,name=height,proto3" json:"height"` +} + +func (m *QueryPacketCommitmentsResponse) Reset() { *m = QueryPacketCommitmentsResponse{} } +func (m *QueryPacketCommitmentsResponse) String() string { return proto.CompactTextString(m) } +func (*QueryPacketCommitmentsResponse) ProtoMessage() {} +func (*QueryPacketCommitmentsResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_3acdacc9aeb4fa50, []int{13} +} +func (m *QueryPacketCommitmentsResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *QueryPacketCommitmentsResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_QueryPacketCommitmentsResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *QueryPacketCommitmentsResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_QueryPacketCommitmentsResponse.Merge(m, src) +} +func (m *QueryPacketCommitmentsResponse) XXX_Size() int { + return m.Size() +} +func (m *QueryPacketCommitmentsResponse) XXX_DiscardUnknown() { + xxx_messageInfo_QueryPacketCommitmentsResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_QueryPacketCommitmentsResponse proto.InternalMessageInfo + +func (m *QueryPacketCommitmentsResponse) GetCommitments() []*PacketState { + if m != nil { + return m.Commitments + } + return nil +} + +func (m *QueryPacketCommitmentsResponse) GetPagination() *query.PageResponse { + if m != nil { + return m.Pagination + } + return nil +} + +func (m *QueryPacketCommitmentsResponse) GetHeight() types.Height { + if m != nil { + return m.Height + } + return types.Height{} +} + +// QueryPacketReceiptRequest is the request type for the +// Query/PacketReceipt RPC method +type QueryPacketReceiptRequest struct { + // port unique identifier + PortId string `protobuf:"bytes,1,opt,name=port_id,json=portId,proto3" json:"port_id,omitempty"` + // channel unique identifier + ChannelId string `protobuf:"bytes,2,opt,name=channel_id,json=channelId,proto3" json:"channel_id,omitempty"` + // packet sequence + Sequence uint64 `protobuf:"varint,3,opt,name=sequence,proto3" json:"sequence,omitempty"` +} + +func (m 
*QueryPacketReceiptRequest) Reset() { *m = QueryPacketReceiptRequest{} } +func (m *QueryPacketReceiptRequest) String() string { return proto.CompactTextString(m) } +func (*QueryPacketReceiptRequest) ProtoMessage() {} +func (*QueryPacketReceiptRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_3acdacc9aeb4fa50, []int{14} +} +func (m *QueryPacketReceiptRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *QueryPacketReceiptRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_QueryPacketReceiptRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *QueryPacketReceiptRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_QueryPacketReceiptRequest.Merge(m, src) +} +func (m *QueryPacketReceiptRequest) XXX_Size() int { + return m.Size() +} +func (m *QueryPacketReceiptRequest) XXX_DiscardUnknown() { + xxx_messageInfo_QueryPacketReceiptRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_QueryPacketReceiptRequest proto.InternalMessageInfo + +func (m *QueryPacketReceiptRequest) GetPortId() string { + if m != nil { + return m.PortId + } + return "" +} + +func (m *QueryPacketReceiptRequest) GetChannelId() string { + if m != nil { + return m.ChannelId + } + return "" +} + +func (m *QueryPacketReceiptRequest) GetSequence() uint64 { + if m != nil { + return m.Sequence + } + return 0 +} + +// QueryPacketReceiptResponse defines the client query response for a packet +// receipt which also includes a proof, and the height from which the proof was +// retrieved +type QueryPacketReceiptResponse struct { + // success flag for if receipt exists + Received bool `protobuf:"varint,2,opt,name=received,proto3" json:"received,omitempty"` + // merkle proof of existence + Proof []byte `protobuf:"bytes,3,opt,name=proof,proto3" json:"proof,omitempty"` + // height at which the proof was retrieved + ProofHeight types.Height `protobuf:"bytes,4,opt,name=proof_height,json=proofHeight,proto3" json:"proof_height"` +} + +func (m *QueryPacketReceiptResponse) Reset() { *m = QueryPacketReceiptResponse{} } +func (m *QueryPacketReceiptResponse) String() string { return proto.CompactTextString(m) } +func (*QueryPacketReceiptResponse) ProtoMessage() {} +func (*QueryPacketReceiptResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_3acdacc9aeb4fa50, []int{15} +} +func (m *QueryPacketReceiptResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *QueryPacketReceiptResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_QueryPacketReceiptResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *QueryPacketReceiptResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_QueryPacketReceiptResponse.Merge(m, src) +} +func (m *QueryPacketReceiptResponse) XXX_Size() int { + return m.Size() +} +func (m *QueryPacketReceiptResponse) XXX_DiscardUnknown() { + xxx_messageInfo_QueryPacketReceiptResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_QueryPacketReceiptResponse proto.InternalMessageInfo + +func (m *QueryPacketReceiptResponse) GetReceived() bool { + if m != nil { + return m.Received + } + return false +} + +func (m *QueryPacketReceiptResponse) GetProof() []byte { + if m != nil { + return m.Proof + } + 
return nil +} + +func (m *QueryPacketReceiptResponse) GetProofHeight() types.Height { + if m != nil { + return m.ProofHeight + } + return types.Height{} +} + +// QueryPacketAcknowledgementRequest is the request type for the +// Query/PacketAcknowledgement RPC method +type QueryPacketAcknowledgementRequest struct { + // port unique identifier + PortId string `protobuf:"bytes,1,opt,name=port_id,json=portId,proto3" json:"port_id,omitempty"` + // channel unique identifier + ChannelId string `protobuf:"bytes,2,opt,name=channel_id,json=channelId,proto3" json:"channel_id,omitempty"` + // packet sequence + Sequence uint64 `protobuf:"varint,3,opt,name=sequence,proto3" json:"sequence,omitempty"` +} + +func (m *QueryPacketAcknowledgementRequest) Reset() { *m = QueryPacketAcknowledgementRequest{} } +func (m *QueryPacketAcknowledgementRequest) String() string { return proto.CompactTextString(m) } +func (*QueryPacketAcknowledgementRequest) ProtoMessage() {} +func (*QueryPacketAcknowledgementRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_3acdacc9aeb4fa50, []int{16} +} +func (m *QueryPacketAcknowledgementRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *QueryPacketAcknowledgementRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_QueryPacketAcknowledgementRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *QueryPacketAcknowledgementRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_QueryPacketAcknowledgementRequest.Merge(m, src) +} +func (m *QueryPacketAcknowledgementRequest) XXX_Size() int { + return m.Size() +} +func (m *QueryPacketAcknowledgementRequest) XXX_DiscardUnknown() { + xxx_messageInfo_QueryPacketAcknowledgementRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_QueryPacketAcknowledgementRequest proto.InternalMessageInfo + +func (m *QueryPacketAcknowledgementRequest) GetPortId() string { + if m != nil { + return m.PortId + } + return "" +} + +func (m *QueryPacketAcknowledgementRequest) GetChannelId() string { + if m != nil { + return m.ChannelId + } + return "" +} + +func (m *QueryPacketAcknowledgementRequest) GetSequence() uint64 { + if m != nil { + return m.Sequence + } + return 0 +} + +// QueryPacketAcknowledgementResponse defines the client query response for a +// packet which also includes a proof and the height from which the +// proof was retrieved +type QueryPacketAcknowledgementResponse struct { + // packet associated with the request fields + Acknowledgement []byte `protobuf:"bytes,1,opt,name=acknowledgement,proto3" json:"acknowledgement,omitempty"` + // merkle proof of existence + Proof []byte `protobuf:"bytes,2,opt,name=proof,proto3" json:"proof,omitempty"` + // height at which the proof was retrieved + ProofHeight types.Height `protobuf:"bytes,3,opt,name=proof_height,json=proofHeight,proto3" json:"proof_height"` +} + +func (m *QueryPacketAcknowledgementResponse) Reset() { *m = QueryPacketAcknowledgementResponse{} } +func (m *QueryPacketAcknowledgementResponse) String() string { return proto.CompactTextString(m) } +func (*QueryPacketAcknowledgementResponse) ProtoMessage() {} +func (*QueryPacketAcknowledgementResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_3acdacc9aeb4fa50, []int{17} +} +func (m *QueryPacketAcknowledgementResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m 
*QueryPacketAcknowledgementResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_QueryPacketAcknowledgementResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *QueryPacketAcknowledgementResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_QueryPacketAcknowledgementResponse.Merge(m, src) +} +func (m *QueryPacketAcknowledgementResponse) XXX_Size() int { + return m.Size() +} +func (m *QueryPacketAcknowledgementResponse) XXX_DiscardUnknown() { + xxx_messageInfo_QueryPacketAcknowledgementResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_QueryPacketAcknowledgementResponse proto.InternalMessageInfo + +func (m *QueryPacketAcknowledgementResponse) GetAcknowledgement() []byte { + if m != nil { + return m.Acknowledgement + } + return nil +} + +func (m *QueryPacketAcknowledgementResponse) GetProof() []byte { + if m != nil { + return m.Proof + } + return nil +} + +func (m *QueryPacketAcknowledgementResponse) GetProofHeight() types.Height { + if m != nil { + return m.ProofHeight + } + return types.Height{} +} + +// QueryPacketAcknowledgementsRequest is the request type for the +// Query/QueryPacketCommitments RPC method +type QueryPacketAcknowledgementsRequest struct { + // port unique identifier + PortId string `protobuf:"bytes,1,opt,name=port_id,json=portId,proto3" json:"port_id,omitempty"` + // channel unique identifier + ChannelId string `protobuf:"bytes,2,opt,name=channel_id,json=channelId,proto3" json:"channel_id,omitempty"` + // pagination request + Pagination *query.PageRequest `protobuf:"bytes,3,opt,name=pagination,proto3" json:"pagination,omitempty"` +} + +func (m *QueryPacketAcknowledgementsRequest) Reset() { *m = QueryPacketAcknowledgementsRequest{} } +func (m *QueryPacketAcknowledgementsRequest) String() string { return proto.CompactTextString(m) } +func (*QueryPacketAcknowledgementsRequest) ProtoMessage() {} +func (*QueryPacketAcknowledgementsRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_3acdacc9aeb4fa50, []int{18} +} +func (m *QueryPacketAcknowledgementsRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *QueryPacketAcknowledgementsRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_QueryPacketAcknowledgementsRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *QueryPacketAcknowledgementsRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_QueryPacketAcknowledgementsRequest.Merge(m, src) +} +func (m *QueryPacketAcknowledgementsRequest) XXX_Size() int { + return m.Size() +} +func (m *QueryPacketAcknowledgementsRequest) XXX_DiscardUnknown() { + xxx_messageInfo_QueryPacketAcknowledgementsRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_QueryPacketAcknowledgementsRequest proto.InternalMessageInfo + +func (m *QueryPacketAcknowledgementsRequest) GetPortId() string { + if m != nil { + return m.PortId + } + return "" +} + +func (m *QueryPacketAcknowledgementsRequest) GetChannelId() string { + if m != nil { + return m.ChannelId + } + return "" +} + +func (m *QueryPacketAcknowledgementsRequest) GetPagination() *query.PageRequest { + if m != nil { + return m.Pagination + } + return nil +} + +// QueryPacketAcknowledgemetsResponse is the request type for the +// 
Query/QueryPacketAcknowledgements RPC method +type QueryPacketAcknowledgementsResponse struct { + Acknowledgements []*PacketState `protobuf:"bytes,1,rep,name=acknowledgements,proto3" json:"acknowledgements,omitempty"` + // pagination response + Pagination *query.PageResponse `protobuf:"bytes,2,opt,name=pagination,proto3" json:"pagination,omitempty"` + // query block height + Height types.Height `protobuf:"bytes,3,opt,name=height,proto3" json:"height"` +} + +func (m *QueryPacketAcknowledgementsResponse) Reset() { *m = QueryPacketAcknowledgementsResponse{} } +func (m *QueryPacketAcknowledgementsResponse) String() string { return proto.CompactTextString(m) } +func (*QueryPacketAcknowledgementsResponse) ProtoMessage() {} +func (*QueryPacketAcknowledgementsResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_3acdacc9aeb4fa50, []int{19} +} +func (m *QueryPacketAcknowledgementsResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *QueryPacketAcknowledgementsResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_QueryPacketAcknowledgementsResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *QueryPacketAcknowledgementsResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_QueryPacketAcknowledgementsResponse.Merge(m, src) +} +func (m *QueryPacketAcknowledgementsResponse) XXX_Size() int { + return m.Size() +} +func (m *QueryPacketAcknowledgementsResponse) XXX_DiscardUnknown() { + xxx_messageInfo_QueryPacketAcknowledgementsResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_QueryPacketAcknowledgementsResponse proto.InternalMessageInfo + +func (m *QueryPacketAcknowledgementsResponse) GetAcknowledgements() []*PacketState { + if m != nil { + return m.Acknowledgements + } + return nil +} + +func (m *QueryPacketAcknowledgementsResponse) GetPagination() *query.PageResponse { + if m != nil { + return m.Pagination + } + return nil +} + +func (m *QueryPacketAcknowledgementsResponse) GetHeight() types.Height { + if m != nil { + return m.Height + } + return types.Height{} +} + +// QueryUnreceivedPacketsRequest is the request type for the +// Query/UnreceivedPackets RPC method +type QueryUnreceivedPacketsRequest struct { + // port unique identifier + PortId string `protobuf:"bytes,1,opt,name=port_id,json=portId,proto3" json:"port_id,omitempty"` + // channel unique identifier + ChannelId string `protobuf:"bytes,2,opt,name=channel_id,json=channelId,proto3" json:"channel_id,omitempty"` + // list of packet sequences + PacketCommitmentSequences []uint64 `protobuf:"varint,3,rep,packed,name=packet_commitment_sequences,json=packetCommitmentSequences,proto3" json:"packet_commitment_sequences,omitempty"` +} + +func (m *QueryUnreceivedPacketsRequest) Reset() { *m = QueryUnreceivedPacketsRequest{} } +func (m *QueryUnreceivedPacketsRequest) String() string { return proto.CompactTextString(m) } +func (*QueryUnreceivedPacketsRequest) ProtoMessage() {} +func (*QueryUnreceivedPacketsRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_3acdacc9aeb4fa50, []int{20} +} +func (m *QueryUnreceivedPacketsRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *QueryUnreceivedPacketsRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_QueryUnreceivedPacketsRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] 
+ n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *QueryUnreceivedPacketsRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_QueryUnreceivedPacketsRequest.Merge(m, src) +} +func (m *QueryUnreceivedPacketsRequest) XXX_Size() int { + return m.Size() +} +func (m *QueryUnreceivedPacketsRequest) XXX_DiscardUnknown() { + xxx_messageInfo_QueryUnreceivedPacketsRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_QueryUnreceivedPacketsRequest proto.InternalMessageInfo + +func (m *QueryUnreceivedPacketsRequest) GetPortId() string { + if m != nil { + return m.PortId + } + return "" +} + +func (m *QueryUnreceivedPacketsRequest) GetChannelId() string { + if m != nil { + return m.ChannelId + } + return "" +} + +func (m *QueryUnreceivedPacketsRequest) GetPacketCommitmentSequences() []uint64 { + if m != nil { + return m.PacketCommitmentSequences + } + return nil +} + +// QueryUnreceivedPacketsResponse is the response type for the +// Query/UnreceivedPacketCommitments RPC method +type QueryUnreceivedPacketsResponse struct { + // list of unreceived packet sequences + Sequences []uint64 `protobuf:"varint,1,rep,packed,name=sequences,proto3" json:"sequences,omitempty"` + // query block height + Height types.Height `protobuf:"bytes,2,opt,name=height,proto3" json:"height"` +} + +func (m *QueryUnreceivedPacketsResponse) Reset() { *m = QueryUnreceivedPacketsResponse{} } +func (m *QueryUnreceivedPacketsResponse) String() string { return proto.CompactTextString(m) } +func (*QueryUnreceivedPacketsResponse) ProtoMessage() {} +func (*QueryUnreceivedPacketsResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_3acdacc9aeb4fa50, []int{21} +} +func (m *QueryUnreceivedPacketsResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *QueryUnreceivedPacketsResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_QueryUnreceivedPacketsResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *QueryUnreceivedPacketsResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_QueryUnreceivedPacketsResponse.Merge(m, src) +} +func (m *QueryUnreceivedPacketsResponse) XXX_Size() int { + return m.Size() +} +func (m *QueryUnreceivedPacketsResponse) XXX_DiscardUnknown() { + xxx_messageInfo_QueryUnreceivedPacketsResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_QueryUnreceivedPacketsResponse proto.InternalMessageInfo + +func (m *QueryUnreceivedPacketsResponse) GetSequences() []uint64 { + if m != nil { + return m.Sequences + } + return nil +} + +func (m *QueryUnreceivedPacketsResponse) GetHeight() types.Height { + if m != nil { + return m.Height + } + return types.Height{} +} + +// QueryUnreceivedAcks is the request type for the +// Query/UnreceivedAcks RPC method +type QueryUnreceivedAcksRequest struct { + // port unique identifier + PortId string `protobuf:"bytes,1,opt,name=port_id,json=portId,proto3" json:"port_id,omitempty"` + // channel unique identifier + ChannelId string `protobuf:"bytes,2,opt,name=channel_id,json=channelId,proto3" json:"channel_id,omitempty"` + // list of acknowledgement sequences + PacketAckSequences []uint64 `protobuf:"varint,3,rep,packed,name=packet_ack_sequences,json=packetAckSequences,proto3" json:"packet_ack_sequences,omitempty"` +} + +func (m *QueryUnreceivedAcksRequest) Reset() { *m = QueryUnreceivedAcksRequest{} } +func (m 
*QueryUnreceivedAcksRequest) String() string { return proto.CompactTextString(m) } +func (*QueryUnreceivedAcksRequest) ProtoMessage() {} +func (*QueryUnreceivedAcksRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_3acdacc9aeb4fa50, []int{22} +} +func (m *QueryUnreceivedAcksRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *QueryUnreceivedAcksRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_QueryUnreceivedAcksRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *QueryUnreceivedAcksRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_QueryUnreceivedAcksRequest.Merge(m, src) +} +func (m *QueryUnreceivedAcksRequest) XXX_Size() int { + return m.Size() +} +func (m *QueryUnreceivedAcksRequest) XXX_DiscardUnknown() { + xxx_messageInfo_QueryUnreceivedAcksRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_QueryUnreceivedAcksRequest proto.InternalMessageInfo + +func (m *QueryUnreceivedAcksRequest) GetPortId() string { + if m != nil { + return m.PortId + } + return "" +} + +func (m *QueryUnreceivedAcksRequest) GetChannelId() string { + if m != nil { + return m.ChannelId + } + return "" +} + +func (m *QueryUnreceivedAcksRequest) GetPacketAckSequences() []uint64 { + if m != nil { + return m.PacketAckSequences + } + return nil +} + +// QueryUnreceivedAcksResponse is the response type for the +// Query/UnreceivedAcks RPC method +type QueryUnreceivedAcksResponse struct { + // list of unreceived acknowledgement sequences + Sequences []uint64 `protobuf:"varint,1,rep,packed,name=sequences,proto3" json:"sequences,omitempty"` + // query block height + Height types.Height `protobuf:"bytes,2,opt,name=height,proto3" json:"height"` +} + +func (m *QueryUnreceivedAcksResponse) Reset() { *m = QueryUnreceivedAcksResponse{} } +func (m *QueryUnreceivedAcksResponse) String() string { return proto.CompactTextString(m) } +func (*QueryUnreceivedAcksResponse) ProtoMessage() {} +func (*QueryUnreceivedAcksResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_3acdacc9aeb4fa50, []int{23} +} +func (m *QueryUnreceivedAcksResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *QueryUnreceivedAcksResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_QueryUnreceivedAcksResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *QueryUnreceivedAcksResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_QueryUnreceivedAcksResponse.Merge(m, src) +} +func (m *QueryUnreceivedAcksResponse) XXX_Size() int { + return m.Size() +} +func (m *QueryUnreceivedAcksResponse) XXX_DiscardUnknown() { + xxx_messageInfo_QueryUnreceivedAcksResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_QueryUnreceivedAcksResponse proto.InternalMessageInfo + +func (m *QueryUnreceivedAcksResponse) GetSequences() []uint64 { + if m != nil { + return m.Sequences + } + return nil +} + +func (m *QueryUnreceivedAcksResponse) GetHeight() types.Height { + if m != nil { + return m.Height + } + return types.Height{} +} + +// QueryNextSequenceReceiveRequest is the request type for the +// Query/QueryNextSequenceReceiveRequest RPC method +type QueryNextSequenceReceiveRequest struct { + // port unique identifier + PortId string 
`protobuf:"bytes,1,opt,name=port_id,json=portId,proto3" json:"port_id,omitempty"` + // channel unique identifier + ChannelId string `protobuf:"bytes,2,opt,name=channel_id,json=channelId,proto3" json:"channel_id,omitempty"` +} + +func (m *QueryNextSequenceReceiveRequest) Reset() { *m = QueryNextSequenceReceiveRequest{} } +func (m *QueryNextSequenceReceiveRequest) String() string { return proto.CompactTextString(m) } +func (*QueryNextSequenceReceiveRequest) ProtoMessage() {} +func (*QueryNextSequenceReceiveRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_3acdacc9aeb4fa50, []int{24} +} +func (m *QueryNextSequenceReceiveRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *QueryNextSequenceReceiveRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_QueryNextSequenceReceiveRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *QueryNextSequenceReceiveRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_QueryNextSequenceReceiveRequest.Merge(m, src) +} +func (m *QueryNextSequenceReceiveRequest) XXX_Size() int { + return m.Size() +} +func (m *QueryNextSequenceReceiveRequest) XXX_DiscardUnknown() { + xxx_messageInfo_QueryNextSequenceReceiveRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_QueryNextSequenceReceiveRequest proto.InternalMessageInfo + +func (m *QueryNextSequenceReceiveRequest) GetPortId() string { + if m != nil { + return m.PortId + } + return "" +} + +func (m *QueryNextSequenceReceiveRequest) GetChannelId() string { + if m != nil { + return m.ChannelId + } + return "" +} + +// QuerySequenceResponse is the request type for the +// Query/QueryNextSequenceReceiveResponse RPC method +type QueryNextSequenceReceiveResponse struct { + // next sequence receive number + NextSequenceReceive uint64 `protobuf:"varint,1,opt,name=next_sequence_receive,json=nextSequenceReceive,proto3" json:"next_sequence_receive,omitempty"` + // merkle proof of existence + Proof []byte `protobuf:"bytes,2,opt,name=proof,proto3" json:"proof,omitempty"` + // height at which the proof was retrieved + ProofHeight types.Height `protobuf:"bytes,3,opt,name=proof_height,json=proofHeight,proto3" json:"proof_height"` +} + +func (m *QueryNextSequenceReceiveResponse) Reset() { *m = QueryNextSequenceReceiveResponse{} } +func (m *QueryNextSequenceReceiveResponse) String() string { return proto.CompactTextString(m) } +func (*QueryNextSequenceReceiveResponse) ProtoMessage() {} +func (*QueryNextSequenceReceiveResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_3acdacc9aeb4fa50, []int{25} +} +func (m *QueryNextSequenceReceiveResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *QueryNextSequenceReceiveResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_QueryNextSequenceReceiveResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *QueryNextSequenceReceiveResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_QueryNextSequenceReceiveResponse.Merge(m, src) +} +func (m *QueryNextSequenceReceiveResponse) XXX_Size() int { + return m.Size() +} +func (m *QueryNextSequenceReceiveResponse) XXX_DiscardUnknown() { + xxx_messageInfo_QueryNextSequenceReceiveResponse.DiscardUnknown(m) +} + 
+var xxx_messageInfo_QueryNextSequenceReceiveResponse proto.InternalMessageInfo + +func (m *QueryNextSequenceReceiveResponse) GetNextSequenceReceive() uint64 { + if m != nil { + return m.NextSequenceReceive + } + return 0 +} + +func (m *QueryNextSequenceReceiveResponse) GetProof() []byte { + if m != nil { + return m.Proof + } + return nil +} + +func (m *QueryNextSequenceReceiveResponse) GetProofHeight() types.Height { + if m != nil { + return m.ProofHeight + } + return types.Height{} +} + +func init() { + proto.RegisterType((*QueryChannelRequest)(nil), "ibcgo.core.channel.v1.QueryChannelRequest") + proto.RegisterType((*QueryChannelResponse)(nil), "ibcgo.core.channel.v1.QueryChannelResponse") + proto.RegisterType((*QueryChannelsRequest)(nil), "ibcgo.core.channel.v1.QueryChannelsRequest") + proto.RegisterType((*QueryChannelsResponse)(nil), "ibcgo.core.channel.v1.QueryChannelsResponse") + proto.RegisterType((*QueryConnectionChannelsRequest)(nil), "ibcgo.core.channel.v1.QueryConnectionChannelsRequest") + proto.RegisterType((*QueryConnectionChannelsResponse)(nil), "ibcgo.core.channel.v1.QueryConnectionChannelsResponse") + proto.RegisterType((*QueryChannelClientStateRequest)(nil), "ibcgo.core.channel.v1.QueryChannelClientStateRequest") + proto.RegisterType((*QueryChannelClientStateResponse)(nil), "ibcgo.core.channel.v1.QueryChannelClientStateResponse") + proto.RegisterType((*QueryChannelConsensusStateRequest)(nil), "ibcgo.core.channel.v1.QueryChannelConsensusStateRequest") + proto.RegisterType((*QueryChannelConsensusStateResponse)(nil), "ibcgo.core.channel.v1.QueryChannelConsensusStateResponse") + proto.RegisterType((*QueryPacketCommitmentRequest)(nil), "ibcgo.core.channel.v1.QueryPacketCommitmentRequest") + proto.RegisterType((*QueryPacketCommitmentResponse)(nil), "ibcgo.core.channel.v1.QueryPacketCommitmentResponse") + proto.RegisterType((*QueryPacketCommitmentsRequest)(nil), "ibcgo.core.channel.v1.QueryPacketCommitmentsRequest") + proto.RegisterType((*QueryPacketCommitmentsResponse)(nil), "ibcgo.core.channel.v1.QueryPacketCommitmentsResponse") + proto.RegisterType((*QueryPacketReceiptRequest)(nil), "ibcgo.core.channel.v1.QueryPacketReceiptRequest") + proto.RegisterType((*QueryPacketReceiptResponse)(nil), "ibcgo.core.channel.v1.QueryPacketReceiptResponse") + proto.RegisterType((*QueryPacketAcknowledgementRequest)(nil), "ibcgo.core.channel.v1.QueryPacketAcknowledgementRequest") + proto.RegisterType((*QueryPacketAcknowledgementResponse)(nil), "ibcgo.core.channel.v1.QueryPacketAcknowledgementResponse") + proto.RegisterType((*QueryPacketAcknowledgementsRequest)(nil), "ibcgo.core.channel.v1.QueryPacketAcknowledgementsRequest") + proto.RegisterType((*QueryPacketAcknowledgementsResponse)(nil), "ibcgo.core.channel.v1.QueryPacketAcknowledgementsResponse") + proto.RegisterType((*QueryUnreceivedPacketsRequest)(nil), "ibcgo.core.channel.v1.QueryUnreceivedPacketsRequest") + proto.RegisterType((*QueryUnreceivedPacketsResponse)(nil), "ibcgo.core.channel.v1.QueryUnreceivedPacketsResponse") + proto.RegisterType((*QueryUnreceivedAcksRequest)(nil), "ibcgo.core.channel.v1.QueryUnreceivedAcksRequest") + proto.RegisterType((*QueryUnreceivedAcksResponse)(nil), "ibcgo.core.channel.v1.QueryUnreceivedAcksResponse") + proto.RegisterType((*QueryNextSequenceReceiveRequest)(nil), "ibcgo.core.channel.v1.QueryNextSequenceReceiveRequest") + proto.RegisterType((*QueryNextSequenceReceiveResponse)(nil), "ibcgo.core.channel.v1.QueryNextSequenceReceiveResponse") +} + +func init() { 
proto.RegisterFile("ibcgo/core/channel/v1/query.proto", fileDescriptor_3acdacc9aeb4fa50) } + +var fileDescriptor_3acdacc9aeb4fa50 = []byte{ + // 1487 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xdc, 0x59, 0xcb, 0x8f, 0xdb, 0xd4, + 0x17, 0x9e, 0x9b, 0x99, 0xb6, 0x99, 0x33, 0xfd, 0xf5, 0x71, 0x3b, 0x69, 0xa7, 0xee, 0x34, 0x9d, + 0xba, 0xfa, 0xd1, 0x51, 0x4b, 0xed, 0x66, 0xfa, 0xa0, 0x54, 0x50, 0xa9, 0x2d, 0xd0, 0x0e, 0x52, + 0x5f, 0x2e, 0x15, 0x6d, 0x25, 0x08, 0x8e, 0x73, 0x9b, 0xb1, 0x66, 0x62, 0xbb, 0xb1, 0x93, 0xb6, + 0x0c, 0x41, 0x88, 0x05, 0x20, 0xc4, 0x02, 0x09, 0x01, 0x1b, 0x24, 0x36, 0x88, 0x0d, 0xea, 0x8a, + 0x3f, 0x80, 0x05, 0x9b, 0x2e, 0x2b, 0x15, 0x89, 0x6e, 0x78, 0x68, 0x06, 0x89, 0x45, 0xd9, 0xb2, + 0x61, 0x85, 0x7c, 0x1f, 0x8e, 0x9d, 0xd8, 0x9e, 0x49, 0x32, 0x91, 0x2a, 0x76, 0xf1, 0xf5, 0x3d, + 0xe7, 0x7e, 0xdf, 0x77, 0xee, 0x39, 0x39, 0x27, 0x81, 0xbd, 0x66, 0xc9, 0xa8, 0xd8, 0xaa, 0x61, + 0xd7, 0x88, 0x6a, 0xcc, 0xe9, 0x96, 0x45, 0x16, 0xd4, 0x46, 0x41, 0xbd, 0x5d, 0x27, 0xb5, 0x7b, + 0x8a, 0x53, 0xb3, 0x3d, 0x1b, 0xe7, 0xe8, 0x16, 0xc5, 0xdf, 0xa2, 0xf0, 0x2d, 0x4a, 0xa3, 0x20, + 0x45, 0x2c, 0x17, 0x4c, 0x62, 0x79, 0xbe, 0x21, 0xfb, 0xc4, 0x2c, 0xa5, 0x03, 0x86, 0xed, 0x56, + 0x6d, 0x57, 0x2d, 0xe9, 0x2e, 0x61, 0x2e, 0xd5, 0x46, 0xa1, 0x44, 0x3c, 0xbd, 0xa0, 0x3a, 0x7a, + 0xc5, 0xb4, 0x74, 0xcf, 0xb4, 0x2d, 0xbe, 0x77, 0x5f, 0x3c, 0x10, 0x71, 0x20, 0xdb, 0x34, 0x59, + 0xb1, 0xed, 0xca, 0x02, 0x51, 0x75, 0xc7, 0x54, 0x75, 0xcb, 0xb2, 0x3d, 0xea, 0xc1, 0xe5, 0x6f, + 0x77, 0xf2, 0xb7, 0xf4, 0xa9, 0x54, 0xbf, 0xa5, 0xea, 0x16, 0xe7, 0x20, 0x8d, 0x57, 0xec, 0x8a, + 0x4d, 0x3f, 0xaa, 0xfe, 0x27, 0xb6, 0x2a, 0x5f, 0x80, 0x6d, 0x57, 0x7c, 0x54, 0x67, 0xd9, 0x21, + 0x1a, 0xb9, 0x5d, 0x27, 0xae, 0x87, 0x77, 0xc0, 0x06, 0xc7, 0xae, 0x79, 0x45, 0xb3, 0x3c, 0x81, + 0xa6, 0xd0, 0xf4, 0xa8, 0xb6, 0xde, 0x7f, 0x9c, 0x2d, 0xe3, 0xdd, 0x00, 0x1c, 0x8f, 0xff, 0x2e, + 0x43, 0xdf, 0x8d, 0xf2, 0x95, 0xd9, 0xb2, 0x7c, 0x1f, 0xc1, 0x78, 0xd4, 0x9f, 0xeb, 0xd8, 0x96, + 0x4b, 0xf0, 0x09, 0xd8, 0xc0, 0x77, 0x51, 0x87, 0x63, 0x33, 0x79, 0x25, 0x56, 0x53, 0x45, 0x18, + 0x8a, 0xed, 0x78, 0x1c, 0xd6, 0x39, 0x35, 0xdb, 0xbe, 0x45, 0x0f, 0xdb, 0xa8, 0xb1, 0x07, 0xfc, + 0x32, 0x6c, 0xa4, 0x1f, 0x8a, 0x73, 0xc4, 0xac, 0xcc, 0x79, 0x13, 0xc3, 0xd4, 0xe9, 0x64, 0xc4, + 0x29, 0x8b, 0x43, 0xa3, 0xa0, 0x9c, 0xa7, 0x7b, 0xce, 0x8c, 0x3c, 0xf8, 0x75, 0xcf, 0x90, 0x36, + 0x46, 0xed, 0xd8, 0x92, 0xfc, 0x66, 0x14, 0xae, 0x2b, 0xf8, 0xbf, 0x02, 0xd0, 0x0a, 0x0f, 0x47, + 0xfc, 0x8c, 0xc2, 0x62, 0xa9, 0xf8, 0xb1, 0x54, 0xd8, 0xf5, 0xe0, 0xb1, 0x54, 0x2e, 0xeb, 0x15, + 0xc2, 0x6d, 0xb5, 0x90, 0xa5, 0xbc, 0x84, 0x20, 0xd7, 0x76, 0x00, 0x17, 0xe4, 0x25, 0xc8, 0x72, + 0x86, 0xee, 0x04, 0x9a, 0x1a, 0x9e, 0x1e, 0x9b, 0x99, 0x4e, 0x50, 0x64, 0xb6, 0x4c, 0x2c, 0xcf, + 0xbc, 0x65, 0x92, 0xb2, 0xd0, 0x26, 0xb0, 0xc4, 0xe7, 0x22, 0x38, 0x33, 0x14, 0xe7, 0xfe, 0x15, + 0x71, 0x32, 0x08, 0x61, 0xa0, 0xf8, 0x24, 0xac, 0xef, 0x5a, 0x49, 0x6e, 0x21, 0x7f, 0x84, 0x20, + 0xcf, 0x48, 0xda, 0x96, 0x45, 0x0c, 0xdf, 0x5f, 0xbb, 0x9e, 0x79, 0x00, 0x23, 0x78, 0xc9, 0xaf, + 0x54, 0x68, 0xa5, 0x4d, 0xef, 0x4c, 0xcf, 0x7a, 0xff, 0x85, 0x60, 0x4f, 0x22, 0x94, 0xff, 0x9e, + 0xf2, 0xd7, 0x85, 0xf0, 0x0c, 0xd5, 0x59, 0xba, 0xfb, 0xaa, 0xa7, 0x7b, 0xa4, 0xdf, 0x44, 0x5e, + 0x0e, 0x84, 0x8c, 0x71, 0xcd, 0x85, 0x34, 0x60, 0x87, 0x19, 0x28, 0x54, 0x64, 0x50, 0x8b, 0xae, + 0xbf, 0x85, 0x67, 0xcc, 0xc1, 0x78, 0x2a, 0x21, 0x59, 0x43, 0x5e, 0x73, 0x66, 0xdc, 0xf2, 0x60, + 0xd3, 0xff, 0x3e, 0x82, 0xbd, 
0x11, 0x96, 0x3e, 0x2f, 0xcb, 0xad, 0xbb, 0x6b, 0xa1, 0x21, 0xde, + 0x0f, 0x9b, 0x6b, 0xa4, 0x61, 0xba, 0xa6, 0x6d, 0x15, 0xad, 0x7a, 0xb5, 0x44, 0x6a, 0x14, 0xe7, + 0x88, 0xb6, 0x49, 0x2c, 0x5f, 0xa4, 0xab, 0x91, 0x8d, 0x9c, 0xd0, 0x48, 0x74, 0x23, 0xc7, 0xfb, + 0x0b, 0x02, 0x39, 0x0d, 0x2f, 0x0f, 0xcc, 0x8b, 0xb0, 0xd9, 0x10, 0x6f, 0x22, 0x01, 0x19, 0x57, + 0xd8, 0xf7, 0x83, 0x22, 0xbe, 0x1f, 0x94, 0xd3, 0xd6, 0x3d, 0x6d, 0x93, 0x11, 0x71, 0x83, 0x77, + 0xc1, 0x28, 0x0f, 0x66, 0xc0, 0x2a, 0xcb, 0x16, 0x66, 0xcb, 0xad, 0x78, 0x0c, 0xa7, 0xc5, 0x63, + 0xa4, 0xb7, 0x78, 0xd4, 0x60, 0x92, 0xd2, 0xbb, 0xac, 0x1b, 0xf3, 0xc4, 0x3b, 0x6b, 0x57, 0xab, + 0xa6, 0x57, 0x25, 0x96, 0xd7, 0x6f, 0x24, 0x24, 0xc8, 0xba, 0xbe, 0x0b, 0xcb, 0x20, 0x3c, 0x04, + 0xc1, 0xb3, 0xfc, 0x15, 0x82, 0xdd, 0x09, 0x87, 0x72, 0x39, 0x69, 0xf1, 0x12, 0xab, 0xf4, 0xe0, + 0x8d, 0x5a, 0x68, 0x65, 0xb0, 0x57, 0xf4, 0xeb, 0x24, 0x78, 0x6e, 0xbf, 0xa2, 0x44, 0x6b, 0xee, + 0x70, 0xcf, 0x35, 0xf7, 0x89, 0x28, 0xff, 0x31, 0x08, 0x83, 0x92, 0x3b, 0xd6, 0xd2, 0x4b, 0x54, + 0x5d, 0x39, 0xa1, 0xea, 0x32, 0x37, 0xec, 0x46, 0x87, 0xcd, 0x9e, 0x8e, 0x92, 0x6b, 0xc3, 0xce, + 0x10, 0x59, 0x8d, 0x18, 0xc4, 0x74, 0x06, 0x7a, 0x3f, 0x3f, 0x47, 0x20, 0xc5, 0x9d, 0xc8, 0xa5, + 0x95, 0x20, 0x5b, 0xf3, 0x97, 0x1a, 0x84, 0xf9, 0xcd, 0x6a, 0xc1, 0xf3, 0x60, 0x73, 0xf5, 0x0e, + 0x2f, 0x9d, 0x0c, 0xd6, 0x69, 0x63, 0xde, 0xb2, 0xef, 0x2c, 0x90, 0x72, 0x85, 0x0c, 0x3a, 0x61, + 0xbf, 0x13, 0x45, 0x30, 0xe1, 0x64, 0x2e, 0xcc, 0x34, 0x6c, 0xd6, 0xa3, 0xaf, 0x78, 0xea, 0xb6, + 0x2f, 0x0f, 0x36, 0x7f, 0xbf, 0x49, 0x45, 0xfb, 0xd4, 0x24, 0xf1, 0x3f, 0x08, 0xf6, 0xa5, 0xc2, + 0xe4, 0xaa, 0x5e, 0x84, 0x2d, 0x6d, 0xf2, 0x75, 0x93, 0xce, 0x1d, 0xb6, 0x4f, 0x47, 0x4e, 0x7f, + 0x29, 0x6a, 0xec, 0x35, 0x4b, 0xe4, 0x0e, 0x43, 0xdd, 0x77, 0x78, 0x4e, 0xc1, 0x2e, 0x87, 0x7a, + 0x2a, 0xb6, 0x0a, 0x59, 0x51, 0xdc, 0x64, 0x77, 0x62, 0x78, 0x6a, 0x78, 0x7a, 0x44, 0xdb, 0xe9, + 0xb4, 0x15, 0xce, 0xab, 0x62, 0x83, 0xfc, 0x36, 0x2f, 0xad, 0x31, 0xc0, 0x78, 0x40, 0x26, 0x61, + 0xb4, 0xe5, 0x0f, 0x51, 0x7f, 0xad, 0x85, 0x90, 0x2a, 0x99, 0xae, 0x55, 0xf9, 0x40, 0x14, 0x9e, + 0xd6, 0xe1, 0xa7, 0x8d, 0xf9, 0xbe, 0x25, 0x39, 0x0c, 0xe3, 0x5c, 0x12, 0xdd, 0x98, 0xef, 0xd0, + 0x02, 0x3b, 0xe2, 0xfe, 0xb5, 0x44, 0xb8, 0x03, 0xbb, 0x62, 0x71, 0x0c, 0x5c, 0x81, 0x1b, 0xbc, + 0x07, 0xbe, 0x48, 0xee, 0x06, 0x31, 0xd1, 0x18, 0x84, 0x7e, 0xfb, 0xeb, 0xef, 0x11, 0x4c, 0x25, + 0xfb, 0xe6, 0xcc, 0x66, 0x20, 0x67, 0x91, 0xbb, 0xad, 0x0b, 0x53, 0xe4, 0xfc, 0xe9, 0x51, 0x23, + 0xda, 0x36, 0xab, 0xd3, 0x76, 0xa0, 0xc5, 0x6c, 0xe6, 0x87, 0xed, 0xb0, 0x8e, 0xa2, 0xc6, 0xdf, + 0x22, 0xd8, 0xc0, 0x9b, 0x50, 0x7c, 0x20, 0x21, 0xf3, 0x63, 0x7e, 0x58, 0x90, 0x0e, 0xae, 0x6a, + 0x2f, 0xe3, 0x2f, 0x9f, 0x79, 0xff, 0xd1, 0x1f, 0x9f, 0x65, 0x5e, 0xc0, 0x27, 0x55, 0xb3, 0x64, + 0x24, 0xfd, 0x2e, 0xe2, 0xaa, 0x8b, 0x2d, 0xa1, 0x9b, 0xaa, 0x2f, 0xbf, 0xab, 0x2e, 0xf2, 0xa0, + 0x34, 0xf1, 0x27, 0x08, 0xb2, 0x62, 0x04, 0xc4, 0xab, 0x39, 0x5d, 0x5c, 0x70, 0xe9, 0xd9, 0xd5, + 0x6d, 0xe6, 0x58, 0xff, 0x4f, 0xb1, 0xee, 0xc1, 0xbb, 0x53, 0xb1, 0xe2, 0x1f, 0x11, 0xe0, 0xce, + 0xd9, 0x14, 0x1f, 0x4b, 0x3d, 0x2b, 0x69, 0xac, 0x96, 0x8e, 0x77, 0x6b, 0xc6, 0xc1, 0x9e, 0xa2, + 0x60, 0x4f, 0xe0, 0xe3, 0xf1, 0x60, 0x03, 0x43, 0x5f, 0xdb, 0xe0, 0xa1, 0xd9, 0x62, 0xf1, 0x93, + 0xcf, 0xa2, 0x63, 0x30, 0x5c, 0x81, 0x45, 0xd2, 0x8c, 0xba, 0x02, 0x8b, 0xc4, 0xf9, 0x53, 0xbe, + 0x44, 0x59, 0xcc, 0xe2, 0x73, 0xbd, 0x5f, 0x0f, 0x35, 0x3c, 0xb5, 0xe2, 0x2f, 0x32, 0x90, 0x8b, + 0x9d, 0xac, 0xf0, 0x89, 0xd5, 0x40, 0x8c, 0x1b, 0x1e, 
0xa5, 0xe7, 0x7b, 0xb0, 0xe4, 0xfc, 0x3e, + 0x44, 0x94, 0xe0, 0x7b, 0x08, 0xbf, 0xdb, 0x0f, 0xc3, 0xe8, 0x24, 0xa8, 0x8a, 0x91, 0x52, 0x5d, + 0x6c, 0x1b, 0x4e, 0x9b, 0x2a, 0x2b, 0x0e, 0xa1, 0x17, 0x6c, 0xa1, 0x89, 0x7f, 0x43, 0xb0, 0xa5, + 0xbd, 0xbb, 0xc7, 0x47, 0xd2, 0x98, 0x25, 0x4c, 0x70, 0xd2, 0xd1, 0xee, 0x8c, 0xb8, 0x12, 0x6f, + 0x51, 0x21, 0x6e, 0xe2, 0xeb, 0x7d, 0xe8, 0xd0, 0xf1, 0x3d, 0xec, 0xaa, 0x8b, 0xa2, 0xb0, 0x36, + 0xf1, 0xcf, 0x08, 0xb6, 0x76, 0xcc, 0x2f, 0xb8, 0x2b, 0xb4, 0x41, 0x56, 0x1e, 0xeb, 0xd2, 0x8a, + 0x93, 0xbc, 0x46, 0x49, 0x5e, 0xc2, 0x17, 0xd6, 0x94, 0x24, 0x7e, 0x84, 0xe0, 0x7f, 0x91, 0xd1, + 0x01, 0x1f, 0x5e, 0x19, 0x5f, 0x74, 0xae, 0x91, 0x0a, 0x5d, 0x58, 0x70, 0x36, 0x6f, 0x50, 0x36, + 0xaf, 0xe3, 0x6b, 0xfd, 0xb3, 0xa9, 0x31, 0xd7, 0x91, 0x78, 0xfd, 0x89, 0x20, 0x17, 0xdb, 0xaa, + 0xa6, 0xa7, 0x6a, 0xda, 0xb0, 0x92, 0x9e, 0xaa, 0xa9, 0xc3, 0x86, 0x7c, 0x83, 0xb2, 0xbd, 0x8a, + 0xaf, 0xf4, 0xcf, 0x56, 0x37, 0xe6, 0x23, 0x4c, 0x9f, 0x20, 0xd8, 0x1e, 0xdf, 0x94, 0xe3, 0xee, + 0x01, 0x07, 0x77, 0xf4, 0x64, 0x2f, 0xa6, 0x9c, 0xec, 0x4d, 0x4a, 0xf6, 0x35, 0xac, 0xad, 0x09, + 0xd9, 0x28, 0xa5, 0x8f, 0x33, 0xb0, 0xb5, 0xa3, 0xd9, 0x4d, 0xcf, 0xc3, 0xa4, 0xa6, 0x3d, 0x3d, + 0x0f, 0x13, 0x3b, 0xea, 0x35, 0x2a, 0xbb, 0x71, 0xe5, 0x26, 0x65, 0x14, 0x68, 0xaa, 0xf5, 0x00, + 0x50, 0xd1, 0xe1, 0xb4, 0xff, 0x46, 0xb0, 0x29, 0xda, 0xf4, 0xe2, 0xc2, 0xea, 0x38, 0x85, 0x1a, + 0x75, 0x69, 0xa6, 0x1b, 0x13, 0xae, 0xc1, 0x3b, 0x54, 0x82, 0x06, 0xf6, 0x06, 0xa3, 0x40, 0xa4, + 0xf3, 0x8f, 0x50, 0xf7, 0x6f, 0x3f, 0x7e, 0x8c, 0x60, 0x5b, 0x4c, 0x5f, 0x8c, 0x53, 0x1b, 0x85, + 0xe4, 0x26, 0x5d, 0x7a, 0xae, 0x6b, 0x3b, 0x2e, 0xc3, 0x65, 0x2a, 0xc3, 0xab, 0xf8, 0x7c, 0x1f, + 0x32, 0x44, 0x3a, 0xf8, 0x33, 0xe7, 0x1f, 0x2c, 0xe5, 0xd1, 0xc3, 0xa5, 0x3c, 0xfa, 0x7d, 0x29, + 0x8f, 0x3e, 0x5d, 0xce, 0x0f, 0x3d, 0x5c, 0xce, 0x0f, 0x3d, 0x5e, 0xce, 0x0f, 0xdd, 0x54, 0x2a, + 0xa6, 0x37, 0x57, 0x2f, 0x29, 0x86, 0x5d, 0x55, 0xf9, 0x9f, 0x86, 0x66, 0xc9, 0x38, 0x24, 0xfe, + 0x10, 0x3c, 0x7c, 0xf4, 0x90, 0x38, 0xda, 0xbb, 0xe7, 0x10, 0xb7, 0xb4, 0x9e, 0xfe, 0x86, 0x7b, + 0xe4, 0xdf, 0x00, 0x00, 0x00, 0xff, 0xff, 0xb4, 0x58, 0xb6, 0x16, 0xbf, 0x1c, 0x00, 0x00, +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConn + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion4 + +// QueryClient is the client API for Query service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. +type QueryClient interface { + // Channel queries an IBC Channel. + Channel(ctx context.Context, in *QueryChannelRequest, opts ...grpc.CallOption) (*QueryChannelResponse, error) + // Channels queries all the IBC channels of a chain. + Channels(ctx context.Context, in *QueryChannelsRequest, opts ...grpc.CallOption) (*QueryChannelsResponse, error) + // ConnectionChannels queries all the channels associated with a connection + // end. + ConnectionChannels(ctx context.Context, in *QueryConnectionChannelsRequest, opts ...grpc.CallOption) (*QueryConnectionChannelsResponse, error) + // ChannelClientState queries for the client state for the channel associated + // with the provided channel identifiers. 
+ ChannelClientState(ctx context.Context, in *QueryChannelClientStateRequest, opts ...grpc.CallOption) (*QueryChannelClientStateResponse, error) + // ChannelConsensusState queries for the consensus state for the channel + // associated with the provided channel identifiers. + ChannelConsensusState(ctx context.Context, in *QueryChannelConsensusStateRequest, opts ...grpc.CallOption) (*QueryChannelConsensusStateResponse, error) + // PacketCommitment queries a stored packet commitment hash. + PacketCommitment(ctx context.Context, in *QueryPacketCommitmentRequest, opts ...grpc.CallOption) (*QueryPacketCommitmentResponse, error) + // PacketCommitments returns all the packet commitments hashes associated + // with a channel. + PacketCommitments(ctx context.Context, in *QueryPacketCommitmentsRequest, opts ...grpc.CallOption) (*QueryPacketCommitmentsResponse, error) + // PacketReceipt queries if a given packet sequence has been received on the + // queried chain + PacketReceipt(ctx context.Context, in *QueryPacketReceiptRequest, opts ...grpc.CallOption) (*QueryPacketReceiptResponse, error) + // PacketAcknowledgement queries a stored packet acknowledgement hash. + PacketAcknowledgement(ctx context.Context, in *QueryPacketAcknowledgementRequest, opts ...grpc.CallOption) (*QueryPacketAcknowledgementResponse, error) + // PacketAcknowledgements returns all the packet acknowledgements associated + // with a channel. + PacketAcknowledgements(ctx context.Context, in *QueryPacketAcknowledgementsRequest, opts ...grpc.CallOption) (*QueryPacketAcknowledgementsResponse, error) + // UnreceivedPackets returns all the unreceived IBC packets associated with a + // channel and sequences. + UnreceivedPackets(ctx context.Context, in *QueryUnreceivedPacketsRequest, opts ...grpc.CallOption) (*QueryUnreceivedPacketsResponse, error) + // UnreceivedAcks returns all the unreceived IBC acknowledgements associated + // with a channel and sequences. + UnreceivedAcks(ctx context.Context, in *QueryUnreceivedAcksRequest, opts ...grpc.CallOption) (*QueryUnreceivedAcksResponse, error) + // NextSequenceReceive returns the next receive sequence for a given channel. + NextSequenceReceive(ctx context.Context, in *QueryNextSequenceReceiveRequest, opts ...grpc.CallOption) (*QueryNextSequenceReceiveResponse, error) +} + +type queryClient struct { + cc grpc1.ClientConn +} + +func NewQueryClient(cc grpc1.ClientConn) QueryClient { + return &queryClient{cc} +} + +func (c *queryClient) Channel(ctx context.Context, in *QueryChannelRequest, opts ...grpc.CallOption) (*QueryChannelResponse, error) { + out := new(QueryChannelResponse) + err := c.cc.Invoke(ctx, "/ibcgo.core.channel.v1.Query/Channel", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *queryClient) Channels(ctx context.Context, in *QueryChannelsRequest, opts ...grpc.CallOption) (*QueryChannelsResponse, error) { + out := new(QueryChannelsResponse) + err := c.cc.Invoke(ctx, "/ibcgo.core.channel.v1.Query/Channels", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *queryClient) ConnectionChannels(ctx context.Context, in *QueryConnectionChannelsRequest, opts ...grpc.CallOption) (*QueryConnectionChannelsResponse, error) { + out := new(QueryConnectionChannelsResponse) + err := c.cc.Invoke(ctx, "/ibcgo.core.channel.v1.Query/ConnectionChannels", in, out, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +func (c *queryClient) ChannelClientState(ctx context.Context, in *QueryChannelClientStateRequest, opts ...grpc.CallOption) (*QueryChannelClientStateResponse, error) { + out := new(QueryChannelClientStateResponse) + err := c.cc.Invoke(ctx, "/ibcgo.core.channel.v1.Query/ChannelClientState", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *queryClient) ChannelConsensusState(ctx context.Context, in *QueryChannelConsensusStateRequest, opts ...grpc.CallOption) (*QueryChannelConsensusStateResponse, error) { + out := new(QueryChannelConsensusStateResponse) + err := c.cc.Invoke(ctx, "/ibcgo.core.channel.v1.Query/ChannelConsensusState", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *queryClient) PacketCommitment(ctx context.Context, in *QueryPacketCommitmentRequest, opts ...grpc.CallOption) (*QueryPacketCommitmentResponse, error) { + out := new(QueryPacketCommitmentResponse) + err := c.cc.Invoke(ctx, "/ibcgo.core.channel.v1.Query/PacketCommitment", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *queryClient) PacketCommitments(ctx context.Context, in *QueryPacketCommitmentsRequest, opts ...grpc.CallOption) (*QueryPacketCommitmentsResponse, error) { + out := new(QueryPacketCommitmentsResponse) + err := c.cc.Invoke(ctx, "/ibcgo.core.channel.v1.Query/PacketCommitments", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *queryClient) PacketReceipt(ctx context.Context, in *QueryPacketReceiptRequest, opts ...grpc.CallOption) (*QueryPacketReceiptResponse, error) { + out := new(QueryPacketReceiptResponse) + err := c.cc.Invoke(ctx, "/ibcgo.core.channel.v1.Query/PacketReceipt", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *queryClient) PacketAcknowledgement(ctx context.Context, in *QueryPacketAcknowledgementRequest, opts ...grpc.CallOption) (*QueryPacketAcknowledgementResponse, error) { + out := new(QueryPacketAcknowledgementResponse) + err := c.cc.Invoke(ctx, "/ibcgo.core.channel.v1.Query/PacketAcknowledgement", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *queryClient) PacketAcknowledgements(ctx context.Context, in *QueryPacketAcknowledgementsRequest, opts ...grpc.CallOption) (*QueryPacketAcknowledgementsResponse, error) { + out := new(QueryPacketAcknowledgementsResponse) + err := c.cc.Invoke(ctx, "/ibcgo.core.channel.v1.Query/PacketAcknowledgements", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *queryClient) UnreceivedPackets(ctx context.Context, in *QueryUnreceivedPacketsRequest, opts ...grpc.CallOption) (*QueryUnreceivedPacketsResponse, error) { + out := new(QueryUnreceivedPacketsResponse) + err := c.cc.Invoke(ctx, "/ibcgo.core.channel.v1.Query/UnreceivedPackets", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *queryClient) UnreceivedAcks(ctx context.Context, in *QueryUnreceivedAcksRequest, opts ...grpc.CallOption) (*QueryUnreceivedAcksResponse, error) { + out := new(QueryUnreceivedAcksResponse) + err := c.cc.Invoke(ctx, "/ibcgo.core.channel.v1.Query/UnreceivedAcks", in, out, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +func (c *queryClient) NextSequenceReceive(ctx context.Context, in *QueryNextSequenceReceiveRequest, opts ...grpc.CallOption) (*QueryNextSequenceReceiveResponse, error) { + out := new(QueryNextSequenceReceiveResponse) + err := c.cc.Invoke(ctx, "/ibcgo.core.channel.v1.Query/NextSequenceReceive", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// QueryServer is the server API for Query service. +type QueryServer interface { + // Channel queries an IBC Channel. + Channel(context.Context, *QueryChannelRequest) (*QueryChannelResponse, error) + // Channels queries all the IBC channels of a chain. + Channels(context.Context, *QueryChannelsRequest) (*QueryChannelsResponse, error) + // ConnectionChannels queries all the channels associated with a connection + // end. + ConnectionChannels(context.Context, *QueryConnectionChannelsRequest) (*QueryConnectionChannelsResponse, error) + // ChannelClientState queries for the client state for the channel associated + // with the provided channel identifiers. + ChannelClientState(context.Context, *QueryChannelClientStateRequest) (*QueryChannelClientStateResponse, error) + // ChannelConsensusState queries for the consensus state for the channel + // associated with the provided channel identifiers. + ChannelConsensusState(context.Context, *QueryChannelConsensusStateRequest) (*QueryChannelConsensusStateResponse, error) + // PacketCommitment queries a stored packet commitment hash. + PacketCommitment(context.Context, *QueryPacketCommitmentRequest) (*QueryPacketCommitmentResponse, error) + // PacketCommitments returns all the packet commitments hashes associated + // with a channel. + PacketCommitments(context.Context, *QueryPacketCommitmentsRequest) (*QueryPacketCommitmentsResponse, error) + // PacketReceipt queries if a given packet sequence has been received on the + // queried chain + PacketReceipt(context.Context, *QueryPacketReceiptRequest) (*QueryPacketReceiptResponse, error) + // PacketAcknowledgement queries a stored packet acknowledgement hash. + PacketAcknowledgement(context.Context, *QueryPacketAcknowledgementRequest) (*QueryPacketAcknowledgementResponse, error) + // PacketAcknowledgements returns all the packet acknowledgements associated + // with a channel. + PacketAcknowledgements(context.Context, *QueryPacketAcknowledgementsRequest) (*QueryPacketAcknowledgementsResponse, error) + // UnreceivedPackets returns all the unreceived IBC packets associated with a + // channel and sequences. + UnreceivedPackets(context.Context, *QueryUnreceivedPacketsRequest) (*QueryUnreceivedPacketsResponse, error) + // UnreceivedAcks returns all the unreceived IBC acknowledgements associated + // with a channel and sequences. + UnreceivedAcks(context.Context, *QueryUnreceivedAcksRequest) (*QueryUnreceivedAcksResponse, error) + // NextSequenceReceive returns the next receive sequence for a given channel. + NextSequenceReceive(context.Context, *QueryNextSequenceReceiveRequest) (*QueryNextSequenceReceiveResponse, error) +} + +// UnimplementedQueryServer can be embedded to have forward compatible implementations. 
+type UnimplementedQueryServer struct { +} + +func (*UnimplementedQueryServer) Channel(ctx context.Context, req *QueryChannelRequest) (*QueryChannelResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method Channel not implemented") +} +func (*UnimplementedQueryServer) Channels(ctx context.Context, req *QueryChannelsRequest) (*QueryChannelsResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method Channels not implemented") +} +func (*UnimplementedQueryServer) ConnectionChannels(ctx context.Context, req *QueryConnectionChannelsRequest) (*QueryConnectionChannelsResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method ConnectionChannels not implemented") +} +func (*UnimplementedQueryServer) ChannelClientState(ctx context.Context, req *QueryChannelClientStateRequest) (*QueryChannelClientStateResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method ChannelClientState not implemented") +} +func (*UnimplementedQueryServer) ChannelConsensusState(ctx context.Context, req *QueryChannelConsensusStateRequest) (*QueryChannelConsensusStateResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method ChannelConsensusState not implemented") +} +func (*UnimplementedQueryServer) PacketCommitment(ctx context.Context, req *QueryPacketCommitmentRequest) (*QueryPacketCommitmentResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method PacketCommitment not implemented") +} +func (*UnimplementedQueryServer) PacketCommitments(ctx context.Context, req *QueryPacketCommitmentsRequest) (*QueryPacketCommitmentsResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method PacketCommitments not implemented") +} +func (*UnimplementedQueryServer) PacketReceipt(ctx context.Context, req *QueryPacketReceiptRequest) (*QueryPacketReceiptResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method PacketReceipt not implemented") +} +func (*UnimplementedQueryServer) PacketAcknowledgement(ctx context.Context, req *QueryPacketAcknowledgementRequest) (*QueryPacketAcknowledgementResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method PacketAcknowledgement not implemented") +} +func (*UnimplementedQueryServer) PacketAcknowledgements(ctx context.Context, req *QueryPacketAcknowledgementsRequest) (*QueryPacketAcknowledgementsResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method PacketAcknowledgements not implemented") +} +func (*UnimplementedQueryServer) UnreceivedPackets(ctx context.Context, req *QueryUnreceivedPacketsRequest) (*QueryUnreceivedPacketsResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method UnreceivedPackets not implemented") +} +func (*UnimplementedQueryServer) UnreceivedAcks(ctx context.Context, req *QueryUnreceivedAcksRequest) (*QueryUnreceivedAcksResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method UnreceivedAcks not implemented") +} +func (*UnimplementedQueryServer) NextSequenceReceive(ctx context.Context, req *QueryNextSequenceReceiveRequest) (*QueryNextSequenceReceiveResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method NextSequenceReceive not implemented") +} + +func RegisterQueryServer(s grpc1.Server, srv QueryServer) { + s.RegisterService(&_Query_serviceDesc, srv) +} + +func _Query_Channel_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(QueryChannelRequest) + if err := 
dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(QueryServer).Channel(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/ibcgo.core.channel.v1.Query/Channel", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(QueryServer).Channel(ctx, req.(*QueryChannelRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Query_Channels_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(QueryChannelsRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(QueryServer).Channels(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/ibcgo.core.channel.v1.Query/Channels", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(QueryServer).Channels(ctx, req.(*QueryChannelsRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Query_ConnectionChannels_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(QueryConnectionChannelsRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(QueryServer).ConnectionChannels(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/ibcgo.core.channel.v1.Query/ConnectionChannels", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(QueryServer).ConnectionChannels(ctx, req.(*QueryConnectionChannelsRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Query_ChannelClientState_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(QueryChannelClientStateRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(QueryServer).ChannelClientState(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/ibcgo.core.channel.v1.Query/ChannelClientState", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(QueryServer).ChannelClientState(ctx, req.(*QueryChannelClientStateRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Query_ChannelConsensusState_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(QueryChannelConsensusStateRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(QueryServer).ChannelConsensusState(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/ibcgo.core.channel.v1.Query/ChannelConsensusState", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(QueryServer).ChannelConsensusState(ctx, req.(*QueryChannelConsensusStateRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Query_PacketCommitment_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(QueryPacketCommitmentRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(QueryServer).PacketCommitment(ctx, in) + } + info := &grpc.UnaryServerInfo{ + 
Server: srv, + FullMethod: "/ibcgo.core.channel.v1.Query/PacketCommitment", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(QueryServer).PacketCommitment(ctx, req.(*QueryPacketCommitmentRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Query_PacketCommitments_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(QueryPacketCommitmentsRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(QueryServer).PacketCommitments(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/ibcgo.core.channel.v1.Query/PacketCommitments", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(QueryServer).PacketCommitments(ctx, req.(*QueryPacketCommitmentsRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Query_PacketReceipt_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(QueryPacketReceiptRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(QueryServer).PacketReceipt(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/ibcgo.core.channel.v1.Query/PacketReceipt", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(QueryServer).PacketReceipt(ctx, req.(*QueryPacketReceiptRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Query_PacketAcknowledgement_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(QueryPacketAcknowledgementRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(QueryServer).PacketAcknowledgement(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/ibcgo.core.channel.v1.Query/PacketAcknowledgement", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(QueryServer).PacketAcknowledgement(ctx, req.(*QueryPacketAcknowledgementRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Query_PacketAcknowledgements_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(QueryPacketAcknowledgementsRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(QueryServer).PacketAcknowledgements(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/ibcgo.core.channel.v1.Query/PacketAcknowledgements", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(QueryServer).PacketAcknowledgements(ctx, req.(*QueryPacketAcknowledgementsRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Query_UnreceivedPackets_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(QueryUnreceivedPacketsRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(QueryServer).UnreceivedPackets(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/ibcgo.core.channel.v1.Query/UnreceivedPackets", 
+ } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(QueryServer).UnreceivedPackets(ctx, req.(*QueryUnreceivedPacketsRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Query_UnreceivedAcks_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(QueryUnreceivedAcksRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(QueryServer).UnreceivedAcks(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/ibcgo.core.channel.v1.Query/UnreceivedAcks", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(QueryServer).UnreceivedAcks(ctx, req.(*QueryUnreceivedAcksRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Query_NextSequenceReceive_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(QueryNextSequenceReceiveRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(QueryServer).NextSequenceReceive(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/ibcgo.core.channel.v1.Query/NextSequenceReceive", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(QueryServer).NextSequenceReceive(ctx, req.(*QueryNextSequenceReceiveRequest)) + } + return interceptor(ctx, in, info, handler) +} + +var _Query_serviceDesc = grpc.ServiceDesc{ + ServiceName: "ibcgo.core.channel.v1.Query", + HandlerType: (*QueryServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "Channel", + Handler: _Query_Channel_Handler, + }, + { + MethodName: "Channels", + Handler: _Query_Channels_Handler, + }, + { + MethodName: "ConnectionChannels", + Handler: _Query_ConnectionChannels_Handler, + }, + { + MethodName: "ChannelClientState", + Handler: _Query_ChannelClientState_Handler, + }, + { + MethodName: "ChannelConsensusState", + Handler: _Query_ChannelConsensusState_Handler, + }, + { + MethodName: "PacketCommitment", + Handler: _Query_PacketCommitment_Handler, + }, + { + MethodName: "PacketCommitments", + Handler: _Query_PacketCommitments_Handler, + }, + { + MethodName: "PacketReceipt", + Handler: _Query_PacketReceipt_Handler, + }, + { + MethodName: "PacketAcknowledgement", + Handler: _Query_PacketAcknowledgement_Handler, + }, + { + MethodName: "PacketAcknowledgements", + Handler: _Query_PacketAcknowledgements_Handler, + }, + { + MethodName: "UnreceivedPackets", + Handler: _Query_UnreceivedPackets_Handler, + }, + { + MethodName: "UnreceivedAcks", + Handler: _Query_UnreceivedAcks_Handler, + }, + { + MethodName: "NextSequenceReceive", + Handler: _Query_NextSequenceReceive_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "ibcgo/core/channel/v1/query.proto", +} + +func (m *QueryChannelRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *QueryChannelRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *QueryChannelRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.ChannelId) > 0 { + i -= len(m.ChannelId) + copy(dAtA[i:], 
m.ChannelId) + i = encodeVarintQuery(dAtA, i, uint64(len(m.ChannelId))) + i-- + dAtA[i] = 0x12 + } + if len(m.PortId) > 0 { + i -= len(m.PortId) + copy(dAtA[i:], m.PortId) + i = encodeVarintQuery(dAtA, i, uint64(len(m.PortId))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *QueryChannelResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *QueryChannelResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *QueryChannelResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.ProofHeight.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintQuery(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + if len(m.Proof) > 0 { + i -= len(m.Proof) + copy(dAtA[i:], m.Proof) + i = encodeVarintQuery(dAtA, i, uint64(len(m.Proof))) + i-- + dAtA[i] = 0x12 + } + if m.Channel != nil { + { + size, err := m.Channel.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintQuery(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *QueryChannelsRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *QueryChannelsRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *QueryChannelsRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Pagination != nil { + { + size, err := m.Pagination.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintQuery(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *QueryChannelsResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *QueryChannelsResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *QueryChannelsResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.Height.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintQuery(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + if m.Pagination != nil { + { + size, err := m.Pagination.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintQuery(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if len(m.Channels) > 0 { + for iNdEx := len(m.Channels) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Channels[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintQuery(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *QueryConnectionChannelsRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + 
return dAtA[:n], nil +} + +func (m *QueryConnectionChannelsRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *QueryConnectionChannelsRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Pagination != nil { + { + size, err := m.Pagination.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintQuery(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if len(m.Connection) > 0 { + i -= len(m.Connection) + copy(dAtA[i:], m.Connection) + i = encodeVarintQuery(dAtA, i, uint64(len(m.Connection))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *QueryConnectionChannelsResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *QueryConnectionChannelsResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *QueryConnectionChannelsResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.Height.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintQuery(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + if m.Pagination != nil { + { + size, err := m.Pagination.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintQuery(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if len(m.Channels) > 0 { + for iNdEx := len(m.Channels) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Channels[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintQuery(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *QueryChannelClientStateRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *QueryChannelClientStateRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *QueryChannelClientStateRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.ChannelId) > 0 { + i -= len(m.ChannelId) + copy(dAtA[i:], m.ChannelId) + i = encodeVarintQuery(dAtA, i, uint64(len(m.ChannelId))) + i-- + dAtA[i] = 0x12 + } + if len(m.PortId) > 0 { + i -= len(m.PortId) + copy(dAtA[i:], m.PortId) + i = encodeVarintQuery(dAtA, i, uint64(len(m.PortId))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *QueryChannelClientStateResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *QueryChannelClientStateResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *QueryChannelClientStateResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.ProofHeight.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintQuery(dAtA, i, uint64(size)) 
+ } + i-- + dAtA[i] = 0x1a + if len(m.Proof) > 0 { + i -= len(m.Proof) + copy(dAtA[i:], m.Proof) + i = encodeVarintQuery(dAtA, i, uint64(len(m.Proof))) + i-- + dAtA[i] = 0x12 + } + if m.IdentifiedClientState != nil { + { + size, err := m.IdentifiedClientState.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintQuery(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *QueryChannelConsensusStateRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *QueryChannelConsensusStateRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *QueryChannelConsensusStateRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.RevisionHeight != 0 { + i = encodeVarintQuery(dAtA, i, uint64(m.RevisionHeight)) + i-- + dAtA[i] = 0x20 + } + if m.RevisionNumber != 0 { + i = encodeVarintQuery(dAtA, i, uint64(m.RevisionNumber)) + i-- + dAtA[i] = 0x18 + } + if len(m.ChannelId) > 0 { + i -= len(m.ChannelId) + copy(dAtA[i:], m.ChannelId) + i = encodeVarintQuery(dAtA, i, uint64(len(m.ChannelId))) + i-- + dAtA[i] = 0x12 + } + if len(m.PortId) > 0 { + i -= len(m.PortId) + copy(dAtA[i:], m.PortId) + i = encodeVarintQuery(dAtA, i, uint64(len(m.PortId))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *QueryChannelConsensusStateResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *QueryChannelConsensusStateResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *QueryChannelConsensusStateResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.ProofHeight.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintQuery(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + if len(m.Proof) > 0 { + i -= len(m.Proof) + copy(dAtA[i:], m.Proof) + i = encodeVarintQuery(dAtA, i, uint64(len(m.Proof))) + i-- + dAtA[i] = 0x1a + } + if len(m.ClientId) > 0 { + i -= len(m.ClientId) + copy(dAtA[i:], m.ClientId) + i = encodeVarintQuery(dAtA, i, uint64(len(m.ClientId))) + i-- + dAtA[i] = 0x12 + } + if m.ConsensusState != nil { + { + size, err := m.ConsensusState.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintQuery(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *QueryPacketCommitmentRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *QueryPacketCommitmentRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *QueryPacketCommitmentRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Sequence != 0 { + i = encodeVarintQuery(dAtA, i, uint64(m.Sequence)) + i-- + dAtA[i] = 0x18 + } + if len(m.ChannelId) > 0 { + i -= len(m.ChannelId) + 
copy(dAtA[i:], m.ChannelId) + i = encodeVarintQuery(dAtA, i, uint64(len(m.ChannelId))) + i-- + dAtA[i] = 0x12 + } + if len(m.PortId) > 0 { + i -= len(m.PortId) + copy(dAtA[i:], m.PortId) + i = encodeVarintQuery(dAtA, i, uint64(len(m.PortId))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *QueryPacketCommitmentResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *QueryPacketCommitmentResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *QueryPacketCommitmentResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.ProofHeight.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintQuery(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + if len(m.Proof) > 0 { + i -= len(m.Proof) + copy(dAtA[i:], m.Proof) + i = encodeVarintQuery(dAtA, i, uint64(len(m.Proof))) + i-- + dAtA[i] = 0x12 + } + if len(m.Commitment) > 0 { + i -= len(m.Commitment) + copy(dAtA[i:], m.Commitment) + i = encodeVarintQuery(dAtA, i, uint64(len(m.Commitment))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *QueryPacketCommitmentsRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *QueryPacketCommitmentsRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *QueryPacketCommitmentsRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Pagination != nil { + { + size, err := m.Pagination.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintQuery(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + if len(m.ChannelId) > 0 { + i -= len(m.ChannelId) + copy(dAtA[i:], m.ChannelId) + i = encodeVarintQuery(dAtA, i, uint64(len(m.ChannelId))) + i-- + dAtA[i] = 0x12 + } + if len(m.PortId) > 0 { + i -= len(m.PortId) + copy(dAtA[i:], m.PortId) + i = encodeVarintQuery(dAtA, i, uint64(len(m.PortId))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *QueryPacketCommitmentsResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *QueryPacketCommitmentsResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *QueryPacketCommitmentsResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.Height.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintQuery(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + if m.Pagination != nil { + { + size, err := m.Pagination.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintQuery(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if len(m.Commitments) > 0 { + for iNdEx := len(m.Commitments) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := 
m.Commitments[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintQuery(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *QueryPacketReceiptRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *QueryPacketReceiptRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *QueryPacketReceiptRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Sequence != 0 { + i = encodeVarintQuery(dAtA, i, uint64(m.Sequence)) + i-- + dAtA[i] = 0x18 + } + if len(m.ChannelId) > 0 { + i -= len(m.ChannelId) + copy(dAtA[i:], m.ChannelId) + i = encodeVarintQuery(dAtA, i, uint64(len(m.ChannelId))) + i-- + dAtA[i] = 0x12 + } + if len(m.PortId) > 0 { + i -= len(m.PortId) + copy(dAtA[i:], m.PortId) + i = encodeVarintQuery(dAtA, i, uint64(len(m.PortId))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *QueryPacketReceiptResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *QueryPacketReceiptResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *QueryPacketReceiptResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.ProofHeight.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintQuery(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + if len(m.Proof) > 0 { + i -= len(m.Proof) + copy(dAtA[i:], m.Proof) + i = encodeVarintQuery(dAtA, i, uint64(len(m.Proof))) + i-- + dAtA[i] = 0x1a + } + if m.Received { + i-- + if m.Received { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x10 + } + return len(dAtA) - i, nil +} + +func (m *QueryPacketAcknowledgementRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *QueryPacketAcknowledgementRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *QueryPacketAcknowledgementRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Sequence != 0 { + i = encodeVarintQuery(dAtA, i, uint64(m.Sequence)) + i-- + dAtA[i] = 0x18 + } + if len(m.ChannelId) > 0 { + i -= len(m.ChannelId) + copy(dAtA[i:], m.ChannelId) + i = encodeVarintQuery(dAtA, i, uint64(len(m.ChannelId))) + i-- + dAtA[i] = 0x12 + } + if len(m.PortId) > 0 { + i -= len(m.PortId) + copy(dAtA[i:], m.PortId) + i = encodeVarintQuery(dAtA, i, uint64(len(m.PortId))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *QueryPacketAcknowledgementResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *QueryPacketAcknowledgementResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return 
m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *QueryPacketAcknowledgementResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.ProofHeight.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintQuery(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + if len(m.Proof) > 0 { + i -= len(m.Proof) + copy(dAtA[i:], m.Proof) + i = encodeVarintQuery(dAtA, i, uint64(len(m.Proof))) + i-- + dAtA[i] = 0x12 + } + if len(m.Acknowledgement) > 0 { + i -= len(m.Acknowledgement) + copy(dAtA[i:], m.Acknowledgement) + i = encodeVarintQuery(dAtA, i, uint64(len(m.Acknowledgement))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *QueryPacketAcknowledgementsRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *QueryPacketAcknowledgementsRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *QueryPacketAcknowledgementsRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Pagination != nil { + { + size, err := m.Pagination.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintQuery(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + if len(m.ChannelId) > 0 { + i -= len(m.ChannelId) + copy(dAtA[i:], m.ChannelId) + i = encodeVarintQuery(dAtA, i, uint64(len(m.ChannelId))) + i-- + dAtA[i] = 0x12 + } + if len(m.PortId) > 0 { + i -= len(m.PortId) + copy(dAtA[i:], m.PortId) + i = encodeVarintQuery(dAtA, i, uint64(len(m.PortId))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *QueryPacketAcknowledgementsResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *QueryPacketAcknowledgementsResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *QueryPacketAcknowledgementsResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.Height.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintQuery(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + if m.Pagination != nil { + { + size, err := m.Pagination.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintQuery(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if len(m.Acknowledgements) > 0 { + for iNdEx := len(m.Acknowledgements) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Acknowledgements[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintQuery(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *QueryUnreceivedPacketsRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *QueryUnreceivedPacketsRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m 
*QueryUnreceivedPacketsRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.PacketCommitmentSequences) > 0 { + dAtA23 := make([]byte, len(m.PacketCommitmentSequences)*10) + var j22 int + for _, num := range m.PacketCommitmentSequences { + for num >= 1<<7 { + dAtA23[j22] = uint8(uint64(num)&0x7f | 0x80) + num >>= 7 + j22++ + } + dAtA23[j22] = uint8(num) + j22++ + } + i -= j22 + copy(dAtA[i:], dAtA23[:j22]) + i = encodeVarintQuery(dAtA, i, uint64(j22)) + i-- + dAtA[i] = 0x1a + } + if len(m.ChannelId) > 0 { + i -= len(m.ChannelId) + copy(dAtA[i:], m.ChannelId) + i = encodeVarintQuery(dAtA, i, uint64(len(m.ChannelId))) + i-- + dAtA[i] = 0x12 + } + if len(m.PortId) > 0 { + i -= len(m.PortId) + copy(dAtA[i:], m.PortId) + i = encodeVarintQuery(dAtA, i, uint64(len(m.PortId))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *QueryUnreceivedPacketsResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *QueryUnreceivedPacketsResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *QueryUnreceivedPacketsResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.Height.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintQuery(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + if len(m.Sequences) > 0 { + dAtA26 := make([]byte, len(m.Sequences)*10) + var j25 int + for _, num := range m.Sequences { + for num >= 1<<7 { + dAtA26[j25] = uint8(uint64(num)&0x7f | 0x80) + num >>= 7 + j25++ + } + dAtA26[j25] = uint8(num) + j25++ + } + i -= j25 + copy(dAtA[i:], dAtA26[:j25]) + i = encodeVarintQuery(dAtA, i, uint64(j25)) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *QueryUnreceivedAcksRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *QueryUnreceivedAcksRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *QueryUnreceivedAcksRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.PacketAckSequences) > 0 { + dAtA28 := make([]byte, len(m.PacketAckSequences)*10) + var j27 int + for _, num := range m.PacketAckSequences { + for num >= 1<<7 { + dAtA28[j27] = uint8(uint64(num)&0x7f | 0x80) + num >>= 7 + j27++ + } + dAtA28[j27] = uint8(num) + j27++ + } + i -= j27 + copy(dAtA[i:], dAtA28[:j27]) + i = encodeVarintQuery(dAtA, i, uint64(j27)) + i-- + dAtA[i] = 0x1a + } + if len(m.ChannelId) > 0 { + i -= len(m.ChannelId) + copy(dAtA[i:], m.ChannelId) + i = encodeVarintQuery(dAtA, i, uint64(len(m.ChannelId))) + i-- + dAtA[i] = 0x12 + } + if len(m.PortId) > 0 { + i -= len(m.PortId) + copy(dAtA[i:], m.PortId) + i = encodeVarintQuery(dAtA, i, uint64(len(m.PortId))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *QueryUnreceivedAcksResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *QueryUnreceivedAcksResponse) 
MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *QueryUnreceivedAcksResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.Height.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintQuery(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + if len(m.Sequences) > 0 { + dAtA31 := make([]byte, len(m.Sequences)*10) + var j30 int + for _, num := range m.Sequences { + for num >= 1<<7 { + dAtA31[j30] = uint8(uint64(num)&0x7f | 0x80) + num >>= 7 + j30++ + } + dAtA31[j30] = uint8(num) + j30++ + } + i -= j30 + copy(dAtA[i:], dAtA31[:j30]) + i = encodeVarintQuery(dAtA, i, uint64(j30)) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *QueryNextSequenceReceiveRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *QueryNextSequenceReceiveRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *QueryNextSequenceReceiveRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.ChannelId) > 0 { + i -= len(m.ChannelId) + copy(dAtA[i:], m.ChannelId) + i = encodeVarintQuery(dAtA, i, uint64(len(m.ChannelId))) + i-- + dAtA[i] = 0x12 + } + if len(m.PortId) > 0 { + i -= len(m.PortId) + copy(dAtA[i:], m.PortId) + i = encodeVarintQuery(dAtA, i, uint64(len(m.PortId))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *QueryNextSequenceReceiveResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *QueryNextSequenceReceiveResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *QueryNextSequenceReceiveResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.ProofHeight.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintQuery(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + if len(m.Proof) > 0 { + i -= len(m.Proof) + copy(dAtA[i:], m.Proof) + i = encodeVarintQuery(dAtA, i, uint64(len(m.Proof))) + i-- + dAtA[i] = 0x12 + } + if m.NextSequenceReceive != 0 { + i = encodeVarintQuery(dAtA, i, uint64(m.NextSequenceReceive)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func encodeVarintQuery(dAtA []byte, offset int, v uint64) int { + offset -= sovQuery(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *QueryChannelRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.PortId) + if l > 0 { + n += 1 + l + sovQuery(uint64(l)) + } + l = len(m.ChannelId) + if l > 0 { + n += 1 + l + sovQuery(uint64(l)) + } + return n +} + +func (m *QueryChannelResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Channel != nil { + l = m.Channel.Size() + n += 1 + l + sovQuery(uint64(l)) + } + l = len(m.Proof) + if l > 0 { + n += 1 + l + sovQuery(uint64(l)) + } + l = m.ProofHeight.Size() + n += 1 + l + sovQuery(uint64(l)) + 
return n +} + +func (m *QueryChannelsRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Pagination != nil { + l = m.Pagination.Size() + n += 1 + l + sovQuery(uint64(l)) + } + return n +} + +func (m *QueryChannelsResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Channels) > 0 { + for _, e := range m.Channels { + l = e.Size() + n += 1 + l + sovQuery(uint64(l)) + } + } + if m.Pagination != nil { + l = m.Pagination.Size() + n += 1 + l + sovQuery(uint64(l)) + } + l = m.Height.Size() + n += 1 + l + sovQuery(uint64(l)) + return n +} + +func (m *QueryConnectionChannelsRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Connection) + if l > 0 { + n += 1 + l + sovQuery(uint64(l)) + } + if m.Pagination != nil { + l = m.Pagination.Size() + n += 1 + l + sovQuery(uint64(l)) + } + return n +} + +func (m *QueryConnectionChannelsResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Channels) > 0 { + for _, e := range m.Channels { + l = e.Size() + n += 1 + l + sovQuery(uint64(l)) + } + } + if m.Pagination != nil { + l = m.Pagination.Size() + n += 1 + l + sovQuery(uint64(l)) + } + l = m.Height.Size() + n += 1 + l + sovQuery(uint64(l)) + return n +} + +func (m *QueryChannelClientStateRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.PortId) + if l > 0 { + n += 1 + l + sovQuery(uint64(l)) + } + l = len(m.ChannelId) + if l > 0 { + n += 1 + l + sovQuery(uint64(l)) + } + return n +} + +func (m *QueryChannelClientStateResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.IdentifiedClientState != nil { + l = m.IdentifiedClientState.Size() + n += 1 + l + sovQuery(uint64(l)) + } + l = len(m.Proof) + if l > 0 { + n += 1 + l + sovQuery(uint64(l)) + } + l = m.ProofHeight.Size() + n += 1 + l + sovQuery(uint64(l)) + return n +} + +func (m *QueryChannelConsensusStateRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.PortId) + if l > 0 { + n += 1 + l + sovQuery(uint64(l)) + } + l = len(m.ChannelId) + if l > 0 { + n += 1 + l + sovQuery(uint64(l)) + } + if m.RevisionNumber != 0 { + n += 1 + sovQuery(uint64(m.RevisionNumber)) + } + if m.RevisionHeight != 0 { + n += 1 + sovQuery(uint64(m.RevisionHeight)) + } + return n +} + +func (m *QueryChannelConsensusStateResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.ConsensusState != nil { + l = m.ConsensusState.Size() + n += 1 + l + sovQuery(uint64(l)) + } + l = len(m.ClientId) + if l > 0 { + n += 1 + l + sovQuery(uint64(l)) + } + l = len(m.Proof) + if l > 0 { + n += 1 + l + sovQuery(uint64(l)) + } + l = m.ProofHeight.Size() + n += 1 + l + sovQuery(uint64(l)) + return n +} + +func (m *QueryPacketCommitmentRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.PortId) + if l > 0 { + n += 1 + l + sovQuery(uint64(l)) + } + l = len(m.ChannelId) + if l > 0 { + n += 1 + l + sovQuery(uint64(l)) + } + if m.Sequence != 0 { + n += 1 + sovQuery(uint64(m.Sequence)) + } + return n +} + +func (m *QueryPacketCommitmentResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Commitment) + if l > 0 { + n += 1 + l + sovQuery(uint64(l)) + } + l = len(m.Proof) + if l > 0 { + n += 1 + l + sovQuery(uint64(l)) + } + l = m.ProofHeight.Size() + n += 1 + l + sovQuery(uint64(l)) + return n +} + +func (m *QueryPacketCommitmentsRequest) Size() (n int) { + if m == 
nil { + return 0 + } + var l int + _ = l + l = len(m.PortId) + if l > 0 { + n += 1 + l + sovQuery(uint64(l)) + } + l = len(m.ChannelId) + if l > 0 { + n += 1 + l + sovQuery(uint64(l)) + } + if m.Pagination != nil { + l = m.Pagination.Size() + n += 1 + l + sovQuery(uint64(l)) + } + return n +} + +func (m *QueryPacketCommitmentsResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Commitments) > 0 { + for _, e := range m.Commitments { + l = e.Size() + n += 1 + l + sovQuery(uint64(l)) + } + } + if m.Pagination != nil { + l = m.Pagination.Size() + n += 1 + l + sovQuery(uint64(l)) + } + l = m.Height.Size() + n += 1 + l + sovQuery(uint64(l)) + return n +} + +func (m *QueryPacketReceiptRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.PortId) + if l > 0 { + n += 1 + l + sovQuery(uint64(l)) + } + l = len(m.ChannelId) + if l > 0 { + n += 1 + l + sovQuery(uint64(l)) + } + if m.Sequence != 0 { + n += 1 + sovQuery(uint64(m.Sequence)) + } + return n +} + +func (m *QueryPacketReceiptResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Received { + n += 2 + } + l = len(m.Proof) + if l > 0 { + n += 1 + l + sovQuery(uint64(l)) + } + l = m.ProofHeight.Size() + n += 1 + l + sovQuery(uint64(l)) + return n +} + +func (m *QueryPacketAcknowledgementRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.PortId) + if l > 0 { + n += 1 + l + sovQuery(uint64(l)) + } + l = len(m.ChannelId) + if l > 0 { + n += 1 + l + sovQuery(uint64(l)) + } + if m.Sequence != 0 { + n += 1 + sovQuery(uint64(m.Sequence)) + } + return n +} + +func (m *QueryPacketAcknowledgementResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Acknowledgement) + if l > 0 { + n += 1 + l + sovQuery(uint64(l)) + } + l = len(m.Proof) + if l > 0 { + n += 1 + l + sovQuery(uint64(l)) + } + l = m.ProofHeight.Size() + n += 1 + l + sovQuery(uint64(l)) + return n +} + +func (m *QueryPacketAcknowledgementsRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.PortId) + if l > 0 { + n += 1 + l + sovQuery(uint64(l)) + } + l = len(m.ChannelId) + if l > 0 { + n += 1 + l + sovQuery(uint64(l)) + } + if m.Pagination != nil { + l = m.Pagination.Size() + n += 1 + l + sovQuery(uint64(l)) + } + return n +} + +func (m *QueryPacketAcknowledgementsResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Acknowledgements) > 0 { + for _, e := range m.Acknowledgements { + l = e.Size() + n += 1 + l + sovQuery(uint64(l)) + } + } + if m.Pagination != nil { + l = m.Pagination.Size() + n += 1 + l + sovQuery(uint64(l)) + } + l = m.Height.Size() + n += 1 + l + sovQuery(uint64(l)) + return n +} + +func (m *QueryUnreceivedPacketsRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.PortId) + if l > 0 { + n += 1 + l + sovQuery(uint64(l)) + } + l = len(m.ChannelId) + if l > 0 { + n += 1 + l + sovQuery(uint64(l)) + } + if len(m.PacketCommitmentSequences) > 0 { + l = 0 + for _, e := range m.PacketCommitmentSequences { + l += sovQuery(uint64(e)) + } + n += 1 + sovQuery(uint64(l)) + l + } + return n +} + +func (m *QueryUnreceivedPacketsResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Sequences) > 0 { + l = 0 + for _, e := range m.Sequences { + l += sovQuery(uint64(e)) + } + n += 1 + sovQuery(uint64(l)) + l + } + l = m.Height.Size() + n += 1 + l + sovQuery(uint64(l)) + return n +} + 
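// Illustrative sketch (not emitted by protoc-gen-gogo; added only to make the
// surrounding generated code easier to follow). Every Size() method in this
// file prices a length-delimited field as 1 (tag byte) + sovQuery(l) (varint
// length prefix) + l (payload bytes), where sovQuery(x) is the number of
// 7-bit groups needed to varint-encode x. The hypothetical helper below
// spells out that accounting for a single string/bytes field; it is not part
// of the generated API, but it would compile in this package because it only
// uses sovQuery, which is defined later in this file.
func sizeOfLengthDelimited(payloadLen int) int {
	if payloadLen == 0 {
		return 0 // proto3: empty strings/bytes fields are omitted from the wire
	}
	return 1 + sovQuery(uint64(payloadLen)) + payloadLen // tag + length prefix + payload
}

// The MarshalToSizedBuffer methods above consume these sizes in reverse:
// starting with i == len(dAtA), each field copies its payload with
// i -= len(...), prepends the varint length via encodeVarintQuery, and then
// writes the one-byte tag (e.g. 0xa for field 1, wire type 2), so the buffer
// fills from the end toward index 0 and Marshal returns dAtA[:n] in order.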
+func (m *QueryUnreceivedAcksRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.PortId) + if l > 0 { + n += 1 + l + sovQuery(uint64(l)) + } + l = len(m.ChannelId) + if l > 0 { + n += 1 + l + sovQuery(uint64(l)) + } + if len(m.PacketAckSequences) > 0 { + l = 0 + for _, e := range m.PacketAckSequences { + l += sovQuery(uint64(e)) + } + n += 1 + sovQuery(uint64(l)) + l + } + return n +} + +func (m *QueryUnreceivedAcksResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Sequences) > 0 { + l = 0 + for _, e := range m.Sequences { + l += sovQuery(uint64(e)) + } + n += 1 + sovQuery(uint64(l)) + l + } + l = m.Height.Size() + n += 1 + l + sovQuery(uint64(l)) + return n +} + +func (m *QueryNextSequenceReceiveRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.PortId) + if l > 0 { + n += 1 + l + sovQuery(uint64(l)) + } + l = len(m.ChannelId) + if l > 0 { + n += 1 + l + sovQuery(uint64(l)) + } + return n +} + +func (m *QueryNextSequenceReceiveResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.NextSequenceReceive != 0 { + n += 1 + sovQuery(uint64(m.NextSequenceReceive)) + } + l = len(m.Proof) + if l > 0 { + n += 1 + l + sovQuery(uint64(l)) + } + l = m.ProofHeight.Size() + n += 1 + l + sovQuery(uint64(l)) + return n +} + +func sovQuery(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozQuery(x uint64) (n int) { + return sovQuery(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *QueryChannelRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: QueryChannelRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: QueryChannelRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PortId", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.PortId = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ChannelId", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + 
m.ChannelId = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipQuery(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthQuery + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *QueryChannelResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: QueryChannelResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: QueryChannelResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Channel", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Channel == nil { + m.Channel = &Channel{} + } + if err := m.Channel.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Proof", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Proof = append(m.Proof[:0], dAtA[iNdEx:postIndex]...) 
+ if m.Proof == nil { + m.Proof = []byte{} + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ProofHeight", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ProofHeight.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipQuery(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthQuery + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *QueryChannelsRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: QueryChannelsRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: QueryChannelsRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Pagination", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Pagination == nil { + m.Pagination = &query.PageRequest{} + } + if err := m.Pagination.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipQuery(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthQuery + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *QueryChannelsResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: QueryChannelsResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: QueryChannelsResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch 
fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Channels", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Channels = append(m.Channels, &IdentifiedChannel{}) + if err := m.Channels[len(m.Channels)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Pagination", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Pagination == nil { + m.Pagination = &query.PageResponse{} + } + if err := m.Pagination.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Height", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Height.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipQuery(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthQuery + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *QueryConnectionChannelsRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: QueryConnectionChannelsRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: QueryConnectionChannelsRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Connection", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= 
uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Connection = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Pagination", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Pagination == nil { + m.Pagination = &query.PageRequest{} + } + if err := m.Pagination.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipQuery(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthQuery + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *QueryConnectionChannelsResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: QueryConnectionChannelsResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: QueryConnectionChannelsResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Channels", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Channels = append(m.Channels, &IdentifiedChannel{}) + if err := m.Channels[len(m.Channels)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Pagination", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Pagination == nil { + m.Pagination = &query.PageResponse{} + } + if err 
:= m.Pagination.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Height", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Height.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipQuery(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthQuery + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *QueryChannelClientStateRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: QueryChannelClientStateRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: QueryChannelClientStateRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PortId", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.PortId = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ChannelId", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ChannelId = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipQuery(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthQuery + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m 
*QueryChannelClientStateResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: QueryChannelClientStateResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: QueryChannelClientStateResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field IdentifiedClientState", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.IdentifiedClientState == nil { + m.IdentifiedClientState = &types.IdentifiedClientState{} + } + if err := m.IdentifiedClientState.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Proof", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Proof = append(m.Proof[:0], dAtA[iNdEx:postIndex]...) 
+ if m.Proof == nil { + m.Proof = []byte{} + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ProofHeight", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ProofHeight.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipQuery(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthQuery + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *QueryChannelConsensusStateRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: QueryChannelConsensusStateRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: QueryChannelConsensusStateRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PortId", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.PortId = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ChannelId", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ChannelId = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field RevisionNumber", wireType) + } + m.RevisionNumber = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.RevisionNumber |= uint64(b&0x7F) << shift + if b < 0x80 { 
+ break + } + } + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field RevisionHeight", wireType) + } + m.RevisionHeight = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.RevisionHeight |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipQuery(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthQuery + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *QueryChannelConsensusStateResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: QueryChannelConsensusStateResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: QueryChannelConsensusStateResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ConsensusState", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ConsensusState == nil { + m.ConsensusState = &types1.Any{} + } + if err := m.ConsensusState.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ClientId", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ClientId = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Proof", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Proof = append(m.Proof[:0], dAtA[iNdEx:postIndex]...) 
+ if m.Proof == nil { + m.Proof = []byte{} + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ProofHeight", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ProofHeight.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipQuery(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthQuery + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *QueryPacketCommitmentRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: QueryPacketCommitmentRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: QueryPacketCommitmentRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PortId", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.PortId = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ChannelId", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ChannelId = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Sequence", wireType) + } + m.Sequence = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Sequence |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + 
iNdEx = preIndex + skippy, err := skipQuery(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthQuery + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *QueryPacketCommitmentResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: QueryPacketCommitmentResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: QueryPacketCommitmentResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Commitment", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Commitment = append(m.Commitment[:0], dAtA[iNdEx:postIndex]...) + if m.Commitment == nil { + m.Commitment = []byte{} + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Proof", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Proof = append(m.Proof[:0], dAtA[iNdEx:postIndex]...) 
+ if m.Proof == nil { + m.Proof = []byte{} + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ProofHeight", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ProofHeight.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipQuery(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthQuery + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *QueryPacketCommitmentsRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: QueryPacketCommitmentsRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: QueryPacketCommitmentsRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PortId", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.PortId = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ChannelId", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ChannelId = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Pagination", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { 
+ return ErrInvalidLengthQuery + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Pagination == nil { + m.Pagination = &query.PageRequest{} + } + if err := m.Pagination.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipQuery(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthQuery + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *QueryPacketCommitmentsResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: QueryPacketCommitmentsResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: QueryPacketCommitmentsResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Commitments", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Commitments = append(m.Commitments, &PacketState{}) + if err := m.Commitments[len(m.Commitments)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Pagination", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Pagination == nil { + m.Pagination = &query.PageResponse{} + } + if err := m.Pagination.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Height", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Height.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + 
} + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipQuery(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthQuery + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *QueryPacketReceiptRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: QueryPacketReceiptRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: QueryPacketReceiptRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PortId", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.PortId = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ChannelId", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ChannelId = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Sequence", wireType) + } + m.Sequence = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Sequence |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipQuery(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthQuery + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *QueryPacketReceiptResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := 
int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: QueryPacketReceiptResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: QueryPacketReceiptResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Received", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Received = bool(v != 0) + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Proof", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Proof = append(m.Proof[:0], dAtA[iNdEx:postIndex]...) + if m.Proof == nil { + m.Proof = []byte{} + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ProofHeight", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ProofHeight.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipQuery(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthQuery + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *QueryPacketAcknowledgementRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: QueryPacketAcknowledgementRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: QueryPacketAcknowledgementRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PortId", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if 
intStringLen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.PortId = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ChannelId", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ChannelId = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Sequence", wireType) + } + m.Sequence = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Sequence |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipQuery(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthQuery + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *QueryPacketAcknowledgementResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: QueryPacketAcknowledgementResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: QueryPacketAcknowledgementResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Acknowledgement", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Acknowledgement = append(m.Acknowledgement[:0], dAtA[iNdEx:postIndex]...) 
+ if m.Acknowledgement == nil { + m.Acknowledgement = []byte{} + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Proof", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Proof = append(m.Proof[:0], dAtA[iNdEx:postIndex]...) + if m.Proof == nil { + m.Proof = []byte{} + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ProofHeight", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ProofHeight.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipQuery(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthQuery + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *QueryPacketAcknowledgementsRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: QueryPacketAcknowledgementsRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: QueryPacketAcknowledgementsRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PortId", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.PortId = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ChannelId", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + 
if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ChannelId = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Pagination", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Pagination == nil { + m.Pagination = &query.PageRequest{} + } + if err := m.Pagination.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipQuery(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthQuery + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *QueryPacketAcknowledgementsResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: QueryPacketAcknowledgementsResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: QueryPacketAcknowledgementsResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Acknowledgements", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Acknowledgements = append(m.Acknowledgements, &PacketState{}) + if err := m.Acknowledgements[len(m.Acknowledgements)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Pagination", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Pagination == nil { + m.Pagination = 
&query.PageResponse{} + } + if err := m.Pagination.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Height", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Height.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipQuery(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthQuery + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *QueryUnreceivedPacketsRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: QueryUnreceivedPacketsRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: QueryUnreceivedPacketsRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PortId", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.PortId = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ChannelId", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ChannelId = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType == 0 { + var v uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.PacketCommitmentSequences = 
append(m.PacketCommitmentSequences, v) + } else if wireType == 2 { + var packedLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + packedLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if packedLen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + packedLen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + var elementCount int + var count int + for _, integer := range dAtA[iNdEx:postIndex] { + if integer < 128 { + count++ + } + } + elementCount = count + if elementCount != 0 && len(m.PacketCommitmentSequences) == 0 { + m.PacketCommitmentSequences = make([]uint64, 0, elementCount) + } + for iNdEx < postIndex { + var v uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.PacketCommitmentSequences = append(m.PacketCommitmentSequences, v) + } + } else { + return fmt.Errorf("proto: wrong wireType = %d for field PacketCommitmentSequences", wireType) + } + default: + iNdEx = preIndex + skippy, err := skipQuery(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthQuery + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *QueryUnreceivedPacketsResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: QueryUnreceivedPacketsResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: QueryUnreceivedPacketsResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType == 0 { + var v uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Sequences = append(m.Sequences, v) + } else if wireType == 2 { + var packedLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + packedLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if packedLen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + packedLen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + var elementCount int + var count int + for _, integer := range dAtA[iNdEx:postIndex] { + if integer < 128 { + count++ + } + } + elementCount = count + if elementCount != 0 && len(m.Sequences) == 0 { + m.Sequences = make([]uint64, 0, elementCount) + } + for iNdEx < postIndex { + var v uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + 
return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Sequences = append(m.Sequences, v) + } + } else { + return fmt.Errorf("proto: wrong wireType = %d for field Sequences", wireType) + } + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Height", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Height.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipQuery(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthQuery + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *QueryUnreceivedAcksRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: QueryUnreceivedAcksRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: QueryUnreceivedAcksRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PortId", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.PortId = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ChannelId", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ChannelId = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType == 0 { + var v uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + 
iNdEx++ + v |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.PacketAckSequences = append(m.PacketAckSequences, v) + } else if wireType == 2 { + var packedLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + packedLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if packedLen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + packedLen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + var elementCount int + var count int + for _, integer := range dAtA[iNdEx:postIndex] { + if integer < 128 { + count++ + } + } + elementCount = count + if elementCount != 0 && len(m.PacketAckSequences) == 0 { + m.PacketAckSequences = make([]uint64, 0, elementCount) + } + for iNdEx < postIndex { + var v uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.PacketAckSequences = append(m.PacketAckSequences, v) + } + } else { + return fmt.Errorf("proto: wrong wireType = %d for field PacketAckSequences", wireType) + } + default: + iNdEx = preIndex + skippy, err := skipQuery(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthQuery + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *QueryUnreceivedAcksResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: QueryUnreceivedAcksResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: QueryUnreceivedAcksResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType == 0 { + var v uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Sequences = append(m.Sequences, v) + } else if wireType == 2 { + var packedLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + packedLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if packedLen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + packedLen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + var elementCount int + var count int + for _, integer := range dAtA[iNdEx:postIndex] { + if integer < 128 { + count++ + } + } + elementCount = count + if elementCount != 0 && len(m.Sequences) == 0 { + m.Sequences = make([]uint64, 0, elementCount) + } + for iNdEx < postIndex { + var v uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 
ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Sequences = append(m.Sequences, v) + } + } else { + return fmt.Errorf("proto: wrong wireType = %d for field Sequences", wireType) + } + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Height", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Height.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipQuery(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthQuery + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *QueryNextSequenceReceiveRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: QueryNextSequenceReceiveRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: QueryNextSequenceReceiveRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PortId", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.PortId = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ChannelId", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ChannelId = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipQuery(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + 
return ErrInvalidLengthQuery + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *QueryNextSequenceReceiveResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: QueryNextSequenceReceiveResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: QueryNextSequenceReceiveResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field NextSequenceReceive", wireType) + } + m.NextSequenceReceive = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.NextSequenceReceive |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Proof", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Proof = append(m.Proof[:0], dAtA[iNdEx:postIndex]...) 
+ if m.Proof == nil { + m.Proof = []byte{} + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ProofHeight", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ProofHeight.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipQuery(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthQuery + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipQuery(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowQuery + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowQuery + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowQuery + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthQuery + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupQuery + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthQuery + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthQuery = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowQuery = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupQuery = fmt.Errorf("proto: unexpected end of group") +) diff --git a/core/04-channel/types/query.pb.gw.go b/core/04-channel/types/query.pb.gw.go new file mode 100644 index 0000000000..58be2aca14 --- /dev/null +++ b/core/04-channel/types/query.pb.gw.go @@ -0,0 +1,1792 @@ +// Code generated by protoc-gen-grpc-gateway. DO NOT EDIT. +// source: ibcgo/core/channel/v1/query.proto + +/* +Package types is a reverse proxy. + +It translates gRPC into RESTful JSON APIs. 
+*/ +package types + +import ( + "context" + "io" + "net/http" + + "github.com/golang/protobuf/descriptor" + "github.com/golang/protobuf/proto" + "github.com/grpc-ecosystem/grpc-gateway/runtime" + "github.com/grpc-ecosystem/grpc-gateway/utilities" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/status" +) + +// Suppress "imported and not used" errors +var _ codes.Code +var _ io.Reader +var _ status.Status +var _ = runtime.String +var _ = utilities.NewDoubleArray +var _ = descriptor.ForMessage + +func request_Query_Channel_0(ctx context.Context, marshaler runtime.Marshaler, client QueryClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq QueryChannelRequest + var metadata runtime.ServerMetadata + + var ( + val string + ok bool + err error + _ = err + ) + + val, ok = pathParams["channel_id"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "channel_id") + } + + protoReq.ChannelId, err = runtime.String(val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "channel_id", err) + } + + val, ok = pathParams["port_id"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "port_id") + } + + protoReq.PortId, err = runtime.String(val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "port_id", err) + } + + msg, err := client.Channel(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err + +} + +func local_request_Query_Channel_0(ctx context.Context, marshaler runtime.Marshaler, server QueryServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq QueryChannelRequest + var metadata runtime.ServerMetadata + + var ( + val string + ok bool + err error + _ = err + ) + + val, ok = pathParams["channel_id"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "channel_id") + } + + protoReq.ChannelId, err = runtime.String(val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "channel_id", err) + } + + val, ok = pathParams["port_id"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "port_id") + } + + protoReq.PortId, err = runtime.String(val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "port_id", err) + } + + msg, err := server.Channel(ctx, &protoReq) + return msg, metadata, err + +} + +var ( + filter_Query_Channels_0 = &utilities.DoubleArray{Encoding: map[string]int{}, Base: []int(nil), Check: []int(nil)} +) + +func request_Query_Channels_0(ctx context.Context, marshaler runtime.Marshaler, client QueryClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq QueryChannelsRequest + var metadata runtime.ServerMetadata + + if err := req.ParseForm(); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_Query_Channels_0); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + 
msg, err := client.Channels(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err + +} + +func local_request_Query_Channels_0(ctx context.Context, marshaler runtime.Marshaler, server QueryServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq QueryChannelsRequest + var metadata runtime.ServerMetadata + + if err := req.ParseForm(); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_Query_Channels_0); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + msg, err := server.Channels(ctx, &protoReq) + return msg, metadata, err + +} + +var ( + filter_Query_ConnectionChannels_0 = &utilities.DoubleArray{Encoding: map[string]int{"connection": 0}, Base: []int{1, 1, 0}, Check: []int{0, 1, 2}} +) + +func request_Query_ConnectionChannels_0(ctx context.Context, marshaler runtime.Marshaler, client QueryClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq QueryConnectionChannelsRequest + var metadata runtime.ServerMetadata + + var ( + val string + ok bool + err error + _ = err + ) + + val, ok = pathParams["connection"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "connection") + } + + protoReq.Connection, err = runtime.String(val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "connection", err) + } + + if err := req.ParseForm(); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_Query_ConnectionChannels_0); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + msg, err := client.ConnectionChannels(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err + +} + +func local_request_Query_ConnectionChannels_0(ctx context.Context, marshaler runtime.Marshaler, server QueryServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq QueryConnectionChannelsRequest + var metadata runtime.ServerMetadata + + var ( + val string + ok bool + err error + _ = err + ) + + val, ok = pathParams["connection"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "connection") + } + + protoReq.Connection, err = runtime.String(val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "connection", err) + } + + if err := req.ParseForm(); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_Query_ConnectionChannels_0); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + msg, err := server.ConnectionChannels(ctx, &protoReq) + return msg, metadata, err + +} + +func request_Query_ChannelClientState_0(ctx context.Context, marshaler runtime.Marshaler, client QueryClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq QueryChannelClientStateRequest + var metadata runtime.ServerMetadata + + 
var ( + val string + ok bool + err error + _ = err + ) + + val, ok = pathParams["channel_id"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "channel_id") + } + + protoReq.ChannelId, err = runtime.String(val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "channel_id", err) + } + + val, ok = pathParams["port_id"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "port_id") + } + + protoReq.PortId, err = runtime.String(val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "port_id", err) + } + + msg, err := client.ChannelClientState(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err + +} + +func local_request_Query_ChannelClientState_0(ctx context.Context, marshaler runtime.Marshaler, server QueryServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq QueryChannelClientStateRequest + var metadata runtime.ServerMetadata + + var ( + val string + ok bool + err error + _ = err + ) + + val, ok = pathParams["channel_id"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "channel_id") + } + + protoReq.ChannelId, err = runtime.String(val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "channel_id", err) + } + + val, ok = pathParams["port_id"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "port_id") + } + + protoReq.PortId, err = runtime.String(val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "port_id", err) + } + + msg, err := server.ChannelClientState(ctx, &protoReq) + return msg, metadata, err + +} + +func request_Query_ChannelConsensusState_0(ctx context.Context, marshaler runtime.Marshaler, client QueryClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq QueryChannelConsensusStateRequest + var metadata runtime.ServerMetadata + + var ( + val string + ok bool + err error + _ = err + ) + + val, ok = pathParams["channel_id"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "channel_id") + } + + protoReq.ChannelId, err = runtime.String(val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "channel_id", err) + } + + val, ok = pathParams["port_id"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "port_id") + } + + protoReq.PortId, err = runtime.String(val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "port_id", err) + } + + val, ok = pathParams["revision_number"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "revision_number") + } + + protoReq.RevisionNumber, err = runtime.Uint64(val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "revision_number", err) + } + + val, ok = pathParams["revision_height"] + if !ok { + return nil, metadata, 
status.Errorf(codes.InvalidArgument, "missing parameter %s", "revision_height") + } + + protoReq.RevisionHeight, err = runtime.Uint64(val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "revision_height", err) + } + + msg, err := client.ChannelConsensusState(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err + +} + +func local_request_Query_ChannelConsensusState_0(ctx context.Context, marshaler runtime.Marshaler, server QueryServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq QueryChannelConsensusStateRequest + var metadata runtime.ServerMetadata + + var ( + val string + ok bool + err error + _ = err + ) + + val, ok = pathParams["channel_id"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "channel_id") + } + + protoReq.ChannelId, err = runtime.String(val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "channel_id", err) + } + + val, ok = pathParams["port_id"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "port_id") + } + + protoReq.PortId, err = runtime.String(val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "port_id", err) + } + + val, ok = pathParams["revision_number"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "revision_number") + } + + protoReq.RevisionNumber, err = runtime.Uint64(val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "revision_number", err) + } + + val, ok = pathParams["revision_height"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "revision_height") + } + + protoReq.RevisionHeight, err = runtime.Uint64(val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "revision_height", err) + } + + msg, err := server.ChannelConsensusState(ctx, &protoReq) + return msg, metadata, err + +} + +func request_Query_PacketCommitment_0(ctx context.Context, marshaler runtime.Marshaler, client QueryClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq QueryPacketCommitmentRequest + var metadata runtime.ServerMetadata + + var ( + val string + ok bool + err error + _ = err + ) + + val, ok = pathParams["channel_id"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "channel_id") + } + + protoReq.ChannelId, err = runtime.String(val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "channel_id", err) + } + + val, ok = pathParams["port_id"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "port_id") + } + + protoReq.PortId, err = runtime.String(val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "port_id", err) + } + + val, ok = pathParams["sequence"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "sequence") + } + + protoReq.Sequence, err 
= runtime.Uint64(val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "sequence", err) + } + + msg, err := client.PacketCommitment(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err + +} + +func local_request_Query_PacketCommitment_0(ctx context.Context, marshaler runtime.Marshaler, server QueryServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq QueryPacketCommitmentRequest + var metadata runtime.ServerMetadata + + var ( + val string + ok bool + err error + _ = err + ) + + val, ok = pathParams["channel_id"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "channel_id") + } + + protoReq.ChannelId, err = runtime.String(val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "channel_id", err) + } + + val, ok = pathParams["port_id"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "port_id") + } + + protoReq.PortId, err = runtime.String(val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "port_id", err) + } + + val, ok = pathParams["sequence"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "sequence") + } + + protoReq.Sequence, err = runtime.Uint64(val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "sequence", err) + } + + msg, err := server.PacketCommitment(ctx, &protoReq) + return msg, metadata, err + +} + +var ( + filter_Query_PacketCommitments_0 = &utilities.DoubleArray{Encoding: map[string]int{"channel_id": 0, "port_id": 1}, Base: []int{1, 1, 2, 0, 0}, Check: []int{0, 1, 1, 2, 3}} +) + +func request_Query_PacketCommitments_0(ctx context.Context, marshaler runtime.Marshaler, client QueryClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq QueryPacketCommitmentsRequest + var metadata runtime.ServerMetadata + + var ( + val string + ok bool + err error + _ = err + ) + + val, ok = pathParams["channel_id"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "channel_id") + } + + protoReq.ChannelId, err = runtime.String(val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "channel_id", err) + } + + val, ok = pathParams["port_id"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "port_id") + } + + protoReq.PortId, err = runtime.String(val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "port_id", err) + } + + if err := req.ParseForm(); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_Query_PacketCommitments_0); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + msg, err := client.PacketCommitments(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err + +} + +func local_request_Query_PacketCommitments_0(ctx 
context.Context, marshaler runtime.Marshaler, server QueryServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq QueryPacketCommitmentsRequest + var metadata runtime.ServerMetadata + + var ( + val string + ok bool + err error + _ = err + ) + + val, ok = pathParams["channel_id"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "channel_id") + } + + protoReq.ChannelId, err = runtime.String(val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "channel_id", err) + } + + val, ok = pathParams["port_id"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "port_id") + } + + protoReq.PortId, err = runtime.String(val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "port_id", err) + } + + if err := req.ParseForm(); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_Query_PacketCommitments_0); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + msg, err := server.PacketCommitments(ctx, &protoReq) + return msg, metadata, err + +} + +func request_Query_PacketReceipt_0(ctx context.Context, marshaler runtime.Marshaler, client QueryClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq QueryPacketReceiptRequest + var metadata runtime.ServerMetadata + + var ( + val string + ok bool + err error + _ = err + ) + + val, ok = pathParams["channel_id"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "channel_id") + } + + protoReq.ChannelId, err = runtime.String(val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "channel_id", err) + } + + val, ok = pathParams["port_id"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "port_id") + } + + protoReq.PortId, err = runtime.String(val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "port_id", err) + } + + val, ok = pathParams["sequence"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "sequence") + } + + protoReq.Sequence, err = runtime.Uint64(val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "sequence", err) + } + + msg, err := client.PacketReceipt(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err + +} + +func local_request_Query_PacketReceipt_0(ctx context.Context, marshaler runtime.Marshaler, server QueryServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq QueryPacketReceiptRequest + var metadata runtime.ServerMetadata + + var ( + val string + ok bool + err error + _ = err + ) + + val, ok = pathParams["channel_id"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "channel_id") + } + + protoReq.ChannelId, err = runtime.String(val) + + if err != nil { + return nil, metadata, 
status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "channel_id", err) + } + + val, ok = pathParams["port_id"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "port_id") + } + + protoReq.PortId, err = runtime.String(val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "port_id", err) + } + + val, ok = pathParams["sequence"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "sequence") + } + + protoReq.Sequence, err = runtime.Uint64(val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "sequence", err) + } + + msg, err := server.PacketReceipt(ctx, &protoReq) + return msg, metadata, err + +} + +func request_Query_PacketAcknowledgement_0(ctx context.Context, marshaler runtime.Marshaler, client QueryClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq QueryPacketAcknowledgementRequest + var metadata runtime.ServerMetadata + + var ( + val string + ok bool + err error + _ = err + ) + + val, ok = pathParams["channel_id"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "channel_id") + } + + protoReq.ChannelId, err = runtime.String(val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "channel_id", err) + } + + val, ok = pathParams["port_id"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "port_id") + } + + protoReq.PortId, err = runtime.String(val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "port_id", err) + } + + val, ok = pathParams["sequence"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "sequence") + } + + protoReq.Sequence, err = runtime.Uint64(val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "sequence", err) + } + + msg, err := client.PacketAcknowledgement(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err + +} + +func local_request_Query_PacketAcknowledgement_0(ctx context.Context, marshaler runtime.Marshaler, server QueryServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq QueryPacketAcknowledgementRequest + var metadata runtime.ServerMetadata + + var ( + val string + ok bool + err error + _ = err + ) + + val, ok = pathParams["channel_id"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "channel_id") + } + + protoReq.ChannelId, err = runtime.String(val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "channel_id", err) + } + + val, ok = pathParams["port_id"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "port_id") + } + + protoReq.PortId, err = runtime.String(val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "port_id", err) + } + + val, ok = pathParams["sequence"] + if !ok { + return nil, metadata, 
status.Errorf(codes.InvalidArgument, "missing parameter %s", "sequence") + } + + protoReq.Sequence, err = runtime.Uint64(val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "sequence", err) + } + + msg, err := server.PacketAcknowledgement(ctx, &protoReq) + return msg, metadata, err + +} + +var ( + filter_Query_PacketAcknowledgements_0 = &utilities.DoubleArray{Encoding: map[string]int{"channel_id": 0, "port_id": 1}, Base: []int{1, 1, 2, 0, 0}, Check: []int{0, 1, 1, 2, 3}} +) + +func request_Query_PacketAcknowledgements_0(ctx context.Context, marshaler runtime.Marshaler, client QueryClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq QueryPacketAcknowledgementsRequest + var metadata runtime.ServerMetadata + + var ( + val string + ok bool + err error + _ = err + ) + + val, ok = pathParams["channel_id"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "channel_id") + } + + protoReq.ChannelId, err = runtime.String(val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "channel_id", err) + } + + val, ok = pathParams["port_id"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "port_id") + } + + protoReq.PortId, err = runtime.String(val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "port_id", err) + } + + if err := req.ParseForm(); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_Query_PacketAcknowledgements_0); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + msg, err := client.PacketAcknowledgements(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err + +} + +func local_request_Query_PacketAcknowledgements_0(ctx context.Context, marshaler runtime.Marshaler, server QueryServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq QueryPacketAcknowledgementsRequest + var metadata runtime.ServerMetadata + + var ( + val string + ok bool + err error + _ = err + ) + + val, ok = pathParams["channel_id"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "channel_id") + } + + protoReq.ChannelId, err = runtime.String(val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "channel_id", err) + } + + val, ok = pathParams["port_id"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "port_id") + } + + protoReq.PortId, err = runtime.String(val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "port_id", err) + } + + if err := req.ParseForm(); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_Query_PacketAcknowledgements_0); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + msg, err := server.PacketAcknowledgements(ctx, &protoReq) + return msg, metadata, err + +} + 
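The request_/local_request_ helpers above perform the REST-to-gRPC translation for each Query RPC: they pull channel_id, port_id and sequence values out of the URL path, decode any remaining query-string fields (such as pagination) with PopulateQueryParameters, and forward the resulting request to either the remote QueryClient or the in-process QueryServer. A minimal sketch of calling one of these generated routes over plain HTTP follows; the node address localhost:1317 and the channel-0/transfer identifiers are illustrative assumptions and not part of this patch.

// Sketch only: query the packet_acknowledgements route served by the generated
// gateway handlers (route shape taken from pattern_Query_PacketAcknowledgements_0,
// defined later in this file).
package main

import (
	"fmt"
	"io"
	"net/http"
)

func main() {
	// channel-0, transfer and the :1317 API port are assumed example values.
	url := "http://localhost:1317/ibc/core/channel/v1/channels/channel-0/ports/transfer/packet_acknowledgements?pagination.limit=10"

	resp, err := http.Get(url)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	body, err := io.ReadAll(resp.Body)
	if err != nil {
		panic(err)
	}
	// The gateway marshals the QueryPacketAcknowledgementsResponse to JSON.
	fmt.Println(string(body))
}

The same query can be issued natively over gRPC through the generated QueryClient; the gateway handler simply maps the URL segments onto a QueryPacketAcknowledgementsRequest before calling it.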
+func request_Query_UnreceivedPackets_0(ctx context.Context, marshaler runtime.Marshaler, client QueryClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq QueryUnreceivedPacketsRequest + var metadata runtime.ServerMetadata + + var ( + val string + ok bool + err error + _ = err + ) + + val, ok = pathParams["channel_id"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "channel_id") + } + + protoReq.ChannelId, err = runtime.String(val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "channel_id", err) + } + + val, ok = pathParams["port_id"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "port_id") + } + + protoReq.PortId, err = runtime.String(val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "port_id", err) + } + + val, ok = pathParams["packet_commitment_sequences"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "packet_commitment_sequences") + } + + protoReq.PacketCommitmentSequences, err = runtime.Uint64Slice(val, ",") + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "packet_commitment_sequences", err) + } + + msg, err := client.UnreceivedPackets(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err + +} + +func local_request_Query_UnreceivedPackets_0(ctx context.Context, marshaler runtime.Marshaler, server QueryServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq QueryUnreceivedPacketsRequest + var metadata runtime.ServerMetadata + + var ( + val string + ok bool + err error + _ = err + ) + + val, ok = pathParams["channel_id"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "channel_id") + } + + protoReq.ChannelId, err = runtime.String(val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "channel_id", err) + } + + val, ok = pathParams["port_id"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "port_id") + } + + protoReq.PortId, err = runtime.String(val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "port_id", err) + } + + val, ok = pathParams["packet_commitment_sequences"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "packet_commitment_sequences") + } + + protoReq.PacketCommitmentSequences, err = runtime.Uint64Slice(val, ",") + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "packet_commitment_sequences", err) + } + + msg, err := server.UnreceivedPackets(ctx, &protoReq) + return msg, metadata, err + +} + +func request_Query_UnreceivedAcks_0(ctx context.Context, marshaler runtime.Marshaler, client QueryClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq QueryUnreceivedAcksRequest + var metadata runtime.ServerMetadata + + var ( + val string + ok bool + err error + _ = err + ) + + val, ok = 
pathParams["channel_id"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "channel_id") + } + + protoReq.ChannelId, err = runtime.String(val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "channel_id", err) + } + + val, ok = pathParams["port_id"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "port_id") + } + + protoReq.PortId, err = runtime.String(val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "port_id", err) + } + + val, ok = pathParams["packet_ack_sequences"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "packet_ack_sequences") + } + + protoReq.PacketAckSequences, err = runtime.Uint64Slice(val, ",") + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "packet_ack_sequences", err) + } + + msg, err := client.UnreceivedAcks(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err + +} + +func local_request_Query_UnreceivedAcks_0(ctx context.Context, marshaler runtime.Marshaler, server QueryServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq QueryUnreceivedAcksRequest + var metadata runtime.ServerMetadata + + var ( + val string + ok bool + err error + _ = err + ) + + val, ok = pathParams["channel_id"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "channel_id") + } + + protoReq.ChannelId, err = runtime.String(val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "channel_id", err) + } + + val, ok = pathParams["port_id"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "port_id") + } + + protoReq.PortId, err = runtime.String(val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "port_id", err) + } + + val, ok = pathParams["packet_ack_sequences"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "packet_ack_sequences") + } + + protoReq.PacketAckSequences, err = runtime.Uint64Slice(val, ",") + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "packet_ack_sequences", err) + } + + msg, err := server.UnreceivedAcks(ctx, &protoReq) + return msg, metadata, err + +} + +func request_Query_NextSequenceReceive_0(ctx context.Context, marshaler runtime.Marshaler, client QueryClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq QueryNextSequenceReceiveRequest + var metadata runtime.ServerMetadata + + var ( + val string + ok bool + err error + _ = err + ) + + val, ok = pathParams["channel_id"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "channel_id") + } + + protoReq.ChannelId, err = runtime.String(val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "channel_id", err) + } + + val, ok = pathParams["port_id"] + if !ok { + return nil, metadata, 
status.Errorf(codes.InvalidArgument, "missing parameter %s", "port_id") + } + + protoReq.PortId, err = runtime.String(val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "port_id", err) + } + + msg, err := client.NextSequenceReceive(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err + +} + +func local_request_Query_NextSequenceReceive_0(ctx context.Context, marshaler runtime.Marshaler, server QueryServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq QueryNextSequenceReceiveRequest + var metadata runtime.ServerMetadata + + var ( + val string + ok bool + err error + _ = err + ) + + val, ok = pathParams["channel_id"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "channel_id") + } + + protoReq.ChannelId, err = runtime.String(val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "channel_id", err) + } + + val, ok = pathParams["port_id"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "port_id") + } + + protoReq.PortId, err = runtime.String(val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "port_id", err) + } + + msg, err := server.NextSequenceReceive(ctx, &protoReq) + return msg, metadata, err + +} + +// RegisterQueryHandlerServer registers the http handlers for service Query to "mux". +// UnaryRPC :call QueryServer directly. +// StreamingRPC :currently unsupported pending https://github.com/grpc/grpc-go/issues/906. +// Note that using this registration option will cause many gRPC library features (such as grpc.SendHeader, etc) to stop working. Consider using RegisterQueryHandlerFromEndpoint instead. +func RegisterQueryHandlerServer(ctx context.Context, mux *runtime.ServeMux, server QueryServer) error { + + mux.Handle("GET", pattern_Query_Channel_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := local_request_Query_Channel_0(rctx, inboundMarshaler, server, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Query_Channel_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
+ + }) + + mux.Handle("GET", pattern_Query_Channels_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := local_request_Query_Channels_0(rctx, inboundMarshaler, server, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Query_Channels_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + + mux.Handle("GET", pattern_Query_ConnectionChannels_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := local_request_Query_ConnectionChannels_0(rctx, inboundMarshaler, server, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Query_ConnectionChannels_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + + mux.Handle("GET", pattern_Query_ChannelClientState_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := local_request_Query_ChannelClientState_0(rctx, inboundMarshaler, server, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Query_ChannelClientState_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + + mux.Handle("GET", pattern_Query_ChannelConsensusState_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := local_request_Query_ChannelConsensusState_0(rctx, inboundMarshaler, server, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Query_ChannelConsensusState_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
+ + }) + + mux.Handle("GET", pattern_Query_PacketCommitment_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := local_request_Query_PacketCommitment_0(rctx, inboundMarshaler, server, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Query_PacketCommitment_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + + mux.Handle("GET", pattern_Query_PacketCommitments_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := local_request_Query_PacketCommitments_0(rctx, inboundMarshaler, server, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Query_PacketCommitments_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + + mux.Handle("GET", pattern_Query_PacketReceipt_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := local_request_Query_PacketReceipt_0(rctx, inboundMarshaler, server, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Query_PacketReceipt_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + + mux.Handle("GET", pattern_Query_PacketAcknowledgement_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := local_request_Query_PacketAcknowledgement_0(rctx, inboundMarshaler, server, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Query_PacketAcknowledgement_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
+ + }) + + mux.Handle("GET", pattern_Query_PacketAcknowledgements_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := local_request_Query_PacketAcknowledgements_0(rctx, inboundMarshaler, server, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Query_PacketAcknowledgements_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + + mux.Handle("GET", pattern_Query_UnreceivedPackets_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := local_request_Query_UnreceivedPackets_0(rctx, inboundMarshaler, server, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Query_UnreceivedPackets_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + + mux.Handle("GET", pattern_Query_UnreceivedAcks_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := local_request_Query_UnreceivedAcks_0(rctx, inboundMarshaler, server, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Query_UnreceivedAcks_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + + mux.Handle("GET", pattern_Query_NextSequenceReceive_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := local_request_Query_NextSequenceReceive_0(rctx, inboundMarshaler, server, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Query_NextSequenceReceive_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + + return nil +} + +// RegisterQueryHandlerFromEndpoint is same as RegisterQueryHandler but +// automatically dials to "endpoint" and closes the connection when "ctx" gets done. 
+func RegisterQueryHandlerFromEndpoint(ctx context.Context, mux *runtime.ServeMux, endpoint string, opts []grpc.DialOption) (err error) { + conn, err := grpc.Dial(endpoint, opts...) + if err != nil { + return err + } + defer func() { + if err != nil { + if cerr := conn.Close(); cerr != nil { + grpclog.Infof("Failed to close conn to %s: %v", endpoint, cerr) + } + return + } + go func() { + <-ctx.Done() + if cerr := conn.Close(); cerr != nil { + grpclog.Infof("Failed to close conn to %s: %v", endpoint, cerr) + } + }() + }() + + return RegisterQueryHandler(ctx, mux, conn) +} + +// RegisterQueryHandler registers the http handlers for service Query to "mux". +// The handlers forward requests to the grpc endpoint over "conn". +func RegisterQueryHandler(ctx context.Context, mux *runtime.ServeMux, conn *grpc.ClientConn) error { + return RegisterQueryHandlerClient(ctx, mux, NewQueryClient(conn)) +} + +// RegisterQueryHandlerClient registers the http handlers for service Query +// to "mux". The handlers forward requests to the grpc endpoint over the given implementation of "QueryClient". +// Note: the gRPC framework executes interceptors within the gRPC handler. If the passed in "QueryClient" +// doesn't go through the normal gRPC flow (creating a gRPC client etc.) then it will be up to the passed in +// "QueryClient" to call the correct interceptors. +func RegisterQueryHandlerClient(ctx context.Context, mux *runtime.ServeMux, client QueryClient) error { + + mux.Handle("GET", pattern_Query_Channel_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_Query_Channel_0(rctx, inboundMarshaler, client, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Query_Channel_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + + mux.Handle("GET", pattern_Query_Channels_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_Query_Channels_0(rctx, inboundMarshaler, client, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Query_Channels_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
+ + }) + + mux.Handle("GET", pattern_Query_ConnectionChannels_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_Query_ConnectionChannels_0(rctx, inboundMarshaler, client, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Query_ConnectionChannels_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + + mux.Handle("GET", pattern_Query_ChannelClientState_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_Query_ChannelClientState_0(rctx, inboundMarshaler, client, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Query_ChannelClientState_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + + mux.Handle("GET", pattern_Query_ChannelConsensusState_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_Query_ChannelConsensusState_0(rctx, inboundMarshaler, client, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Query_ChannelConsensusState_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + + mux.Handle("GET", pattern_Query_PacketCommitment_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_Query_PacketCommitment_0(rctx, inboundMarshaler, client, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Query_PacketCommitment_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
+ + }) + + mux.Handle("GET", pattern_Query_PacketCommitments_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_Query_PacketCommitments_0(rctx, inboundMarshaler, client, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Query_PacketCommitments_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + + mux.Handle("GET", pattern_Query_PacketReceipt_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_Query_PacketReceipt_0(rctx, inboundMarshaler, client, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Query_PacketReceipt_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + + mux.Handle("GET", pattern_Query_PacketAcknowledgement_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_Query_PacketAcknowledgement_0(rctx, inboundMarshaler, client, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Query_PacketAcknowledgement_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + + mux.Handle("GET", pattern_Query_PacketAcknowledgements_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_Query_PacketAcknowledgements_0(rctx, inboundMarshaler, client, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Query_PacketAcknowledgements_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
+ + }) + + mux.Handle("GET", pattern_Query_UnreceivedPackets_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_Query_UnreceivedPackets_0(rctx, inboundMarshaler, client, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Query_UnreceivedPackets_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + + mux.Handle("GET", pattern_Query_UnreceivedAcks_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_Query_UnreceivedAcks_0(rctx, inboundMarshaler, client, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Query_UnreceivedAcks_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + + mux.Handle("GET", pattern_Query_NextSequenceReceive_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_Query_NextSequenceReceive_0(rctx, inboundMarshaler, client, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Query_NextSequenceReceive_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
+ + }) + + return nil +} + +var ( + pattern_Query_Channel_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3, 2, 4, 1, 0, 4, 1, 5, 5, 2, 6, 1, 0, 4, 1, 5, 7}, []string{"ibc", "core", "channel", "v1", "channels", "channel_id", "ports", "port_id"}, "", runtime.AssumeColonVerbOpt(true))) + + pattern_Query_Channels_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3, 2, 4}, []string{"ibc", "core", "channel", "v1", "channels"}, "", runtime.AssumeColonVerbOpt(true))) + + pattern_Query_ConnectionChannels_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3, 2, 4, 1, 0, 4, 1, 5, 5, 2, 6}, []string{"ibc", "core", "channel", "v1", "connections", "connection", "channels"}, "", runtime.AssumeColonVerbOpt(true))) + + pattern_Query_ChannelClientState_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3, 2, 4, 1, 0, 4, 1, 5, 5, 2, 6, 1, 0, 4, 1, 5, 7, 2, 8}, []string{"ibc", "core", "channel", "v1", "channels", "channel_id", "ports", "port_id", "client_state"}, "", runtime.AssumeColonVerbOpt(true))) + + pattern_Query_ChannelConsensusState_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3, 2, 4, 1, 0, 4, 1, 5, 5, 2, 6, 1, 0, 4, 1, 5, 7, 2, 8, 2, 9, 1, 0, 4, 1, 5, 10, 2, 11, 1, 0, 4, 1, 5, 12}, []string{"ibc", "core", "channel", "v1", "channels", "channel_id", "ports", "port_id", "consensus_state", "revision", "revision_number", "height", "revision_height"}, "", runtime.AssumeColonVerbOpt(true))) + + pattern_Query_PacketCommitment_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3, 2, 4, 1, 0, 4, 1, 5, 5, 2, 6, 1, 0, 4, 1, 5, 7, 2, 8, 1, 0, 4, 1, 5, 9}, []string{"ibc", "core", "channel", "v1", "channels", "channel_id", "ports", "port_id", "packet_commitments", "sequence"}, "", runtime.AssumeColonVerbOpt(true))) + + pattern_Query_PacketCommitments_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3, 2, 4, 1, 0, 4, 1, 5, 5, 2, 6, 1, 0, 4, 1, 5, 7, 2, 8}, []string{"ibc", "core", "channel", "v1", "channels", "channel_id", "ports", "port_id", "packet_commitments"}, "", runtime.AssumeColonVerbOpt(true))) + + pattern_Query_PacketReceipt_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3, 2, 4, 1, 0, 4, 1, 5, 5, 2, 6, 1, 0, 4, 1, 5, 7, 2, 8, 1, 0, 4, 1, 5, 9}, []string{"ibc", "core", "channel", "v1", "channels", "channel_id", "ports", "port_id", "packet_receipts", "sequence"}, "", runtime.AssumeColonVerbOpt(true))) + + pattern_Query_PacketAcknowledgement_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3, 2, 4, 1, 0, 4, 1, 5, 5, 2, 6, 1, 0, 4, 1, 5, 7, 2, 8, 1, 0, 4, 1, 5, 9}, []string{"ibc", "core", "channel", "v1", "channels", "channel_id", "ports", "port_id", "packet_acks", "sequence"}, "", runtime.AssumeColonVerbOpt(true))) + + pattern_Query_PacketAcknowledgements_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3, 2, 4, 1, 0, 4, 1, 5, 5, 2, 6, 1, 0, 4, 1, 5, 7, 2, 8}, []string{"ibc", "core", "channel", "v1", "channels", "channel_id", "ports", "port_id", "packet_acknowledgements"}, "", runtime.AssumeColonVerbOpt(true))) + + pattern_Query_UnreceivedPackets_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3, 2, 4, 1, 0, 4, 1, 5, 5, 2, 6, 1, 0, 4, 1, 5, 7, 2, 8, 1, 0, 4, 1, 5, 9, 2, 10}, []string{"ibc", "core", "channel", "v1", "channels", "channel_id", "ports", "port_id", "packet_commitments", "packet_commitment_sequences", "unreceived_packets"}, "", 
runtime.AssumeColonVerbOpt(true))) + + pattern_Query_UnreceivedAcks_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3, 2, 4, 1, 0, 4, 1, 5, 5, 2, 6, 1, 0, 4, 1, 5, 7, 2, 8, 1, 0, 4, 1, 5, 9, 2, 10}, []string{"ibc", "core", "channel", "v1", "channels", "channel_id", "ports", "port_id", "packet_commitments", "packet_ack_sequences", "unreceived_acks"}, "", runtime.AssumeColonVerbOpt(true))) + + pattern_Query_NextSequenceReceive_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3, 2, 4, 1, 0, 4, 1, 5, 5, 2, 6, 1, 0, 4, 1, 5, 7, 2, 8}, []string{"ibc", "core", "channel", "v1", "channels", "channel_id", "ports", "port_id", "next_sequence"}, "", runtime.AssumeColonVerbOpt(true))) +) + +var ( + forward_Query_Channel_0 = runtime.ForwardResponseMessage + + forward_Query_Channels_0 = runtime.ForwardResponseMessage + + forward_Query_ConnectionChannels_0 = runtime.ForwardResponseMessage + + forward_Query_ChannelClientState_0 = runtime.ForwardResponseMessage + + forward_Query_ChannelConsensusState_0 = runtime.ForwardResponseMessage + + forward_Query_PacketCommitment_0 = runtime.ForwardResponseMessage + + forward_Query_PacketCommitments_0 = runtime.ForwardResponseMessage + + forward_Query_PacketReceipt_0 = runtime.ForwardResponseMessage + + forward_Query_PacketAcknowledgement_0 = runtime.ForwardResponseMessage + + forward_Query_PacketAcknowledgements_0 = runtime.ForwardResponseMessage + + forward_Query_UnreceivedPackets_0 = runtime.ForwardResponseMessage + + forward_Query_UnreceivedAcks_0 = runtime.ForwardResponseMessage + + forward_Query_NextSequenceReceive_0 = runtime.ForwardResponseMessage +) diff --git a/core/04-channel/types/tx.pb.go b/core/04-channel/types/tx.pb.go new file mode 100644 index 0000000000..9b8976ec99 --- /dev/null +++ b/core/04-channel/types/tx.pb.go @@ -0,0 +1,5264 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: ibcgo/core/channel/v1/tx.proto + +package types + +import ( + context "context" + fmt "fmt" + types "github.com/cosmos/ibc-go/core/02-client/types" + _ "github.com/gogo/protobuf/gogoproto" + grpc1 "github.com/gogo/protobuf/grpc" + proto "github.com/gogo/protobuf/proto" + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" + io "io" + math "math" + math_bits "math/bits" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +// MsgChannelOpenInit defines an sdk.Msg to initialize a channel handshake. It +// is called by a relayer on Chain A. 
+type MsgChannelOpenInit struct { + PortId string `protobuf:"bytes,1,opt,name=port_id,json=portId,proto3" json:"port_id,omitempty" yaml:"port_id"` + Channel Channel `protobuf:"bytes,2,opt,name=channel,proto3" json:"channel"` + Signer string `protobuf:"bytes,3,opt,name=signer,proto3" json:"signer,omitempty"` +} + +func (m *MsgChannelOpenInit) Reset() { *m = MsgChannelOpenInit{} } +func (m *MsgChannelOpenInit) String() string { return proto.CompactTextString(m) } +func (*MsgChannelOpenInit) ProtoMessage() {} +func (*MsgChannelOpenInit) Descriptor() ([]byte, []int) { + return fileDescriptor_4f707a6c6f551009, []int{0} +} +func (m *MsgChannelOpenInit) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *MsgChannelOpenInit) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_MsgChannelOpenInit.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *MsgChannelOpenInit) XXX_Merge(src proto.Message) { + xxx_messageInfo_MsgChannelOpenInit.Merge(m, src) +} +func (m *MsgChannelOpenInit) XXX_Size() int { + return m.Size() +} +func (m *MsgChannelOpenInit) XXX_DiscardUnknown() { + xxx_messageInfo_MsgChannelOpenInit.DiscardUnknown(m) +} + +var xxx_messageInfo_MsgChannelOpenInit proto.InternalMessageInfo + +// MsgChannelOpenInitResponse defines the Msg/ChannelOpenInit response type. +type MsgChannelOpenInitResponse struct { +} + +func (m *MsgChannelOpenInitResponse) Reset() { *m = MsgChannelOpenInitResponse{} } +func (m *MsgChannelOpenInitResponse) String() string { return proto.CompactTextString(m) } +func (*MsgChannelOpenInitResponse) ProtoMessage() {} +func (*MsgChannelOpenInitResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_4f707a6c6f551009, []int{1} +} +func (m *MsgChannelOpenInitResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *MsgChannelOpenInitResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_MsgChannelOpenInitResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *MsgChannelOpenInitResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_MsgChannelOpenInitResponse.Merge(m, src) +} +func (m *MsgChannelOpenInitResponse) XXX_Size() int { + return m.Size() +} +func (m *MsgChannelOpenInitResponse) XXX_DiscardUnknown() { + xxx_messageInfo_MsgChannelOpenInitResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_MsgChannelOpenInitResponse proto.InternalMessageInfo + +// MsgChannelOpenTry defines a msg sent by a Relayer to try to open a channel +// on Chain B.
+type MsgChannelOpenTry struct { + PortId string `protobuf:"bytes,1,opt,name=port_id,json=portId,proto3" json:"port_id,omitempty" yaml:"port_id"` + // in the case of crossing hello's, when both chains call OpenInit, we need + // the channel identifier of the previous channel in state INIT + PreviousChannelId string `protobuf:"bytes,2,opt,name=previous_channel_id,json=previousChannelId,proto3" json:"previous_channel_id,omitempty" yaml:"previous_channel_id"` + Channel Channel `protobuf:"bytes,3,opt,name=channel,proto3" json:"channel"` + CounterpartyVersion string `protobuf:"bytes,4,opt,name=counterparty_version,json=counterpartyVersion,proto3" json:"counterparty_version,omitempty" yaml:"counterparty_version"` + ProofInit []byte `protobuf:"bytes,5,opt,name=proof_init,json=proofInit,proto3" json:"proof_init,omitempty" yaml:"proof_init"` + ProofHeight types.Height `protobuf:"bytes,6,opt,name=proof_height,json=proofHeight,proto3" json:"proof_height" yaml:"proof_height"` + Signer string `protobuf:"bytes,7,opt,name=signer,proto3" json:"signer,omitempty"` +} + +func (m *MsgChannelOpenTry) Reset() { *m = MsgChannelOpenTry{} } +func (m *MsgChannelOpenTry) String() string { return proto.CompactTextString(m) } +func (*MsgChannelOpenTry) ProtoMessage() {} +func (*MsgChannelOpenTry) Descriptor() ([]byte, []int) { + return fileDescriptor_4f707a6c6f551009, []int{2} +} +func (m *MsgChannelOpenTry) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *MsgChannelOpenTry) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_MsgChannelOpenTry.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *MsgChannelOpenTry) XXX_Merge(src proto.Message) { + xxx_messageInfo_MsgChannelOpenTry.Merge(m, src) +} +func (m *MsgChannelOpenTry) XXX_Size() int { + return m.Size() +} +func (m *MsgChannelOpenTry) XXX_DiscardUnknown() { + xxx_messageInfo_MsgChannelOpenTry.DiscardUnknown(m) +} + +var xxx_messageInfo_MsgChannelOpenTry proto.InternalMessageInfo + +// MsgChannelOpenTryResponse defines the Msg/ChannelOpenTry response type. 
+type MsgChannelOpenTryResponse struct { +} + +func (m *MsgChannelOpenTryResponse) Reset() { *m = MsgChannelOpenTryResponse{} } +func (m *MsgChannelOpenTryResponse) String() string { return proto.CompactTextString(m) } +func (*MsgChannelOpenTryResponse) ProtoMessage() {} +func (*MsgChannelOpenTryResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_4f707a6c6f551009, []int{3} +} +func (m *MsgChannelOpenTryResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *MsgChannelOpenTryResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_MsgChannelOpenTryResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *MsgChannelOpenTryResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_MsgChannelOpenTryResponse.Merge(m, src) +} +func (m *MsgChannelOpenTryResponse) XXX_Size() int { + return m.Size() +} +func (m *MsgChannelOpenTryResponse) XXX_DiscardUnknown() { + xxx_messageInfo_MsgChannelOpenTryResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_MsgChannelOpenTryResponse proto.InternalMessageInfo + +// MsgChannelOpenAck defines a msg sent by a Relayer to Chain A to acknowledge +// the change of channel state to TRYOPEN on Chain B. +type MsgChannelOpenAck struct { + PortId string `protobuf:"bytes,1,opt,name=port_id,json=portId,proto3" json:"port_id,omitempty" yaml:"port_id"` + ChannelId string `protobuf:"bytes,2,opt,name=channel_id,json=channelId,proto3" json:"channel_id,omitempty" yaml:"channel_id"` + CounterpartyChannelId string `protobuf:"bytes,3,opt,name=counterparty_channel_id,json=counterpartyChannelId,proto3" json:"counterparty_channel_id,omitempty" yaml:"counterparty_channel_id"` + CounterpartyVersion string `protobuf:"bytes,4,opt,name=counterparty_version,json=counterpartyVersion,proto3" json:"counterparty_version,omitempty" yaml:"counterparty_version"` + ProofTry []byte `protobuf:"bytes,5,opt,name=proof_try,json=proofTry,proto3" json:"proof_try,omitempty" yaml:"proof_try"` + ProofHeight types.Height `protobuf:"bytes,6,opt,name=proof_height,json=proofHeight,proto3" json:"proof_height" yaml:"proof_height"` + Signer string `protobuf:"bytes,7,opt,name=signer,proto3" json:"signer,omitempty"` +} + +func (m *MsgChannelOpenAck) Reset() { *m = MsgChannelOpenAck{} } +func (m *MsgChannelOpenAck) String() string { return proto.CompactTextString(m) } +func (*MsgChannelOpenAck) ProtoMessage() {} +func (*MsgChannelOpenAck) Descriptor() ([]byte, []int) { + return fileDescriptor_4f707a6c6f551009, []int{4} +} +func (m *MsgChannelOpenAck) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *MsgChannelOpenAck) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_MsgChannelOpenAck.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *MsgChannelOpenAck) XXX_Merge(src proto.Message) { + xxx_messageInfo_MsgChannelOpenAck.Merge(m, src) +} +func (m *MsgChannelOpenAck) XXX_Size() int { + return m.Size() +} +func (m *MsgChannelOpenAck) XXX_DiscardUnknown() { + xxx_messageInfo_MsgChannelOpenAck.DiscardUnknown(m) +} + +var xxx_messageInfo_MsgChannelOpenAck proto.InternalMessageInfo + +// MsgChannelOpenAckResponse defines the Msg/ChannelOpenAck response type. 
+type MsgChannelOpenAckResponse struct { +} + +func (m *MsgChannelOpenAckResponse) Reset() { *m = MsgChannelOpenAckResponse{} } +func (m *MsgChannelOpenAckResponse) String() string { return proto.CompactTextString(m) } +func (*MsgChannelOpenAckResponse) ProtoMessage() {} +func (*MsgChannelOpenAckResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_4f707a6c6f551009, []int{5} +} +func (m *MsgChannelOpenAckResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *MsgChannelOpenAckResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_MsgChannelOpenAckResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *MsgChannelOpenAckResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_MsgChannelOpenAckResponse.Merge(m, src) +} +func (m *MsgChannelOpenAckResponse) XXX_Size() int { + return m.Size() +} +func (m *MsgChannelOpenAckResponse) XXX_DiscardUnknown() { + xxx_messageInfo_MsgChannelOpenAckResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_MsgChannelOpenAckResponse proto.InternalMessageInfo + +// MsgChannelOpenConfirm defines a msg sent by a Relayer to Chain B to +// acknowledge the change of channel state to OPEN on Chain A. +type MsgChannelOpenConfirm struct { + PortId string `protobuf:"bytes,1,opt,name=port_id,json=portId,proto3" json:"port_id,omitempty" yaml:"port_id"` + ChannelId string `protobuf:"bytes,2,opt,name=channel_id,json=channelId,proto3" json:"channel_id,omitempty" yaml:"channel_id"` + ProofAck []byte `protobuf:"bytes,3,opt,name=proof_ack,json=proofAck,proto3" json:"proof_ack,omitempty" yaml:"proof_ack"` + ProofHeight types.Height `protobuf:"bytes,4,opt,name=proof_height,json=proofHeight,proto3" json:"proof_height" yaml:"proof_height"` + Signer string `protobuf:"bytes,5,opt,name=signer,proto3" json:"signer,omitempty"` +} + +func (m *MsgChannelOpenConfirm) Reset() { *m = MsgChannelOpenConfirm{} } +func (m *MsgChannelOpenConfirm) String() string { return proto.CompactTextString(m) } +func (*MsgChannelOpenConfirm) ProtoMessage() {} +func (*MsgChannelOpenConfirm) Descriptor() ([]byte, []int) { + return fileDescriptor_4f707a6c6f551009, []int{6} +} +func (m *MsgChannelOpenConfirm) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *MsgChannelOpenConfirm) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_MsgChannelOpenConfirm.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *MsgChannelOpenConfirm) XXX_Merge(src proto.Message) { + xxx_messageInfo_MsgChannelOpenConfirm.Merge(m, src) +} +func (m *MsgChannelOpenConfirm) XXX_Size() int { + return m.Size() +} +func (m *MsgChannelOpenConfirm) XXX_DiscardUnknown() { + xxx_messageInfo_MsgChannelOpenConfirm.DiscardUnknown(m) +} + +var xxx_messageInfo_MsgChannelOpenConfirm proto.InternalMessageInfo + +// MsgChannelOpenConfirmResponse defines the Msg/ChannelOpenConfirm response +// type. 
+type MsgChannelOpenConfirmResponse struct { +} + +func (m *MsgChannelOpenConfirmResponse) Reset() { *m = MsgChannelOpenConfirmResponse{} } +func (m *MsgChannelOpenConfirmResponse) String() string { return proto.CompactTextString(m) } +func (*MsgChannelOpenConfirmResponse) ProtoMessage() {} +func (*MsgChannelOpenConfirmResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_4f707a6c6f551009, []int{7} +} +func (m *MsgChannelOpenConfirmResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *MsgChannelOpenConfirmResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_MsgChannelOpenConfirmResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *MsgChannelOpenConfirmResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_MsgChannelOpenConfirmResponse.Merge(m, src) +} +func (m *MsgChannelOpenConfirmResponse) XXX_Size() int { + return m.Size() +} +func (m *MsgChannelOpenConfirmResponse) XXX_DiscardUnknown() { + xxx_messageInfo_MsgChannelOpenConfirmResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_MsgChannelOpenConfirmResponse proto.InternalMessageInfo + +// MsgChannelCloseInit defines a msg sent by a Relayer to Chain A +// to close a channel with Chain B. +type MsgChannelCloseInit struct { + PortId string `protobuf:"bytes,1,opt,name=port_id,json=portId,proto3" json:"port_id,omitempty" yaml:"port_id"` + ChannelId string `protobuf:"bytes,2,opt,name=channel_id,json=channelId,proto3" json:"channel_id,omitempty" yaml:"channel_id"` + Signer string `protobuf:"bytes,3,opt,name=signer,proto3" json:"signer,omitempty"` +} + +func (m *MsgChannelCloseInit) Reset() { *m = MsgChannelCloseInit{} } +func (m *MsgChannelCloseInit) String() string { return proto.CompactTextString(m) } +func (*MsgChannelCloseInit) ProtoMessage() {} +func (*MsgChannelCloseInit) Descriptor() ([]byte, []int) { + return fileDescriptor_4f707a6c6f551009, []int{8} +} +func (m *MsgChannelCloseInit) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *MsgChannelCloseInit) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_MsgChannelCloseInit.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *MsgChannelCloseInit) XXX_Merge(src proto.Message) { + xxx_messageInfo_MsgChannelCloseInit.Merge(m, src) +} +func (m *MsgChannelCloseInit) XXX_Size() int { + return m.Size() +} +func (m *MsgChannelCloseInit) XXX_DiscardUnknown() { + xxx_messageInfo_MsgChannelCloseInit.DiscardUnknown(m) +} + +var xxx_messageInfo_MsgChannelCloseInit proto.InternalMessageInfo + +// MsgChannelCloseInitResponse defines the Msg/ChannelCloseInit response type. 
+type MsgChannelCloseInitResponse struct { +} + +func (m *MsgChannelCloseInitResponse) Reset() { *m = MsgChannelCloseInitResponse{} } +func (m *MsgChannelCloseInitResponse) String() string { return proto.CompactTextString(m) } +func (*MsgChannelCloseInitResponse) ProtoMessage() {} +func (*MsgChannelCloseInitResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_4f707a6c6f551009, []int{9} +} +func (m *MsgChannelCloseInitResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *MsgChannelCloseInitResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_MsgChannelCloseInitResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *MsgChannelCloseInitResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_MsgChannelCloseInitResponse.Merge(m, src) +} +func (m *MsgChannelCloseInitResponse) XXX_Size() int { + return m.Size() +} +func (m *MsgChannelCloseInitResponse) XXX_DiscardUnknown() { + xxx_messageInfo_MsgChannelCloseInitResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_MsgChannelCloseInitResponse proto.InternalMessageInfo + +// MsgChannelCloseConfirm defines a msg sent by a Relayer to Chain B +// to acknowledge the change of channel state to CLOSED on Chain A. +type MsgChannelCloseConfirm struct { + PortId string `protobuf:"bytes,1,opt,name=port_id,json=portId,proto3" json:"port_id,omitempty" yaml:"port_id"` + ChannelId string `protobuf:"bytes,2,opt,name=channel_id,json=channelId,proto3" json:"channel_id,omitempty" yaml:"channel_id"` + ProofInit []byte `protobuf:"bytes,3,opt,name=proof_init,json=proofInit,proto3" json:"proof_init,omitempty" yaml:"proof_init"` + ProofHeight types.Height `protobuf:"bytes,4,opt,name=proof_height,json=proofHeight,proto3" json:"proof_height" yaml:"proof_height"` + Signer string `protobuf:"bytes,5,opt,name=signer,proto3" json:"signer,omitempty"` +} + +func (m *MsgChannelCloseConfirm) Reset() { *m = MsgChannelCloseConfirm{} } +func (m *MsgChannelCloseConfirm) String() string { return proto.CompactTextString(m) } +func (*MsgChannelCloseConfirm) ProtoMessage() {} +func (*MsgChannelCloseConfirm) Descriptor() ([]byte, []int) { + return fileDescriptor_4f707a6c6f551009, []int{10} +} +func (m *MsgChannelCloseConfirm) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *MsgChannelCloseConfirm) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_MsgChannelCloseConfirm.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *MsgChannelCloseConfirm) XXX_Merge(src proto.Message) { + xxx_messageInfo_MsgChannelCloseConfirm.Merge(m, src) +} +func (m *MsgChannelCloseConfirm) XXX_Size() int { + return m.Size() +} +func (m *MsgChannelCloseConfirm) XXX_DiscardUnknown() { + xxx_messageInfo_MsgChannelCloseConfirm.DiscardUnknown(m) +} + +var xxx_messageInfo_MsgChannelCloseConfirm proto.InternalMessageInfo + +// MsgChannelCloseConfirmResponse defines the Msg/ChannelCloseConfirm response +// type. 
+type MsgChannelCloseConfirmResponse struct { +} + +func (m *MsgChannelCloseConfirmResponse) Reset() { *m = MsgChannelCloseConfirmResponse{} } +func (m *MsgChannelCloseConfirmResponse) String() string { return proto.CompactTextString(m) } +func (*MsgChannelCloseConfirmResponse) ProtoMessage() {} +func (*MsgChannelCloseConfirmResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_4f707a6c6f551009, []int{11} +} +func (m *MsgChannelCloseConfirmResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *MsgChannelCloseConfirmResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_MsgChannelCloseConfirmResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *MsgChannelCloseConfirmResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_MsgChannelCloseConfirmResponse.Merge(m, src) +} +func (m *MsgChannelCloseConfirmResponse) XXX_Size() int { + return m.Size() +} +func (m *MsgChannelCloseConfirmResponse) XXX_DiscardUnknown() { + xxx_messageInfo_MsgChannelCloseConfirmResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_MsgChannelCloseConfirmResponse proto.InternalMessageInfo + +// MsgRecvPacket receives incoming IBC packet +type MsgRecvPacket struct { + Packet Packet `protobuf:"bytes,1,opt,name=packet,proto3" json:"packet"` + ProofCommitment []byte `protobuf:"bytes,2,opt,name=proof_commitment,json=proofCommitment,proto3" json:"proof_commitment,omitempty" yaml:"proof_commitment"` + ProofHeight types.Height `protobuf:"bytes,3,opt,name=proof_height,json=proofHeight,proto3" json:"proof_height" yaml:"proof_height"` + Signer string `protobuf:"bytes,4,opt,name=signer,proto3" json:"signer,omitempty"` +} + +func (m *MsgRecvPacket) Reset() { *m = MsgRecvPacket{} } +func (m *MsgRecvPacket) String() string { return proto.CompactTextString(m) } +func (*MsgRecvPacket) ProtoMessage() {} +func (*MsgRecvPacket) Descriptor() ([]byte, []int) { + return fileDescriptor_4f707a6c6f551009, []int{12} +} +func (m *MsgRecvPacket) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *MsgRecvPacket) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_MsgRecvPacket.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *MsgRecvPacket) XXX_Merge(src proto.Message) { + xxx_messageInfo_MsgRecvPacket.Merge(m, src) +} +func (m *MsgRecvPacket) XXX_Size() int { + return m.Size() +} +func (m *MsgRecvPacket) XXX_DiscardUnknown() { + xxx_messageInfo_MsgRecvPacket.DiscardUnknown(m) +} + +var xxx_messageInfo_MsgRecvPacket proto.InternalMessageInfo + +// MsgRecvPacketResponse defines the Msg/RecvPacket response type. 
+type MsgRecvPacketResponse struct { +} + +func (m *MsgRecvPacketResponse) Reset() { *m = MsgRecvPacketResponse{} } +func (m *MsgRecvPacketResponse) String() string { return proto.CompactTextString(m) } +func (*MsgRecvPacketResponse) ProtoMessage() {} +func (*MsgRecvPacketResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_4f707a6c6f551009, []int{13} +} +func (m *MsgRecvPacketResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *MsgRecvPacketResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_MsgRecvPacketResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *MsgRecvPacketResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_MsgRecvPacketResponse.Merge(m, src) +} +func (m *MsgRecvPacketResponse) XXX_Size() int { + return m.Size() +} +func (m *MsgRecvPacketResponse) XXX_DiscardUnknown() { + xxx_messageInfo_MsgRecvPacketResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_MsgRecvPacketResponse proto.InternalMessageInfo + +// MsgTimeout receives timed-out packet +type MsgTimeout struct { + Packet Packet `protobuf:"bytes,1,opt,name=packet,proto3" json:"packet"` + ProofUnreceived []byte `protobuf:"bytes,2,opt,name=proof_unreceived,json=proofUnreceived,proto3" json:"proof_unreceived,omitempty" yaml:"proof_unreceived"` + ProofHeight types.Height `protobuf:"bytes,3,opt,name=proof_height,json=proofHeight,proto3" json:"proof_height" yaml:"proof_height"` + NextSequenceRecv uint64 `protobuf:"varint,4,opt,name=next_sequence_recv,json=nextSequenceRecv,proto3" json:"next_sequence_recv,omitempty" yaml:"next_sequence_recv"` + Signer string `protobuf:"bytes,5,opt,name=signer,proto3" json:"signer,omitempty"` +} + +func (m *MsgTimeout) Reset() { *m = MsgTimeout{} } +func (m *MsgTimeout) String() string { return proto.CompactTextString(m) } +func (*MsgTimeout) ProtoMessage() {} +func (*MsgTimeout) Descriptor() ([]byte, []int) { + return fileDescriptor_4f707a6c6f551009, []int{14} +} +func (m *MsgTimeout) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *MsgTimeout) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_MsgTimeout.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *MsgTimeout) XXX_Merge(src proto.Message) { + xxx_messageInfo_MsgTimeout.Merge(m, src) +} +func (m *MsgTimeout) XXX_Size() int { + return m.Size() +} +func (m *MsgTimeout) XXX_DiscardUnknown() { + xxx_messageInfo_MsgTimeout.DiscardUnknown(m) +} + +var xxx_messageInfo_MsgTimeout proto.InternalMessageInfo + +// MsgTimeoutResponse defines the Msg/Timeout response type. 
+type MsgTimeoutResponse struct { +} + +func (m *MsgTimeoutResponse) Reset() { *m = MsgTimeoutResponse{} } +func (m *MsgTimeoutResponse) String() string { return proto.CompactTextString(m) } +func (*MsgTimeoutResponse) ProtoMessage() {} +func (*MsgTimeoutResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_4f707a6c6f551009, []int{15} +} +func (m *MsgTimeoutResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *MsgTimeoutResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_MsgTimeoutResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *MsgTimeoutResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_MsgTimeoutResponse.Merge(m, src) +} +func (m *MsgTimeoutResponse) XXX_Size() int { + return m.Size() +} +func (m *MsgTimeoutResponse) XXX_DiscardUnknown() { + xxx_messageInfo_MsgTimeoutResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_MsgTimeoutResponse proto.InternalMessageInfo + +// MsgTimeoutOnClose timed-out packet upon counterparty channel closure. +type MsgTimeoutOnClose struct { + Packet Packet `protobuf:"bytes,1,opt,name=packet,proto3" json:"packet"` + ProofUnreceived []byte `protobuf:"bytes,2,opt,name=proof_unreceived,json=proofUnreceived,proto3" json:"proof_unreceived,omitempty" yaml:"proof_unreceived"` + ProofClose []byte `protobuf:"bytes,3,opt,name=proof_close,json=proofClose,proto3" json:"proof_close,omitempty" yaml:"proof_close"` + ProofHeight types.Height `protobuf:"bytes,4,opt,name=proof_height,json=proofHeight,proto3" json:"proof_height" yaml:"proof_height"` + NextSequenceRecv uint64 `protobuf:"varint,5,opt,name=next_sequence_recv,json=nextSequenceRecv,proto3" json:"next_sequence_recv,omitempty" yaml:"next_sequence_recv"` + Signer string `protobuf:"bytes,6,opt,name=signer,proto3" json:"signer,omitempty"` +} + +func (m *MsgTimeoutOnClose) Reset() { *m = MsgTimeoutOnClose{} } +func (m *MsgTimeoutOnClose) String() string { return proto.CompactTextString(m) } +func (*MsgTimeoutOnClose) ProtoMessage() {} +func (*MsgTimeoutOnClose) Descriptor() ([]byte, []int) { + return fileDescriptor_4f707a6c6f551009, []int{16} +} +func (m *MsgTimeoutOnClose) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *MsgTimeoutOnClose) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_MsgTimeoutOnClose.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *MsgTimeoutOnClose) XXX_Merge(src proto.Message) { + xxx_messageInfo_MsgTimeoutOnClose.Merge(m, src) +} +func (m *MsgTimeoutOnClose) XXX_Size() int { + return m.Size() +} +func (m *MsgTimeoutOnClose) XXX_DiscardUnknown() { + xxx_messageInfo_MsgTimeoutOnClose.DiscardUnknown(m) +} + +var xxx_messageInfo_MsgTimeoutOnClose proto.InternalMessageInfo + +// MsgTimeoutOnCloseResponse defines the Msg/TimeoutOnClose response type. 
+type MsgTimeoutOnCloseResponse struct { +} + +func (m *MsgTimeoutOnCloseResponse) Reset() { *m = MsgTimeoutOnCloseResponse{} } +func (m *MsgTimeoutOnCloseResponse) String() string { return proto.CompactTextString(m) } +func (*MsgTimeoutOnCloseResponse) ProtoMessage() {} +func (*MsgTimeoutOnCloseResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_4f707a6c6f551009, []int{17} +} +func (m *MsgTimeoutOnCloseResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *MsgTimeoutOnCloseResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_MsgTimeoutOnCloseResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *MsgTimeoutOnCloseResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_MsgTimeoutOnCloseResponse.Merge(m, src) +} +func (m *MsgTimeoutOnCloseResponse) XXX_Size() int { + return m.Size() +} +func (m *MsgTimeoutOnCloseResponse) XXX_DiscardUnknown() { + xxx_messageInfo_MsgTimeoutOnCloseResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_MsgTimeoutOnCloseResponse proto.InternalMessageInfo + +// MsgAcknowledgement receives incoming IBC acknowledgement +type MsgAcknowledgement struct { + Packet Packet `protobuf:"bytes,1,opt,name=packet,proto3" json:"packet"` + Acknowledgement []byte `protobuf:"bytes,2,opt,name=acknowledgement,proto3" json:"acknowledgement,omitempty"` + ProofAcked []byte `protobuf:"bytes,3,opt,name=proof_acked,json=proofAcked,proto3" json:"proof_acked,omitempty" yaml:"proof_acked"` + ProofHeight types.Height `protobuf:"bytes,4,opt,name=proof_height,json=proofHeight,proto3" json:"proof_height" yaml:"proof_height"` + Signer string `protobuf:"bytes,5,opt,name=signer,proto3" json:"signer,omitempty"` +} + +func (m *MsgAcknowledgement) Reset() { *m = MsgAcknowledgement{} } +func (m *MsgAcknowledgement) String() string { return proto.CompactTextString(m) } +func (*MsgAcknowledgement) ProtoMessage() {} +func (*MsgAcknowledgement) Descriptor() ([]byte, []int) { + return fileDescriptor_4f707a6c6f551009, []int{18} +} +func (m *MsgAcknowledgement) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *MsgAcknowledgement) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_MsgAcknowledgement.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *MsgAcknowledgement) XXX_Merge(src proto.Message) { + xxx_messageInfo_MsgAcknowledgement.Merge(m, src) +} +func (m *MsgAcknowledgement) XXX_Size() int { + return m.Size() +} +func (m *MsgAcknowledgement) XXX_DiscardUnknown() { + xxx_messageInfo_MsgAcknowledgement.DiscardUnknown(m) +} + +var xxx_messageInfo_MsgAcknowledgement proto.InternalMessageInfo + +// MsgAcknowledgementResponse defines the Msg/Acknowledgement response type. 
+type MsgAcknowledgementResponse struct { +} + +func (m *MsgAcknowledgementResponse) Reset() { *m = MsgAcknowledgementResponse{} } +func (m *MsgAcknowledgementResponse) String() string { return proto.CompactTextString(m) } +func (*MsgAcknowledgementResponse) ProtoMessage() {} +func (*MsgAcknowledgementResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_4f707a6c6f551009, []int{19} +} +func (m *MsgAcknowledgementResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *MsgAcknowledgementResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_MsgAcknowledgementResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *MsgAcknowledgementResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_MsgAcknowledgementResponse.Merge(m, src) +} +func (m *MsgAcknowledgementResponse) XXX_Size() int { + return m.Size() +} +func (m *MsgAcknowledgementResponse) XXX_DiscardUnknown() { + xxx_messageInfo_MsgAcknowledgementResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_MsgAcknowledgementResponse proto.InternalMessageInfo + +func init() { + proto.RegisterType((*MsgChannelOpenInit)(nil), "ibcgo.core.channel.v1.MsgChannelOpenInit") + proto.RegisterType((*MsgChannelOpenInitResponse)(nil), "ibcgo.core.channel.v1.MsgChannelOpenInitResponse") + proto.RegisterType((*MsgChannelOpenTry)(nil), "ibcgo.core.channel.v1.MsgChannelOpenTry") + proto.RegisterType((*MsgChannelOpenTryResponse)(nil), "ibcgo.core.channel.v1.MsgChannelOpenTryResponse") + proto.RegisterType((*MsgChannelOpenAck)(nil), "ibcgo.core.channel.v1.MsgChannelOpenAck") + proto.RegisterType((*MsgChannelOpenAckResponse)(nil), "ibcgo.core.channel.v1.MsgChannelOpenAckResponse") + proto.RegisterType((*MsgChannelOpenConfirm)(nil), "ibcgo.core.channel.v1.MsgChannelOpenConfirm") + proto.RegisterType((*MsgChannelOpenConfirmResponse)(nil), "ibcgo.core.channel.v1.MsgChannelOpenConfirmResponse") + proto.RegisterType((*MsgChannelCloseInit)(nil), "ibcgo.core.channel.v1.MsgChannelCloseInit") + proto.RegisterType((*MsgChannelCloseInitResponse)(nil), "ibcgo.core.channel.v1.MsgChannelCloseInitResponse") + proto.RegisterType((*MsgChannelCloseConfirm)(nil), "ibcgo.core.channel.v1.MsgChannelCloseConfirm") + proto.RegisterType((*MsgChannelCloseConfirmResponse)(nil), "ibcgo.core.channel.v1.MsgChannelCloseConfirmResponse") + proto.RegisterType((*MsgRecvPacket)(nil), "ibcgo.core.channel.v1.MsgRecvPacket") + proto.RegisterType((*MsgRecvPacketResponse)(nil), "ibcgo.core.channel.v1.MsgRecvPacketResponse") + proto.RegisterType((*MsgTimeout)(nil), "ibcgo.core.channel.v1.MsgTimeout") + proto.RegisterType((*MsgTimeoutResponse)(nil), "ibcgo.core.channel.v1.MsgTimeoutResponse") + proto.RegisterType((*MsgTimeoutOnClose)(nil), "ibcgo.core.channel.v1.MsgTimeoutOnClose") + proto.RegisterType((*MsgTimeoutOnCloseResponse)(nil), "ibcgo.core.channel.v1.MsgTimeoutOnCloseResponse") + proto.RegisterType((*MsgAcknowledgement)(nil), "ibcgo.core.channel.v1.MsgAcknowledgement") + proto.RegisterType((*MsgAcknowledgementResponse)(nil), "ibcgo.core.channel.v1.MsgAcknowledgementResponse") +} + +func init() { proto.RegisterFile("ibcgo/core/channel/v1/tx.proto", fileDescriptor_4f707a6c6f551009) } + +var fileDescriptor_4f707a6c6f551009 = []byte{ + // 1126 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xcc, 0x58, 0xcd, 0x6e, 0xe3, 0x54, + 
0x14, 0xce, 0x5f, 0xd3, 0xf6, 0xb4, 0x4c, 0x5b, 0xa7, 0x3f, 0x19, 0x67, 0x6a, 0x77, 0x0c, 0x8b, + 0x0c, 0x4c, 0x93, 0x49, 0x29, 0x42, 0x1a, 0x24, 0xa4, 0xa4, 0x12, 0x9a, 0x11, 0x2a, 0x83, 0x4c, + 0x01, 0x69, 0x84, 0x14, 0xd2, 0x9b, 0x3b, 0xae, 0x95, 0xc4, 0x37, 0xd8, 0x4e, 0x68, 0xc4, 0x0b, + 0xb0, 0x64, 0xc1, 0x8a, 0x05, 0x1a, 0x89, 0x35, 0x0b, 0x24, 0x1e, 0x62, 0x96, 0xb3, 0xe3, 0x67, + 0x61, 0xa1, 0x76, 0xc3, 0xda, 0x4f, 0x80, 0x7c, 0x7d, 0xed, 0x38, 0x89, 0xdd, 0x3a, 0x1d, 0xd2, + 0xe9, 0xce, 0x3e, 0xe7, 0xbb, 0xe7, 0x9c, 0xfb, 0x7d, 0xc7, 0xc7, 0xd7, 0x06, 0x41, 0x3d, 0x46, + 0x0a, 0x29, 0x23, 0xa2, 0xe3, 0x32, 0x3a, 0x69, 0x68, 0x1a, 0x6e, 0x97, 0xfb, 0x95, 0xb2, 0x79, + 0x5a, 0xea, 0xea, 0xc4, 0x24, 0xdc, 0x06, 0xf5, 0x97, 0x1c, 0x7f, 0x89, 0xf9, 0x4b, 0xfd, 0x0a, + 0xbf, 0xae, 0x10, 0x85, 0x50, 0x44, 0xd9, 0xb9, 0x72, 0xc1, 0xfc, 0xdd, 0x60, 0xb0, 0xb6, 0x8a, + 0x35, 0xd3, 0x89, 0xe5, 0x5e, 0x31, 0xc8, 0x9b, 0xe1, 0xf9, 0xbc, 0xd0, 0x14, 0x24, 0xfd, 0x92, + 0x04, 0xee, 0xd0, 0x50, 0x0e, 0x5c, 0xe3, 0x93, 0x2e, 0xd6, 0x1e, 0x6b, 0xaa, 0xc9, 0xbd, 0x03, + 0xf3, 0x5d, 0xa2, 0x9b, 0x75, 0xb5, 0x99, 0x4f, 0xee, 0x24, 0x8b, 0x8b, 0x35, 0xce, 0xb6, 0xc4, + 0x5b, 0x83, 0x46, 0xa7, 0xfd, 0x50, 0x62, 0x0e, 0x49, 0xce, 0x3a, 0x57, 0x8f, 0x9b, 0xdc, 0x87, + 0x30, 0xcf, 0x82, 0xe6, 0x53, 0x3b, 0xc9, 0xe2, 0xd2, 0x9e, 0x50, 0x0a, 0xdd, 0x4a, 0x89, 0x65, + 0xa9, 0x65, 0x5e, 0x58, 0x62, 0x42, 0xf6, 0x16, 0x71, 0x9b, 0x90, 0x35, 0x54, 0x45, 0xc3, 0x7a, + 0x3e, 0xed, 0xe4, 0x92, 0xd9, 0xdd, 0xc3, 0x85, 0xef, 0x9f, 0x8b, 0x89, 0x7f, 0x9f, 0x8b, 0x09, + 0xe9, 0x0e, 0xf0, 0x93, 0x45, 0xca, 0xd8, 0xe8, 0x12, 0xcd, 0xc0, 0xd2, 0xdf, 0x69, 0x58, 0x1b, + 0x75, 0x1f, 0xe9, 0x83, 0xe9, 0xb6, 0xf0, 0x09, 0xe4, 0xba, 0x3a, 0xee, 0xab, 0xa4, 0x67, 0xd4, + 0x59, 0x59, 0xce, 0xc2, 0x14, 0x5d, 0x28, 0xd8, 0x96, 0xc8, 0xb3, 0x85, 0x93, 0x20, 0x49, 0x5e, + 0xf3, 0xac, 0xac, 0x82, 0x51, 0x4a, 0xd2, 0x57, 0xa1, 0x44, 0x86, 0x75, 0x44, 0x7a, 0x9a, 0x89, + 0xf5, 0x6e, 0x43, 0x37, 0x07, 0xf5, 0x3e, 0xd6, 0x0d, 0x95, 0x68, 0xf9, 0x0c, 0x2d, 0x48, 0xb4, + 0x2d, 0xb1, 0xe0, 0x16, 0x14, 0x86, 0x92, 0xe4, 0x5c, 0xd0, 0xfc, 0x85, 0x6b, 0xe5, 0xf6, 0x01, + 0xba, 0x3a, 0x21, 0xcf, 0xea, 0xaa, 0xa6, 0x9a, 0xf9, 0xb9, 0x9d, 0x64, 0x71, 0xb9, 0xb6, 0x61, + 0x5b, 0xe2, 0x9a, 0xb7, 0x35, 0xcf, 0x27, 0xc9, 0x8b, 0xf4, 0x86, 0x76, 0xc2, 0x57, 0xb0, 0xec, + 0x7a, 0x4e, 0xb0, 0xaa, 0x9c, 0x98, 0xf9, 0x2c, 0xdd, 0xce, 0x9d, 0x91, 0xed, 0xb8, 0x5d, 0xd7, + 0xaf, 0x94, 0x1e, 0x51, 0x4c, 0xad, 0xe0, 0x6c, 0xc6, 0xb6, 0xc4, 0x5c, 0x30, 0xb2, 0xbb, 0x5e, + 0x92, 0x97, 0xe8, 0xad, 0x8b, 0x0c, 0x48, 0x3f, 0x1f, 0x21, 0x7d, 0x01, 0x6e, 0x4f, 0x68, 0xeb, + 0x2b, 0xff, 0xd7, 0x84, 0xf2, 0x55, 0xd4, 0x9a, 0x4e, 0xf9, 0x7d, 0x80, 0x09, 0xc1, 0x03, 0xac, + 0x04, 0x75, 0x5e, 0x44, 0xbe, 0xbe, 0x4f, 0x61, 0x6b, 0x84, 0xf9, 0x40, 0x08, 0xda, 0xc3, 0x35, + 0xc9, 0xb6, 0x44, 0x21, 0x44, 0xa2, 0x60, 0xbc, 0x8d, 0xa0, 0x67, 0xd8, 0x3b, 0xb3, 0xd0, 0xbe, + 0x02, 0xae, 0xa4, 0x75, 0x53, 0x1f, 0x30, 0xe9, 0xd7, 0x6d, 0x4b, 0x5c, 0x0d, 0x0a, 0x64, 0xea, + 0x03, 0x49, 0x5e, 0xa0, 0xd7, 0xce, 0xf3, 0x73, 0xe3, 0x84, 0xaf, 0xa2, 0x96, 0x2f, 0xfc, 0xaf, + 0x29, 0xd8, 0x18, 0xf5, 0x1e, 0x10, 0xed, 0x99, 0xaa, 0x77, 0xae, 0x43, 0x7c, 0x9f, 0xcc, 0x06, + 0x6a, 0x51, 0xb9, 0x43, 0xc8, 0x6c, 0xa0, 0x96, 0x47, 0xa6, 0xd3, 0x92, 0xe3, 0x64, 0x66, 0x66, + 0x44, 0xe6, 0x5c, 0x04, 0x99, 0x22, 0x6c, 0x87, 0xd2, 0xe5, 0x13, 0xfa, 0x53, 0x12, 0x72, 0x43, + 0xc4, 0x41, 0x9b, 0x18, 0x78, 0xfa, 0x17, 0xc1, 0xd5, 0xe8, 0xbc, 0x7c, 0xfc, 0x6f, 0x43, 0x21, + 0xa4, 0x36, 0xbf, 0xf6, 
0xdf, 0x52, 0xb0, 0x39, 0xe6, 0xbf, 0xc6, 0x6e, 0x18, 0x1d, 0xab, 0xe9, + 0x2b, 0x8e, 0xd5, 0xeb, 0x6e, 0x88, 0x1d, 0x10, 0xc2, 0x29, 0xf3, 0x59, 0xfd, 0x31, 0x05, 0x6f, + 0x1c, 0x1a, 0x8a, 0x8c, 0x51, 0xff, 0xd3, 0x06, 0x6a, 0x61, 0x93, 0xfb, 0x00, 0xb2, 0x5d, 0x7a, + 0x45, 0xb9, 0x5c, 0xda, 0xdb, 0x8e, 0x78, 0xa7, 0xb9, 0x70, 0xf6, 0x4a, 0x63, 0x4b, 0xb8, 0x8f, + 0x60, 0xd5, 0x2d, 0x18, 0x91, 0x4e, 0x47, 0x35, 0x3b, 0x58, 0x33, 0x29, 0xc5, 0xcb, 0xb5, 0x82, + 0x6d, 0x89, 0x5b, 0xc1, 0x2d, 0x0d, 0x11, 0x92, 0xbc, 0x42, 0x4d, 0x07, 0xbe, 0x65, 0x82, 0xb8, + 0xf4, 0x8c, 0x88, 0xcb, 0x44, 0x10, 0xb7, 0x45, 0x07, 0xcf, 0x90, 0x15, 0x9f, 0x2f, 0x2b, 0x05, + 0x70, 0x68, 0x28, 0x47, 0x6a, 0x07, 0x93, 0xde, 0xff, 0x45, 0x56, 0x4f, 0xd3, 0x31, 0xc2, 0x6a, + 0x1f, 0x37, 0xa3, 0xc8, 0x1a, 0x22, 0x3c, 0xb2, 0x3e, 0xf7, 0x2d, 0x33, 0x26, 0xeb, 0x63, 0xe0, + 0x34, 0x7c, 0x6a, 0xd6, 0x0d, 0xfc, 0x4d, 0x0f, 0x6b, 0x08, 0xd7, 0x75, 0x8c, 0xfa, 0x94, 0xb8, + 0x4c, 0x6d, 0xdb, 0xb6, 0xc4, 0xdb, 0x6e, 0x84, 0x49, 0x8c, 0x24, 0xaf, 0x3a, 0xc6, 0xcf, 0x98, + 0xcd, 0x21, 0x33, 0x46, 0xcb, 0xae, 0xd3, 0x93, 0x2a, 0xe3, 0xd7, 0xa7, 0xfd, 0x67, 0xf7, 0x08, + 0xc0, 0xcc, 0x4f, 0x34, 0xda, 0xcb, 0x37, 0x83, 0xfd, 0xf7, 0x61, 0x89, 0x35, 0xb4, 0x53, 0x13, + 0x1b, 0x0d, 0x9b, 0xb6, 0x25, 0x72, 0x23, 0xdd, 0xee, 0x38, 0x25, 0xd9, 0x1d, 0x22, 0x6e, 0xf5, + 0xb3, 0x1d, 0x0e, 0xe1, 0xb2, 0xcd, 0xbd, 0xaa, 0x6c, 0xd9, 0x0b, 0xdf, 0xe3, 0xa3, 0xfa, 0xf8, + 0xea, 0xfd, 0x9e, 0xa2, 0xa2, 0x56, 0x51, 0x4b, 0x23, 0xdf, 0xb6, 0x71, 0x53, 0xc1, 0xf4, 0x21, + 0x7f, 0x25, 0xf9, 0x8a, 0xb0, 0xd2, 0x18, 0x8d, 0xe7, 0xaa, 0x27, 0x8f, 0x9b, 0x87, 0x02, 0x39, + 0x0b, 0x9b, 0x51, 0x02, 0x51, 0xa7, 0x27, 0x50, 0xd5, 0xb9, 0x79, 0xed, 0xd3, 0xdb, 0xfd, 0x1e, + 0x1a, 0x63, 0xcd, 0x23, 0x75, 0xef, 0x8f, 0x05, 0x48, 0x1f, 0x1a, 0x0a, 0x47, 0x60, 0x65, 0xfc, + 0xbb, 0xee, 0x5e, 0x04, 0x91, 0x93, 0x5f, 0x57, 0x7c, 0x25, 0x36, 0xd4, 0x4b, 0xcc, 0xb5, 0xe1, + 0xd6, 0xd8, 0x47, 0x58, 0x31, 0x56, 0x90, 0x23, 0x7d, 0xc0, 0x3f, 0x88, 0x8b, 0x8c, 0xc8, 0xe6, + 0x9c, 0xb2, 0xe2, 0x65, 0xab, 0xa2, 0x56, 0xcc, 0x6c, 0x81, 0x13, 0x27, 0x77, 0x0a, 0x5c, 0xc8, + 0x69, 0xf3, 0x7e, 0xac, 0x38, 0x0c, 0xcd, 0xef, 0x4f, 0x83, 0xf6, 0x33, 0xeb, 0xb0, 0x3a, 0x71, + 0x2c, 0x7b, 0xfb, 0xd2, 0x48, 0x3e, 0x96, 0xdf, 0x8b, 0x8f, 0xf5, 0x73, 0x7e, 0x07, 0xb9, 0xb0, + 0xe3, 0xd4, 0x6e, 0xbc, 0x50, 0xde, 0x7e, 0xdf, 0x9b, 0x0a, 0xee, 0x27, 0xff, 0x1a, 0x20, 0x70, + 0xea, 0x78, 0x2b, 0x3a, 0xc8, 0x10, 0xc5, 0xdf, 0x8f, 0x83, 0xf2, 0x33, 0x7c, 0x09, 0xf3, 0xde, + 0x7b, 0xfa, 0x6e, 0xf4, 0x42, 0x06, 0xe1, 0xef, 0x5d, 0x0a, 0x09, 0xf6, 0xe4, 0xd8, 0x9b, 0xa8, + 0x78, 0xe9, 0x62, 0x86, 0xbc, 0xa8, 0x27, 0xc3, 0xa7, 0xa7, 0xf3, 0x80, 0x8f, 0x4f, 0xce, 0x0b, + 0x6a, 0x1d, 0x83, 0x5e, 0xf4, 0x80, 0x47, 0x4c, 0x96, 0xda, 0xa3, 0x17, 0x67, 0x42, 0xf2, 0xe5, + 0x99, 0x90, 0xfc, 0xe7, 0x4c, 0x48, 0xfe, 0x70, 0x2e, 0x24, 0x5e, 0x9e, 0x0b, 0x89, 0x3f, 0xcf, + 0x85, 0xc4, 0xd3, 0x92, 0xa2, 0x9a, 0x27, 0xbd, 0xe3, 0x12, 0x22, 0x9d, 0x32, 0x22, 0x46, 0x87, + 0x18, 0x65, 0xf5, 0x18, 0xed, 0x7a, 0xff, 0x9f, 0x1e, 0xec, 0xef, 0x7a, 0xbf, 0xa0, 0xcc, 0x41, + 0x17, 0x1b, 0xc7, 0x59, 0xfa, 0xfb, 0xe9, 0xdd, 0xff, 0x02, 0x00, 0x00, 0xff, 0xff, 0x5f, 0xfe, + 0x8d, 0x31, 0x15, 0x13, 0x00, 0x00, +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConn + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. 
+const _ = grpc.SupportPackageIsVersion4 + +// MsgClient is the client API for Msg service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. +type MsgClient interface { + // ChannelOpenInit defines a rpc handler method for MsgChannelOpenInit. + ChannelOpenInit(ctx context.Context, in *MsgChannelOpenInit, opts ...grpc.CallOption) (*MsgChannelOpenInitResponse, error) + // ChannelOpenTry defines a rpc handler method for MsgChannelOpenTry. + ChannelOpenTry(ctx context.Context, in *MsgChannelOpenTry, opts ...grpc.CallOption) (*MsgChannelOpenTryResponse, error) + // ChannelOpenAck defines a rpc handler method for MsgChannelOpenAck. + ChannelOpenAck(ctx context.Context, in *MsgChannelOpenAck, opts ...grpc.CallOption) (*MsgChannelOpenAckResponse, error) + // ChannelOpenConfirm defines a rpc handler method for MsgChannelOpenConfirm. + ChannelOpenConfirm(ctx context.Context, in *MsgChannelOpenConfirm, opts ...grpc.CallOption) (*MsgChannelOpenConfirmResponse, error) + // ChannelCloseInit defines a rpc handler method for MsgChannelCloseInit. + ChannelCloseInit(ctx context.Context, in *MsgChannelCloseInit, opts ...grpc.CallOption) (*MsgChannelCloseInitResponse, error) + // ChannelCloseConfirm defines a rpc handler method for + // MsgChannelCloseConfirm. + ChannelCloseConfirm(ctx context.Context, in *MsgChannelCloseConfirm, opts ...grpc.CallOption) (*MsgChannelCloseConfirmResponse, error) + // RecvPacket defines a rpc handler method for MsgRecvPacket. + RecvPacket(ctx context.Context, in *MsgRecvPacket, opts ...grpc.CallOption) (*MsgRecvPacketResponse, error) + // Timeout defines a rpc handler method for MsgTimeout. + Timeout(ctx context.Context, in *MsgTimeout, opts ...grpc.CallOption) (*MsgTimeoutResponse, error) + // TimeoutOnClose defines a rpc handler method for MsgTimeoutOnClose. + TimeoutOnClose(ctx context.Context, in *MsgTimeoutOnClose, opts ...grpc.CallOption) (*MsgTimeoutOnCloseResponse, error) + // Acknowledgement defines a rpc handler method for MsgAcknowledgement. + Acknowledgement(ctx context.Context, in *MsgAcknowledgement, opts ...grpc.CallOption) (*MsgAcknowledgementResponse, error) +} + +type msgClient struct { + cc grpc1.ClientConn +} + +func NewMsgClient(cc grpc1.ClientConn) MsgClient { + return &msgClient{cc} +} + +func (c *msgClient) ChannelOpenInit(ctx context.Context, in *MsgChannelOpenInit, opts ...grpc.CallOption) (*MsgChannelOpenInitResponse, error) { + out := new(MsgChannelOpenInitResponse) + err := c.cc.Invoke(ctx, "/ibcgo.core.channel.v1.Msg/ChannelOpenInit", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *msgClient) ChannelOpenTry(ctx context.Context, in *MsgChannelOpenTry, opts ...grpc.CallOption) (*MsgChannelOpenTryResponse, error) { + out := new(MsgChannelOpenTryResponse) + err := c.cc.Invoke(ctx, "/ibcgo.core.channel.v1.Msg/ChannelOpenTry", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *msgClient) ChannelOpenAck(ctx context.Context, in *MsgChannelOpenAck, opts ...grpc.CallOption) (*MsgChannelOpenAckResponse, error) { + out := new(MsgChannelOpenAckResponse) + err := c.cc.Invoke(ctx, "/ibcgo.core.channel.v1.Msg/ChannelOpenAck", in, out, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +func (c *msgClient) ChannelOpenConfirm(ctx context.Context, in *MsgChannelOpenConfirm, opts ...grpc.CallOption) (*MsgChannelOpenConfirmResponse, error) { + out := new(MsgChannelOpenConfirmResponse) + err := c.cc.Invoke(ctx, "/ibcgo.core.channel.v1.Msg/ChannelOpenConfirm", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *msgClient) ChannelCloseInit(ctx context.Context, in *MsgChannelCloseInit, opts ...grpc.CallOption) (*MsgChannelCloseInitResponse, error) { + out := new(MsgChannelCloseInitResponse) + err := c.cc.Invoke(ctx, "/ibcgo.core.channel.v1.Msg/ChannelCloseInit", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *msgClient) ChannelCloseConfirm(ctx context.Context, in *MsgChannelCloseConfirm, opts ...grpc.CallOption) (*MsgChannelCloseConfirmResponse, error) { + out := new(MsgChannelCloseConfirmResponse) + err := c.cc.Invoke(ctx, "/ibcgo.core.channel.v1.Msg/ChannelCloseConfirm", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *msgClient) RecvPacket(ctx context.Context, in *MsgRecvPacket, opts ...grpc.CallOption) (*MsgRecvPacketResponse, error) { + out := new(MsgRecvPacketResponse) + err := c.cc.Invoke(ctx, "/ibcgo.core.channel.v1.Msg/RecvPacket", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *msgClient) Timeout(ctx context.Context, in *MsgTimeout, opts ...grpc.CallOption) (*MsgTimeoutResponse, error) { + out := new(MsgTimeoutResponse) + err := c.cc.Invoke(ctx, "/ibcgo.core.channel.v1.Msg/Timeout", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *msgClient) TimeoutOnClose(ctx context.Context, in *MsgTimeoutOnClose, opts ...grpc.CallOption) (*MsgTimeoutOnCloseResponse, error) { + out := new(MsgTimeoutOnCloseResponse) + err := c.cc.Invoke(ctx, "/ibcgo.core.channel.v1.Msg/TimeoutOnClose", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *msgClient) Acknowledgement(ctx context.Context, in *MsgAcknowledgement, opts ...grpc.CallOption) (*MsgAcknowledgementResponse, error) { + out := new(MsgAcknowledgementResponse) + err := c.cc.Invoke(ctx, "/ibcgo.core.channel.v1.Msg/Acknowledgement", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// MsgServer is the server API for Msg service. +type MsgServer interface { + // ChannelOpenInit defines a rpc handler method for MsgChannelOpenInit. + ChannelOpenInit(context.Context, *MsgChannelOpenInit) (*MsgChannelOpenInitResponse, error) + // ChannelOpenTry defines a rpc handler method for MsgChannelOpenTry. + ChannelOpenTry(context.Context, *MsgChannelOpenTry) (*MsgChannelOpenTryResponse, error) + // ChannelOpenAck defines a rpc handler method for MsgChannelOpenAck. + ChannelOpenAck(context.Context, *MsgChannelOpenAck) (*MsgChannelOpenAckResponse, error) + // ChannelOpenConfirm defines a rpc handler method for MsgChannelOpenConfirm. + ChannelOpenConfirm(context.Context, *MsgChannelOpenConfirm) (*MsgChannelOpenConfirmResponse, error) + // ChannelCloseInit defines a rpc handler method for MsgChannelCloseInit. + ChannelCloseInit(context.Context, *MsgChannelCloseInit) (*MsgChannelCloseInitResponse, error) + // ChannelCloseConfirm defines a rpc handler method for + // MsgChannelCloseConfirm. 
+ ChannelCloseConfirm(context.Context, *MsgChannelCloseConfirm) (*MsgChannelCloseConfirmResponse, error) + // RecvPacket defines a rpc handler method for MsgRecvPacket. + RecvPacket(context.Context, *MsgRecvPacket) (*MsgRecvPacketResponse, error) + // Timeout defines a rpc handler method for MsgTimeout. + Timeout(context.Context, *MsgTimeout) (*MsgTimeoutResponse, error) + // TimeoutOnClose defines a rpc handler method for MsgTimeoutOnClose. + TimeoutOnClose(context.Context, *MsgTimeoutOnClose) (*MsgTimeoutOnCloseResponse, error) + // Acknowledgement defines a rpc handler method for MsgAcknowledgement. + Acknowledgement(context.Context, *MsgAcknowledgement) (*MsgAcknowledgementResponse, error) +} + +// UnimplementedMsgServer can be embedded to have forward compatible implementations. +type UnimplementedMsgServer struct { +} + +func (*UnimplementedMsgServer) ChannelOpenInit(ctx context.Context, req *MsgChannelOpenInit) (*MsgChannelOpenInitResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method ChannelOpenInit not implemented") +} +func (*UnimplementedMsgServer) ChannelOpenTry(ctx context.Context, req *MsgChannelOpenTry) (*MsgChannelOpenTryResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method ChannelOpenTry not implemented") +} +func (*UnimplementedMsgServer) ChannelOpenAck(ctx context.Context, req *MsgChannelOpenAck) (*MsgChannelOpenAckResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method ChannelOpenAck not implemented") +} +func (*UnimplementedMsgServer) ChannelOpenConfirm(ctx context.Context, req *MsgChannelOpenConfirm) (*MsgChannelOpenConfirmResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method ChannelOpenConfirm not implemented") +} +func (*UnimplementedMsgServer) ChannelCloseInit(ctx context.Context, req *MsgChannelCloseInit) (*MsgChannelCloseInitResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method ChannelCloseInit not implemented") +} +func (*UnimplementedMsgServer) ChannelCloseConfirm(ctx context.Context, req *MsgChannelCloseConfirm) (*MsgChannelCloseConfirmResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method ChannelCloseConfirm not implemented") +} +func (*UnimplementedMsgServer) RecvPacket(ctx context.Context, req *MsgRecvPacket) (*MsgRecvPacketResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method RecvPacket not implemented") +} +func (*UnimplementedMsgServer) Timeout(ctx context.Context, req *MsgTimeout) (*MsgTimeoutResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method Timeout not implemented") +} +func (*UnimplementedMsgServer) TimeoutOnClose(ctx context.Context, req *MsgTimeoutOnClose) (*MsgTimeoutOnCloseResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method TimeoutOnClose not implemented") +} +func (*UnimplementedMsgServer) Acknowledgement(ctx context.Context, req *MsgAcknowledgement) (*MsgAcknowledgementResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method Acknowledgement not implemented") +} + +func RegisterMsgServer(s grpc1.Server, srv MsgServer) { + s.RegisterService(&_Msg_serviceDesc, srv) +} + +func _Msg_ChannelOpenInit_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(MsgChannelOpenInit) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(MsgServer).ChannelOpenInit(ctx, in) + } + info := &grpc.UnaryServerInfo{ + 
Server: srv, + FullMethod: "/ibcgo.core.channel.v1.Msg/ChannelOpenInit", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(MsgServer).ChannelOpenInit(ctx, req.(*MsgChannelOpenInit)) + } + return interceptor(ctx, in, info, handler) +} + +func _Msg_ChannelOpenTry_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(MsgChannelOpenTry) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(MsgServer).ChannelOpenTry(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/ibcgo.core.channel.v1.Msg/ChannelOpenTry", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(MsgServer).ChannelOpenTry(ctx, req.(*MsgChannelOpenTry)) + } + return interceptor(ctx, in, info, handler) +} + +func _Msg_ChannelOpenAck_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(MsgChannelOpenAck) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(MsgServer).ChannelOpenAck(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/ibcgo.core.channel.v1.Msg/ChannelOpenAck", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(MsgServer).ChannelOpenAck(ctx, req.(*MsgChannelOpenAck)) + } + return interceptor(ctx, in, info, handler) +} + +func _Msg_ChannelOpenConfirm_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(MsgChannelOpenConfirm) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(MsgServer).ChannelOpenConfirm(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/ibcgo.core.channel.v1.Msg/ChannelOpenConfirm", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(MsgServer).ChannelOpenConfirm(ctx, req.(*MsgChannelOpenConfirm)) + } + return interceptor(ctx, in, info, handler) +} + +func _Msg_ChannelCloseInit_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(MsgChannelCloseInit) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(MsgServer).ChannelCloseInit(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/ibcgo.core.channel.v1.Msg/ChannelCloseInit", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(MsgServer).ChannelCloseInit(ctx, req.(*MsgChannelCloseInit)) + } + return interceptor(ctx, in, info, handler) +} + +func _Msg_ChannelCloseConfirm_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(MsgChannelCloseConfirm) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(MsgServer).ChannelCloseConfirm(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/ibcgo.core.channel.v1.Msg/ChannelCloseConfirm", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(MsgServer).ChannelCloseConfirm(ctx, req.(*MsgChannelCloseConfirm)) + } + return interceptor(ctx, 
in, info, handler) +} + +func _Msg_RecvPacket_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(MsgRecvPacket) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(MsgServer).RecvPacket(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/ibcgo.core.channel.v1.Msg/RecvPacket", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(MsgServer).RecvPacket(ctx, req.(*MsgRecvPacket)) + } + return interceptor(ctx, in, info, handler) +} + +func _Msg_Timeout_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(MsgTimeout) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(MsgServer).Timeout(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/ibcgo.core.channel.v1.Msg/Timeout", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(MsgServer).Timeout(ctx, req.(*MsgTimeout)) + } + return interceptor(ctx, in, info, handler) +} + +func _Msg_TimeoutOnClose_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(MsgTimeoutOnClose) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(MsgServer).TimeoutOnClose(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/ibcgo.core.channel.v1.Msg/TimeoutOnClose", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(MsgServer).TimeoutOnClose(ctx, req.(*MsgTimeoutOnClose)) + } + return interceptor(ctx, in, info, handler) +} + +func _Msg_Acknowledgement_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(MsgAcknowledgement) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(MsgServer).Acknowledgement(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/ibcgo.core.channel.v1.Msg/Acknowledgement", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(MsgServer).Acknowledgement(ctx, req.(*MsgAcknowledgement)) + } + return interceptor(ctx, in, info, handler) +} + +var _Msg_serviceDesc = grpc.ServiceDesc{ + ServiceName: "ibcgo.core.channel.v1.Msg", + HandlerType: (*MsgServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "ChannelOpenInit", + Handler: _Msg_ChannelOpenInit_Handler, + }, + { + MethodName: "ChannelOpenTry", + Handler: _Msg_ChannelOpenTry_Handler, + }, + { + MethodName: "ChannelOpenAck", + Handler: _Msg_ChannelOpenAck_Handler, + }, + { + MethodName: "ChannelOpenConfirm", + Handler: _Msg_ChannelOpenConfirm_Handler, + }, + { + MethodName: "ChannelCloseInit", + Handler: _Msg_ChannelCloseInit_Handler, + }, + { + MethodName: "ChannelCloseConfirm", + Handler: _Msg_ChannelCloseConfirm_Handler, + }, + { + MethodName: "RecvPacket", + Handler: _Msg_RecvPacket_Handler, + }, + { + MethodName: "Timeout", + Handler: _Msg_Timeout_Handler, + }, + { + MethodName: "TimeoutOnClose", + Handler: _Msg_TimeoutOnClose_Handler, + }, + { + MethodName: "Acknowledgement", + Handler: _Msg_Acknowledgement_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + 
Metadata: "ibcgo/core/channel/v1/tx.proto", +} + +func (m *MsgChannelOpenInit) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *MsgChannelOpenInit) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *MsgChannelOpenInit) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Signer) > 0 { + i -= len(m.Signer) + copy(dAtA[i:], m.Signer) + i = encodeVarintTx(dAtA, i, uint64(len(m.Signer))) + i-- + dAtA[i] = 0x1a + } + { + size, err := m.Channel.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTx(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + if len(m.PortId) > 0 { + i -= len(m.PortId) + copy(dAtA[i:], m.PortId) + i = encodeVarintTx(dAtA, i, uint64(len(m.PortId))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *MsgChannelOpenInitResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *MsgChannelOpenInitResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *MsgChannelOpenInitResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + return len(dAtA) - i, nil +} + +func (m *MsgChannelOpenTry) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *MsgChannelOpenTry) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *MsgChannelOpenTry) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Signer) > 0 { + i -= len(m.Signer) + copy(dAtA[i:], m.Signer) + i = encodeVarintTx(dAtA, i, uint64(len(m.Signer))) + i-- + dAtA[i] = 0x3a + } + { + size, err := m.ProofHeight.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTx(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x32 + if len(m.ProofInit) > 0 { + i -= len(m.ProofInit) + copy(dAtA[i:], m.ProofInit) + i = encodeVarintTx(dAtA, i, uint64(len(m.ProofInit))) + i-- + dAtA[i] = 0x2a + } + if len(m.CounterpartyVersion) > 0 { + i -= len(m.CounterpartyVersion) + copy(dAtA[i:], m.CounterpartyVersion) + i = encodeVarintTx(dAtA, i, uint64(len(m.CounterpartyVersion))) + i-- + dAtA[i] = 0x22 + } + { + size, err := m.Channel.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTx(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + if len(m.PreviousChannelId) > 0 { + i -= len(m.PreviousChannelId) + copy(dAtA[i:], m.PreviousChannelId) + i = encodeVarintTx(dAtA, i, uint64(len(m.PreviousChannelId))) + i-- + dAtA[i] = 0x12 + } + if len(m.PortId) > 0 { + i -= len(m.PortId) + copy(dAtA[i:], m.PortId) + i = encodeVarintTx(dAtA, i, uint64(len(m.PortId))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *MsgChannelOpenTryResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + 
return nil, err + } + return dAtA[:n], nil +} + +func (m *MsgChannelOpenTryResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *MsgChannelOpenTryResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + return len(dAtA) - i, nil +} + +func (m *MsgChannelOpenAck) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *MsgChannelOpenAck) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *MsgChannelOpenAck) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Signer) > 0 { + i -= len(m.Signer) + copy(dAtA[i:], m.Signer) + i = encodeVarintTx(dAtA, i, uint64(len(m.Signer))) + i-- + dAtA[i] = 0x3a + } + { + size, err := m.ProofHeight.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTx(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x32 + if len(m.ProofTry) > 0 { + i -= len(m.ProofTry) + copy(dAtA[i:], m.ProofTry) + i = encodeVarintTx(dAtA, i, uint64(len(m.ProofTry))) + i-- + dAtA[i] = 0x2a + } + if len(m.CounterpartyVersion) > 0 { + i -= len(m.CounterpartyVersion) + copy(dAtA[i:], m.CounterpartyVersion) + i = encodeVarintTx(dAtA, i, uint64(len(m.CounterpartyVersion))) + i-- + dAtA[i] = 0x22 + } + if len(m.CounterpartyChannelId) > 0 { + i -= len(m.CounterpartyChannelId) + copy(dAtA[i:], m.CounterpartyChannelId) + i = encodeVarintTx(dAtA, i, uint64(len(m.CounterpartyChannelId))) + i-- + dAtA[i] = 0x1a + } + if len(m.ChannelId) > 0 { + i -= len(m.ChannelId) + copy(dAtA[i:], m.ChannelId) + i = encodeVarintTx(dAtA, i, uint64(len(m.ChannelId))) + i-- + dAtA[i] = 0x12 + } + if len(m.PortId) > 0 { + i -= len(m.PortId) + copy(dAtA[i:], m.PortId) + i = encodeVarintTx(dAtA, i, uint64(len(m.PortId))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *MsgChannelOpenAckResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *MsgChannelOpenAckResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *MsgChannelOpenAckResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + return len(dAtA) - i, nil +} + +func (m *MsgChannelOpenConfirm) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *MsgChannelOpenConfirm) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *MsgChannelOpenConfirm) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Signer) > 0 { + i -= len(m.Signer) + copy(dAtA[i:], m.Signer) + i = encodeVarintTx(dAtA, i, uint64(len(m.Signer))) + i-- + dAtA[i] = 0x2a + } + { + size, err := m.ProofHeight.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTx(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + if len(m.ProofAck) > 0 { + i -= len(m.ProofAck) + 
copy(dAtA[i:], m.ProofAck) + i = encodeVarintTx(dAtA, i, uint64(len(m.ProofAck))) + i-- + dAtA[i] = 0x1a + } + if len(m.ChannelId) > 0 { + i -= len(m.ChannelId) + copy(dAtA[i:], m.ChannelId) + i = encodeVarintTx(dAtA, i, uint64(len(m.ChannelId))) + i-- + dAtA[i] = 0x12 + } + if len(m.PortId) > 0 { + i -= len(m.PortId) + copy(dAtA[i:], m.PortId) + i = encodeVarintTx(dAtA, i, uint64(len(m.PortId))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *MsgChannelOpenConfirmResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *MsgChannelOpenConfirmResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *MsgChannelOpenConfirmResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + return len(dAtA) - i, nil +} + +func (m *MsgChannelCloseInit) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *MsgChannelCloseInit) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *MsgChannelCloseInit) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Signer) > 0 { + i -= len(m.Signer) + copy(dAtA[i:], m.Signer) + i = encodeVarintTx(dAtA, i, uint64(len(m.Signer))) + i-- + dAtA[i] = 0x1a + } + if len(m.ChannelId) > 0 { + i -= len(m.ChannelId) + copy(dAtA[i:], m.ChannelId) + i = encodeVarintTx(dAtA, i, uint64(len(m.ChannelId))) + i-- + dAtA[i] = 0x12 + } + if len(m.PortId) > 0 { + i -= len(m.PortId) + copy(dAtA[i:], m.PortId) + i = encodeVarintTx(dAtA, i, uint64(len(m.PortId))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *MsgChannelCloseInitResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *MsgChannelCloseInitResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *MsgChannelCloseInitResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + return len(dAtA) - i, nil +} + +func (m *MsgChannelCloseConfirm) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *MsgChannelCloseConfirm) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *MsgChannelCloseConfirm) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Signer) > 0 { + i -= len(m.Signer) + copy(dAtA[i:], m.Signer) + i = encodeVarintTx(dAtA, i, uint64(len(m.Signer))) + i-- + dAtA[i] = 0x2a + } + { + size, err := m.ProofHeight.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTx(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + if len(m.ProofInit) > 0 { + i -= len(m.ProofInit) + copy(dAtA[i:], m.ProofInit) + i = encodeVarintTx(dAtA, i, uint64(len(m.ProofInit))) + i-- + 
dAtA[i] = 0x1a + } + if len(m.ChannelId) > 0 { + i -= len(m.ChannelId) + copy(dAtA[i:], m.ChannelId) + i = encodeVarintTx(dAtA, i, uint64(len(m.ChannelId))) + i-- + dAtA[i] = 0x12 + } + if len(m.PortId) > 0 { + i -= len(m.PortId) + copy(dAtA[i:], m.PortId) + i = encodeVarintTx(dAtA, i, uint64(len(m.PortId))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *MsgChannelCloseConfirmResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *MsgChannelCloseConfirmResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *MsgChannelCloseConfirmResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + return len(dAtA) - i, nil +} + +func (m *MsgRecvPacket) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *MsgRecvPacket) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *MsgRecvPacket) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Signer) > 0 { + i -= len(m.Signer) + copy(dAtA[i:], m.Signer) + i = encodeVarintTx(dAtA, i, uint64(len(m.Signer))) + i-- + dAtA[i] = 0x22 + } + { + size, err := m.ProofHeight.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTx(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + if len(m.ProofCommitment) > 0 { + i -= len(m.ProofCommitment) + copy(dAtA[i:], m.ProofCommitment) + i = encodeVarintTx(dAtA, i, uint64(len(m.ProofCommitment))) + i-- + dAtA[i] = 0x12 + } + { + size, err := m.Packet.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTx(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *MsgRecvPacketResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *MsgRecvPacketResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *MsgRecvPacketResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + return len(dAtA) - i, nil +} + +func (m *MsgTimeout) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *MsgTimeout) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *MsgTimeout) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Signer) > 0 { + i -= len(m.Signer) + copy(dAtA[i:], m.Signer) + i = encodeVarintTx(dAtA, i, uint64(len(m.Signer))) + i-- + dAtA[i] = 0x2a + } + if m.NextSequenceRecv != 0 { + i = encodeVarintTx(dAtA, i, uint64(m.NextSequenceRecv)) + i-- + dAtA[i] = 0x20 + } + { + size, err := m.ProofHeight.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTx(dAtA, 
i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + if len(m.ProofUnreceived) > 0 { + i -= len(m.ProofUnreceived) + copy(dAtA[i:], m.ProofUnreceived) + i = encodeVarintTx(dAtA, i, uint64(len(m.ProofUnreceived))) + i-- + dAtA[i] = 0x12 + } + { + size, err := m.Packet.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTx(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *MsgTimeoutResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *MsgTimeoutResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *MsgTimeoutResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + return len(dAtA) - i, nil +} + +func (m *MsgTimeoutOnClose) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *MsgTimeoutOnClose) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *MsgTimeoutOnClose) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Signer) > 0 { + i -= len(m.Signer) + copy(dAtA[i:], m.Signer) + i = encodeVarintTx(dAtA, i, uint64(len(m.Signer))) + i-- + dAtA[i] = 0x32 + } + if m.NextSequenceRecv != 0 { + i = encodeVarintTx(dAtA, i, uint64(m.NextSequenceRecv)) + i-- + dAtA[i] = 0x28 + } + { + size, err := m.ProofHeight.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTx(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + if len(m.ProofClose) > 0 { + i -= len(m.ProofClose) + copy(dAtA[i:], m.ProofClose) + i = encodeVarintTx(dAtA, i, uint64(len(m.ProofClose))) + i-- + dAtA[i] = 0x1a + } + if len(m.ProofUnreceived) > 0 { + i -= len(m.ProofUnreceived) + copy(dAtA[i:], m.ProofUnreceived) + i = encodeVarintTx(dAtA, i, uint64(len(m.ProofUnreceived))) + i-- + dAtA[i] = 0x12 + } + { + size, err := m.Packet.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTx(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *MsgTimeoutOnCloseResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *MsgTimeoutOnCloseResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *MsgTimeoutOnCloseResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + return len(dAtA) - i, nil +} + +func (m *MsgAcknowledgement) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *MsgAcknowledgement) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *MsgAcknowledgement) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Signer) > 0 { + i -= 
len(m.Signer) + copy(dAtA[i:], m.Signer) + i = encodeVarintTx(dAtA, i, uint64(len(m.Signer))) + i-- + dAtA[i] = 0x2a + } + { + size, err := m.ProofHeight.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTx(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + if len(m.ProofAcked) > 0 { + i -= len(m.ProofAcked) + copy(dAtA[i:], m.ProofAcked) + i = encodeVarintTx(dAtA, i, uint64(len(m.ProofAcked))) + i-- + dAtA[i] = 0x1a + } + if len(m.Acknowledgement) > 0 { + i -= len(m.Acknowledgement) + copy(dAtA[i:], m.Acknowledgement) + i = encodeVarintTx(dAtA, i, uint64(len(m.Acknowledgement))) + i-- + dAtA[i] = 0x12 + } + { + size, err := m.Packet.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTx(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *MsgAcknowledgementResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *MsgAcknowledgementResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *MsgAcknowledgementResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + return len(dAtA) - i, nil +} + +func encodeVarintTx(dAtA []byte, offset int, v uint64) int { + offset -= sovTx(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *MsgChannelOpenInit) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.PortId) + if l > 0 { + n += 1 + l + sovTx(uint64(l)) + } + l = m.Channel.Size() + n += 1 + l + sovTx(uint64(l)) + l = len(m.Signer) + if l > 0 { + n += 1 + l + sovTx(uint64(l)) + } + return n +} + +func (m *MsgChannelOpenInitResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + return n +} + +func (m *MsgChannelOpenTry) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.PortId) + if l > 0 { + n += 1 + l + sovTx(uint64(l)) + } + l = len(m.PreviousChannelId) + if l > 0 { + n += 1 + l + sovTx(uint64(l)) + } + l = m.Channel.Size() + n += 1 + l + sovTx(uint64(l)) + l = len(m.CounterpartyVersion) + if l > 0 { + n += 1 + l + sovTx(uint64(l)) + } + l = len(m.ProofInit) + if l > 0 { + n += 1 + l + sovTx(uint64(l)) + } + l = m.ProofHeight.Size() + n += 1 + l + sovTx(uint64(l)) + l = len(m.Signer) + if l > 0 { + n += 1 + l + sovTx(uint64(l)) + } + return n +} + +func (m *MsgChannelOpenTryResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + return n +} + +func (m *MsgChannelOpenAck) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.PortId) + if l > 0 { + n += 1 + l + sovTx(uint64(l)) + } + l = len(m.ChannelId) + if l > 0 { + n += 1 + l + sovTx(uint64(l)) + } + l = len(m.CounterpartyChannelId) + if l > 0 { + n += 1 + l + sovTx(uint64(l)) + } + l = len(m.CounterpartyVersion) + if l > 0 { + n += 1 + l + sovTx(uint64(l)) + } + l = len(m.ProofTry) + if l > 0 { + n += 1 + l + sovTx(uint64(l)) + } + l = m.ProofHeight.Size() + n += 1 + l + sovTx(uint64(l)) + l = len(m.Signer) + if l > 0 { + n += 1 + l + sovTx(uint64(l)) + } + return n +} + +func (m *MsgChannelOpenAckResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + return n +} + +func (m 
*MsgChannelOpenConfirm) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.PortId) + if l > 0 { + n += 1 + l + sovTx(uint64(l)) + } + l = len(m.ChannelId) + if l > 0 { + n += 1 + l + sovTx(uint64(l)) + } + l = len(m.ProofAck) + if l > 0 { + n += 1 + l + sovTx(uint64(l)) + } + l = m.ProofHeight.Size() + n += 1 + l + sovTx(uint64(l)) + l = len(m.Signer) + if l > 0 { + n += 1 + l + sovTx(uint64(l)) + } + return n +} + +func (m *MsgChannelOpenConfirmResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + return n +} + +func (m *MsgChannelCloseInit) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.PortId) + if l > 0 { + n += 1 + l + sovTx(uint64(l)) + } + l = len(m.ChannelId) + if l > 0 { + n += 1 + l + sovTx(uint64(l)) + } + l = len(m.Signer) + if l > 0 { + n += 1 + l + sovTx(uint64(l)) + } + return n +} + +func (m *MsgChannelCloseInitResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + return n +} + +func (m *MsgChannelCloseConfirm) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.PortId) + if l > 0 { + n += 1 + l + sovTx(uint64(l)) + } + l = len(m.ChannelId) + if l > 0 { + n += 1 + l + sovTx(uint64(l)) + } + l = len(m.ProofInit) + if l > 0 { + n += 1 + l + sovTx(uint64(l)) + } + l = m.ProofHeight.Size() + n += 1 + l + sovTx(uint64(l)) + l = len(m.Signer) + if l > 0 { + n += 1 + l + sovTx(uint64(l)) + } + return n +} + +func (m *MsgChannelCloseConfirmResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + return n +} + +func (m *MsgRecvPacket) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.Packet.Size() + n += 1 + l + sovTx(uint64(l)) + l = len(m.ProofCommitment) + if l > 0 { + n += 1 + l + sovTx(uint64(l)) + } + l = m.ProofHeight.Size() + n += 1 + l + sovTx(uint64(l)) + l = len(m.Signer) + if l > 0 { + n += 1 + l + sovTx(uint64(l)) + } + return n +} + +func (m *MsgRecvPacketResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + return n +} + +func (m *MsgTimeout) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.Packet.Size() + n += 1 + l + sovTx(uint64(l)) + l = len(m.ProofUnreceived) + if l > 0 { + n += 1 + l + sovTx(uint64(l)) + } + l = m.ProofHeight.Size() + n += 1 + l + sovTx(uint64(l)) + if m.NextSequenceRecv != 0 { + n += 1 + sovTx(uint64(m.NextSequenceRecv)) + } + l = len(m.Signer) + if l > 0 { + n += 1 + l + sovTx(uint64(l)) + } + return n +} + +func (m *MsgTimeoutResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + return n +} + +func (m *MsgTimeoutOnClose) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.Packet.Size() + n += 1 + l + sovTx(uint64(l)) + l = len(m.ProofUnreceived) + if l > 0 { + n += 1 + l + sovTx(uint64(l)) + } + l = len(m.ProofClose) + if l > 0 { + n += 1 + l + sovTx(uint64(l)) + } + l = m.ProofHeight.Size() + n += 1 + l + sovTx(uint64(l)) + if m.NextSequenceRecv != 0 { + n += 1 + sovTx(uint64(m.NextSequenceRecv)) + } + l = len(m.Signer) + if l > 0 { + n += 1 + l + sovTx(uint64(l)) + } + return n +} + +func (m *MsgTimeoutOnCloseResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + return n +} + +func (m *MsgAcknowledgement) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.Packet.Size() + n += 1 + l + sovTx(uint64(l)) + l = len(m.Acknowledgement) + if l > 0 { + n += 1 + l + sovTx(uint64(l)) + } + l = len(m.ProofAcked) + 
if l > 0 { + n += 1 + l + sovTx(uint64(l)) + } + l = m.ProofHeight.Size() + n += 1 + l + sovTx(uint64(l)) + l = len(m.Signer) + if l > 0 { + n += 1 + l + sovTx(uint64(l)) + } + return n +} + +func (m *MsgAcknowledgementResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + return n +} + +func sovTx(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozTx(x uint64) (n int) { + return sovTx(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *MsgChannelOpenInit) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTx + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MsgChannelOpenInit: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MsgChannelOpenInit: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PortId", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTx + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTx + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTx + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.PortId = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Channel", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTx + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTx + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTx + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Channel.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Signer", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTx + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTx + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTx + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Signer = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTx(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTx + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *MsgChannelOpenInitResponse) Unmarshal(dAtA 
[]byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTx + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MsgChannelOpenInitResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MsgChannelOpenInitResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipTx(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTx + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *MsgChannelOpenTry) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTx + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MsgChannelOpenTry: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MsgChannelOpenTry: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PortId", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTx + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTx + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTx + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.PortId = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PreviousChannelId", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTx + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTx + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTx + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.PreviousChannelId = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Channel", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTx + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTx + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTx + } + if postIndex > 
l { + return io.ErrUnexpectedEOF + } + if err := m.Channel.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CounterpartyVersion", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTx + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTx + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTx + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.CounterpartyVersion = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ProofInit", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTx + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthTx + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthTx + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ProofInit = append(m.ProofInit[:0], dAtA[iNdEx:postIndex]...) + if m.ProofInit == nil { + m.ProofInit = []byte{} + } + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ProofHeight", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTx + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTx + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTx + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ProofHeight.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Signer", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTx + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTx + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTx + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Signer = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTx(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTx + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *MsgChannelOpenTryResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTx + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << 
shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MsgChannelOpenTryResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MsgChannelOpenTryResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipTx(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTx + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *MsgChannelOpenAck) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTx + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MsgChannelOpenAck: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MsgChannelOpenAck: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PortId", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTx + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTx + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTx + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.PortId = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ChannelId", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTx + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTx + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTx + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ChannelId = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CounterpartyChannelId", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTx + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTx + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTx + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.CounterpartyChannelId = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CounterpartyVersion", wireType) + } + 
var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTx + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTx + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTx + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.CounterpartyVersion = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ProofTry", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTx + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthTx + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthTx + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ProofTry = append(m.ProofTry[:0], dAtA[iNdEx:postIndex]...) + if m.ProofTry == nil { + m.ProofTry = []byte{} + } + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ProofHeight", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTx + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTx + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTx + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ProofHeight.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Signer", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTx + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTx + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTx + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Signer = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTx(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTx + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *MsgChannelOpenAckResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTx + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MsgChannelOpenAckResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: 
MsgChannelOpenAckResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipTx(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTx + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *MsgChannelOpenConfirm) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTx + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MsgChannelOpenConfirm: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MsgChannelOpenConfirm: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PortId", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTx + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTx + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTx + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.PortId = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ChannelId", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTx + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTx + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTx + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ChannelId = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ProofAck", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTx + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthTx + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthTx + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ProofAck = append(m.ProofAck[:0], dAtA[iNdEx:postIndex]...) 
+ if m.ProofAck == nil { + m.ProofAck = []byte{} + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ProofHeight", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTx + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTx + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTx + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ProofHeight.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Signer", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTx + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTx + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTx + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Signer = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTx(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTx + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *MsgChannelOpenConfirmResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTx + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MsgChannelOpenConfirmResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MsgChannelOpenConfirmResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipTx(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTx + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *MsgChannelCloseInit) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTx + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MsgChannelCloseInit: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MsgChannelCloseInit: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong 
wireType = %d for field PortId", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTx + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTx + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTx + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.PortId = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ChannelId", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTx + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTx + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTx + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ChannelId = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Signer", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTx + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTx + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTx + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Signer = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTx(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTx + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *MsgChannelCloseInitResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTx + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MsgChannelCloseInitResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MsgChannelCloseInitResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipTx(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTx + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *MsgChannelCloseConfirm) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 
ErrIntOverflowTx + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MsgChannelCloseConfirm: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MsgChannelCloseConfirm: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PortId", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTx + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTx + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTx + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.PortId = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ChannelId", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTx + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTx + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTx + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ChannelId = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ProofInit", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTx + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthTx + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthTx + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ProofInit = append(m.ProofInit[:0], dAtA[iNdEx:postIndex]...) 
+ if m.ProofInit == nil { + m.ProofInit = []byte{} + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ProofHeight", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTx + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTx + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTx + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ProofHeight.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Signer", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTx + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTx + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTx + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Signer = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTx(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTx + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *MsgChannelCloseConfirmResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTx + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MsgChannelCloseConfirmResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MsgChannelCloseConfirmResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipTx(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTx + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *MsgRecvPacket) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTx + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MsgRecvPacket: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MsgRecvPacket: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d 
for field Packet", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTx + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTx + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTx + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Packet.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ProofCommitment", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTx + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthTx + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthTx + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ProofCommitment = append(m.ProofCommitment[:0], dAtA[iNdEx:postIndex]...) + if m.ProofCommitment == nil { + m.ProofCommitment = []byte{} + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ProofHeight", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTx + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTx + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTx + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ProofHeight.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Signer", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTx + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTx + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTx + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Signer = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTx(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTx + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *MsgRecvPacketResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTx + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MsgRecvPacketResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return 
fmt.Errorf("proto: MsgRecvPacketResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipTx(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTx + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *MsgTimeout) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTx + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MsgTimeout: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MsgTimeout: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Packet", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTx + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTx + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTx + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Packet.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ProofUnreceived", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTx + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthTx + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthTx + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ProofUnreceived = append(m.ProofUnreceived[:0], dAtA[iNdEx:postIndex]...) 
+ if m.ProofUnreceived == nil { + m.ProofUnreceived = []byte{} + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ProofHeight", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTx + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTx + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTx + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ProofHeight.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field NextSequenceRecv", wireType) + } + m.NextSequenceRecv = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTx + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.NextSequenceRecv |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Signer", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTx + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTx + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTx + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Signer = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTx(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTx + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *MsgTimeoutResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTx + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MsgTimeoutResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MsgTimeoutResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipTx(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTx + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *MsgTimeoutOnClose) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTx + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + 
fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MsgTimeoutOnClose: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MsgTimeoutOnClose: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Packet", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTx + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTx + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTx + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Packet.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ProofUnreceived", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTx + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthTx + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthTx + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ProofUnreceived = append(m.ProofUnreceived[:0], dAtA[iNdEx:postIndex]...) + if m.ProofUnreceived == nil { + m.ProofUnreceived = []byte{} + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ProofClose", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTx + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthTx + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthTx + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ProofClose = append(m.ProofClose[:0], dAtA[iNdEx:postIndex]...) 
+ if m.ProofClose == nil { + m.ProofClose = []byte{} + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ProofHeight", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTx + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTx + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTx + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ProofHeight.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field NextSequenceRecv", wireType) + } + m.NextSequenceRecv = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTx + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.NextSequenceRecv |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Signer", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTx + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTx + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTx + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Signer = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTx(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTx + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *MsgTimeoutOnCloseResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTx + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MsgTimeoutOnCloseResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MsgTimeoutOnCloseResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipTx(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTx + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *MsgAcknowledgement) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTx + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break 
+ } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MsgAcknowledgement: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MsgAcknowledgement: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Packet", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTx + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTx + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTx + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Packet.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Acknowledgement", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTx + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthTx + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthTx + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Acknowledgement = append(m.Acknowledgement[:0], dAtA[iNdEx:postIndex]...) + if m.Acknowledgement == nil { + m.Acknowledgement = []byte{} + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ProofAcked", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTx + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthTx + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthTx + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ProofAcked = append(m.ProofAcked[:0], dAtA[iNdEx:postIndex]...) 
+ if m.ProofAcked == nil { + m.ProofAcked = []byte{} + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ProofHeight", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTx + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTx + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTx + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ProofHeight.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Signer", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTx + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTx + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTx + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Signer = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTx(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTx + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *MsgAcknowledgementResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTx + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MsgAcknowledgementResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MsgAcknowledgementResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipTx(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTx + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipTx(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowTx + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowTx + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowTx 
+ } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthTx + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupTx + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthTx + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthTx = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowTx = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupTx = fmt.Errorf("proto: unexpected end of group") +) diff --git a/core/05-port/keeper/keeper.go b/core/05-port/keeper/keeper.go new file mode 100644 index 0000000000..8a4b2300a4 --- /dev/null +++ b/core/05-port/keeper/keeper.go @@ -0,0 +1,80 @@ +package keeper + +import ( + "fmt" + + "github.com/tendermint/tendermint/libs/log" + + sdk "github.com/cosmos/cosmos-sdk/types" + capabilitykeeper "github.com/cosmos/cosmos-sdk/x/capability/keeper" + capabilitytypes "github.com/cosmos/cosmos-sdk/x/capability/types" + "github.com/cosmos/cosmos-sdk/x/ibc/core/05-port/types" + host "github.com/cosmos/cosmos-sdk/x/ibc/core/24-host" +) + +// Keeper defines the IBC connection keeper +type Keeper struct { + scopedKeeper capabilitykeeper.ScopedKeeper +} + +// NewKeeper creates a new IBC connection Keeper instance +func NewKeeper(sck capabilitykeeper.ScopedKeeper) Keeper { + return Keeper{ + scopedKeeper: sck, + } +} + +// Logger returns a module-specific logger. +func (k Keeper) Logger(ctx sdk.Context) log.Logger { + return ctx.Logger().With("module", "x/"+host.ModuleName+"/"+types.SubModuleName) +} + +// isBounded checks a given port ID is already bounded. +func (k Keeper) isBound(ctx sdk.Context, portID string) bool { + _, ok := k.scopedKeeper.GetCapability(ctx, host.PortPath(portID)) + return ok +} + +// BindPort binds to a port and returns the associated capability. +// Ports must be bound statically when the chain starts in `app.go`. +// The capability must then be passed to a module which will need to pass +// it as an extra parameter when calling functions on the IBC module. +func (k *Keeper) BindPort(ctx sdk.Context, portID string) *capabilitytypes.Capability { + if err := host.PortIdentifierValidator(portID); err != nil { + panic(err.Error()) + } + + if k.isBound(ctx, portID) { + panic(fmt.Sprintf("port %s is already bound", portID)) + } + + key, err := k.scopedKeeper.NewCapability(ctx, host.PortPath(portID)) + if err != nil { + panic(err.Error()) + } + + k.Logger(ctx).Info("port binded", "port", portID) + return key +} + +// Authenticate authenticates a capability key against a port ID +// by checking if the memory address of the capability was previously +// generated and bound to the port (provided as a parameter) which the capability +// is being authenticated against. 
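+// An illustrative call site, with assumed variable names rather than anything defined in
+// this patch:
+//
+//	portCap, _ := scopedTransferKeeper.GetCapability(ctx, host.PortPath("transfer"))
+//	ok := portKeeper.Authenticate(ctx, portCap, "transfer")
+//
+// ok is true only when portCap is the capability object created when the "transfer" port
+// was bound, since capabilities are compared by memory address.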
+func (k Keeper) Authenticate(ctx sdk.Context, key *capabilitytypes.Capability, portID string) bool { + if err := host.PortIdentifierValidator(portID); err != nil { + panic(err.Error()) + } + + return k.scopedKeeper.AuthenticateCapability(ctx, key, host.PortPath(portID)) +} + +// LookupModuleByPort will return the IBCModule along with the capability associated with a given portID +func (k Keeper) LookupModuleByPort(ctx sdk.Context, portID string) (string, *capabilitytypes.Capability, error) { + modules, cap, err := k.scopedKeeper.LookupModules(ctx, host.PortPath(portID)) + if err != nil { + return "", nil, err + } + + return types.GetModuleOwner(modules), cap, nil +} diff --git a/core/05-port/keeper/keeper_test.go b/core/05-port/keeper/keeper_test.go new file mode 100644 index 0000000000..29c0e15857 --- /dev/null +++ b/core/05-port/keeper/keeper_test.go @@ -0,0 +1,70 @@ +package keeper_test + +import ( + "testing" + + "github.com/stretchr/testify/require" + "github.com/stretchr/testify/suite" + tmproto "github.com/tendermint/tendermint/proto/tendermint/types" + + "github.com/cosmos/cosmos-sdk/simapp" + sdk "github.com/cosmos/cosmos-sdk/types" + "github.com/cosmos/cosmos-sdk/x/ibc/core/05-port/keeper" +) + +var ( + validPort = "validportid" + invalidPort = "(invalidPortID)" +) + +type KeeperTestSuite struct { + suite.Suite + + ctx sdk.Context + keeper *keeper.Keeper +} + +func (suite *KeeperTestSuite) SetupTest() { + isCheckTx := false + app := simapp.Setup(isCheckTx) + + suite.ctx = app.BaseApp.NewContext(isCheckTx, tmproto.Header{}) + suite.keeper = &app.IBCKeeper.PortKeeper +} + +func TestKeeperTestSuite(t *testing.T) { + suite.Run(t, new(KeeperTestSuite)) +} + +func (suite *KeeperTestSuite) TestBind() { + // Test that invalid portID causes panic + require.Panics(suite.T(), func() { suite.keeper.BindPort(suite.ctx, invalidPort) }, "Did not panic on invalid portID") + + // Test that valid BindPort returns capability key + capKey := suite.keeper.BindPort(suite.ctx, validPort) + require.NotNil(suite.T(), capKey, "capabilityKey is nil on valid BindPort") + + // Test that rebinding the same portid causes panic + require.Panics(suite.T(), func() { suite.keeper.BindPort(suite.ctx, validPort) }, "did not panic on re-binding the same port") +} + +func (suite *KeeperTestSuite) TestAuthenticate() { + capKey := suite.keeper.BindPort(suite.ctx, validPort) + + // Require that passing in invalid portID causes panic + require.Panics(suite.T(), func() { suite.keeper.Authenticate(suite.ctx, capKey, invalidPort) }, "did not panic on invalid portID") + + // Valid authentication should return true + auth := suite.keeper.Authenticate(suite.ctx, capKey, validPort) + require.True(suite.T(), auth, "valid authentication failed") + + // Test that authenticating against incorrect portid fails + auth = suite.keeper.Authenticate(suite.ctx, capKey, "wrongportid") + require.False(suite.T(), auth, "invalid authentication failed") + + // Test that authenticating port against different valid + // capability key fails + capKey2 := suite.keeper.BindPort(suite.ctx, "otherportid") + auth = suite.keeper.Authenticate(suite.ctx, capKey2, validPort) + require.False(suite.T(), auth, "invalid authentication for different capKey failed") +} diff --git a/core/05-port/types/errors.go b/core/05-port/types/errors.go new file mode 100644 index 0000000000..23a2776f59 --- /dev/null +++ b/core/05-port/types/errors.go @@ -0,0 +1,13 @@ +package types + +import ( + sdkerrors "github.com/cosmos/cosmos-sdk/types/errors" +) + +// IBC port 
sentinel errors +var ( + ErrPortExists = sdkerrors.Register(SubModuleName, 2, "port is already binded") + ErrPortNotFound = sdkerrors.Register(SubModuleName, 3, "port not found") + ErrInvalidPort = sdkerrors.Register(SubModuleName, 4, "invalid port") + ErrInvalidRoute = sdkerrors.Register(SubModuleName, 5, "route not found") +) diff --git a/core/05-port/types/keys.go b/core/05-port/types/keys.go new file mode 100644 index 0000000000..6e79bb5350 --- /dev/null +++ b/core/05-port/types/keys.go @@ -0,0 +1,15 @@ +package types + +const ( + // SubModuleName defines the IBC port name + SubModuleName = "port" + + // StoreKey is the store key string for IBC ports + StoreKey = SubModuleName + + // RouterKey is the message route for IBC ports + RouterKey = SubModuleName + + // QuerierRoute is the querier route for IBC ports + QuerierRoute = SubModuleName +) diff --git a/core/05-port/types/module.go b/core/05-port/types/module.go new file mode 100644 index 0000000000..4c68673201 --- /dev/null +++ b/core/05-port/types/module.go @@ -0,0 +1,78 @@ +package types + +import ( + sdk "github.com/cosmos/cosmos-sdk/types" + + capabilitytypes "github.com/cosmos/cosmos-sdk/x/capability/types" + channeltypes "github.com/cosmos/cosmos-sdk/x/ibc/core/04-channel/types" +) + +// IBCModule defines an interface that implements all the callbacks +// that modules must define as specified in ICS-26 +type IBCModule interface { + OnChanOpenInit( + ctx sdk.Context, + order channeltypes.Order, + connectionHops []string, + portID string, + channelID string, + channelCap *capabilitytypes.Capability, + counterparty channeltypes.Counterparty, + version string, + ) error + + OnChanOpenTry( + ctx sdk.Context, + order channeltypes.Order, + connectionHops []string, + portID, + channelID string, + channelCap *capabilitytypes.Capability, + counterparty channeltypes.Counterparty, + version, + counterpartyVersion string, + ) error + + OnChanOpenAck( + ctx sdk.Context, + portID, + channelID string, + counterpartyVersion string, + ) error + + OnChanOpenConfirm( + ctx sdk.Context, + portID, + channelID string, + ) error + + OnChanCloseInit( + ctx sdk.Context, + portID, + channelID string, + ) error + + OnChanCloseConfirm( + ctx sdk.Context, + portID, + channelID string, + ) error + + // OnRecvPacket must return the acknowledgement bytes + // In the case of an asynchronous acknowledgement, nil should be returned. + OnRecvPacket( + ctx sdk.Context, + packet channeltypes.Packet, + ) (*sdk.Result, []byte, error) + + OnAcknowledgementPacket( + ctx sdk.Context, + packet channeltypes.Packet, + acknowledgement []byte, + ) (*sdk.Result, error) + + OnTimeoutPacket( + ctx sdk.Context, + packet channeltypes.Packet, + ) (*sdk.Result, error) +} diff --git a/core/05-port/types/router.go b/core/05-port/types/router.go new file mode 100644 index 0000000000..6bfba9076a --- /dev/null +++ b/core/05-port/types/router.go @@ -0,0 +1,65 @@ +package types + +import ( + "fmt" + + sdk "github.com/cosmos/cosmos-sdk/types" +) + +// The router is a map from module name to the IBCModule +// which contains all the module-defined callbacks required by ICS-26 +type Router struct { + routes map[string]IBCModule + sealed bool +} + +func NewRouter() *Router { + return &Router{ + routes: make(map[string]IBCModule), + } +} + +// Seal prevents the Router from any subsequent route handlers to be registered. +// Seal will panic if called more than once. 
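+// In practice the router is assembled once while the application is constructed and then
+// sealed before handling traffic; a hypothetical wiring sketch (the variable and package
+// names here are assumed, and the concrete wiring lives in the application's app.go, not
+// in this file):
+//
+//	router := porttypes.NewRouter()
+//	router.AddRoute(transfertypes.ModuleName, transferIBCModule) // transferIBCModule implements IBCModule
+//	router.Seal()
+//
+// After Seal, any further AddRoute call panics, so all ICS-26 callbacks must be registered
+// up front.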
+func (rtr *Router) Seal() { + if rtr.sealed { + panic("router already sealed") + } + rtr.sealed = true +} + +// Sealed returns a boolean signifying if the Router is sealed or not. +func (rtr Router) Sealed() bool { + return rtr.sealed +} + +// AddRoute adds IBCModule for a given module name. It returns the Router +// so AddRoute calls can be linked. It will panic if the Router is sealed. +func (rtr *Router) AddRoute(module string, cbs IBCModule) *Router { + if rtr.sealed { + panic(fmt.Sprintf("router sealed; cannot register %s route callbacks", module)) + } + if !sdk.IsAlphaNumeric(module) { + panic("route expressions can only contain alphanumeric characters") + } + if rtr.HasRoute(module) { + panic(fmt.Sprintf("route %s has already been registered", module)) + } + + rtr.routes[module] = cbs + return rtr +} + +// HasRoute returns true if the Router has a module registered or false otherwise. +func (rtr *Router) HasRoute(module string) bool { + _, ok := rtr.routes[module] + return ok +} + +// GetRoute returns a IBCModule for a given module. +func (rtr *Router) GetRoute(module string) (IBCModule, bool) { + if !rtr.HasRoute(module) { + return nil, false + } + return rtr.routes[module], true +} diff --git a/core/05-port/types/utils.go b/core/05-port/types/utils.go new file mode 100644 index 0000000000..a12f2ef7f5 --- /dev/null +++ b/core/05-port/types/utils.go @@ -0,0 +1,17 @@ +package types + +import "fmt" + +// GetModuleOwner enforces that only IBC and the module bound to port can own the capability +// while future implementations may allow multiple modules to bind to a port, currently we +// only allow one module to be bound to a port at any given time +func GetModuleOwner(modules []string) string { + if len(modules) != 2 { + panic(fmt.Sprintf("capability should only be owned by port or channel owner and ibc module, multiple owners currently not supported, owners: %v", modules)) + } + + if modules[0] == "ibc" { + return modules[1] + } + return modules[0] +} diff --git a/core/23-commitment/types/bench_test.go b/core/23-commitment/types/bench_test.go new file mode 100644 index 0000000000..83794fc6f6 --- /dev/null +++ b/core/23-commitment/types/bench_test.go @@ -0,0 +1,15 @@ +package types + +import ( + "testing" +) + +func BenchmarkMerkleProofEmpty(b *testing.B) { + b.ReportAllocs() + var mk MerkleProof + for i := 0; i < b.N; i++ { + if !mk.Empty() { + b.Fatal("supposed to be empty") + } + } +} diff --git a/core/23-commitment/types/codec.go b/core/23-commitment/types/codec.go new file mode 100644 index 0000000000..1195c7c26d --- /dev/null +++ b/core/23-commitment/types/codec.go @@ -0,0 +1,43 @@ +package types + +import ( + codectypes "github.com/cosmos/cosmos-sdk/codec/types" + "github.com/cosmos/cosmos-sdk/x/ibc/core/exported" +) + +// RegisterInterfaces registers the commitment interfaces to protobuf Any. 
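+// A typical caller is the application's codec setup; a minimal sketch, assuming the
+// standard cosmos-sdk codectypes package (names not taken from this file):
+//
+//	registry := codectypes.NewInterfaceRegistry()
+//	RegisterInterfaces(registry)
+//
+// after which MerkleRoot, MerklePrefix, MerklePath and MerkleProof can be packed into and
+// resolved from protobuf Any values through the exported Root, Prefix, Path and Proof
+// interfaces registered below.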
+func RegisterInterfaces(registry codectypes.InterfaceRegistry) { + registry.RegisterInterface( + "ibc.core.commitment.v1.Root", + (*exported.Root)(nil), + ) + registry.RegisterInterface( + "ibc.core.commitment.v1.Prefix", + (*exported.Prefix)(nil), + ) + registry.RegisterInterface( + "ibc.core.commitment.v1.Path", + (*exported.Path)(nil), + ) + registry.RegisterInterface( + "ibc.core.commitment.v1.Proof", + (*exported.Proof)(nil), + ) + + registry.RegisterImplementations( + (*exported.Root)(nil), + &MerkleRoot{}, + ) + registry.RegisterImplementations( + (*exported.Prefix)(nil), + &MerklePrefix{}, + ) + registry.RegisterImplementations( + (*exported.Path)(nil), + &MerklePath{}, + ) + registry.RegisterImplementations( + (*exported.Proof)(nil), + &MerkleProof{}, + ) +} diff --git a/core/23-commitment/types/commitment.pb.go b/core/23-commitment/types/commitment.pb.go new file mode 100644 index 0000000000..ac4201c481 --- /dev/null +++ b/core/23-commitment/types/commitment.pb.go @@ -0,0 +1,863 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: ibcgo/core/commitment/v1/commitment.proto + +package types + +import ( + fmt "fmt" + _go "github.com/confio/ics23/go" + _ "github.com/gogo/protobuf/gogoproto" + proto "github.com/gogo/protobuf/proto" + io "io" + math "math" + math_bits "math/bits" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +// MerkleRoot defines a merkle root hash. +// In the Cosmos SDK, the AppHash of a block header becomes the root. +type MerkleRoot struct { + Hash []byte `protobuf:"bytes,1,opt,name=hash,proto3" json:"hash,omitempty"` +} + +func (m *MerkleRoot) Reset() { *m = MerkleRoot{} } +func (m *MerkleRoot) String() string { return proto.CompactTextString(m) } +func (*MerkleRoot) ProtoMessage() {} +func (*MerkleRoot) Descriptor() ([]byte, []int) { + return fileDescriptor_eb23d5444771a147, []int{0} +} +func (m *MerkleRoot) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *MerkleRoot) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_MerkleRoot.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *MerkleRoot) XXX_Merge(src proto.Message) { + xxx_messageInfo_MerkleRoot.Merge(m, src) +} +func (m *MerkleRoot) XXX_Size() int { + return m.Size() +} +func (m *MerkleRoot) XXX_DiscardUnknown() { + xxx_messageInfo_MerkleRoot.DiscardUnknown(m) +} + +var xxx_messageInfo_MerkleRoot proto.InternalMessageInfo + +// MerklePrefix is merkle path prefixed to the key. 
+// The constructed key from the Path and the key will be append(Path.KeyPath, +// append(Path.KeyPrefix, key...)) +type MerklePrefix struct { + KeyPrefix []byte `protobuf:"bytes,1,opt,name=key_prefix,json=keyPrefix,proto3" json:"key_prefix,omitempty" yaml:"key_prefix"` +} + +func (m *MerklePrefix) Reset() { *m = MerklePrefix{} } +func (m *MerklePrefix) String() string { return proto.CompactTextString(m) } +func (*MerklePrefix) ProtoMessage() {} +func (*MerklePrefix) Descriptor() ([]byte, []int) { + return fileDescriptor_eb23d5444771a147, []int{1} +} +func (m *MerklePrefix) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *MerklePrefix) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_MerklePrefix.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *MerklePrefix) XXX_Merge(src proto.Message) { + xxx_messageInfo_MerklePrefix.Merge(m, src) +} +func (m *MerklePrefix) XXX_Size() int { + return m.Size() +} +func (m *MerklePrefix) XXX_DiscardUnknown() { + xxx_messageInfo_MerklePrefix.DiscardUnknown(m) +} + +var xxx_messageInfo_MerklePrefix proto.InternalMessageInfo + +func (m *MerklePrefix) GetKeyPrefix() []byte { + if m != nil { + return m.KeyPrefix + } + return nil +} + +// MerklePath is the path used to verify commitment proofs, which can be an +// arbitrary structured object (defined by a commitment type). +// MerklePath is represented from root-to-leaf +type MerklePath struct { + KeyPath []string `protobuf:"bytes,1,rep,name=key_path,json=keyPath,proto3" json:"key_path,omitempty" yaml:"key_path"` +} + +func (m *MerklePath) Reset() { *m = MerklePath{} } +func (*MerklePath) ProtoMessage() {} +func (*MerklePath) Descriptor() ([]byte, []int) { + return fileDescriptor_eb23d5444771a147, []int{2} +} +func (m *MerklePath) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *MerklePath) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_MerklePath.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *MerklePath) XXX_Merge(src proto.Message) { + xxx_messageInfo_MerklePath.Merge(m, src) +} +func (m *MerklePath) XXX_Size() int { + return m.Size() +} +func (m *MerklePath) XXX_DiscardUnknown() { + xxx_messageInfo_MerklePath.DiscardUnknown(m) +} + +var xxx_messageInfo_MerklePath proto.InternalMessageInfo + +func (m *MerklePath) GetKeyPath() []string { + if m != nil { + return m.KeyPath + } + return nil +} + +// MerkleProof is a wrapper type over a chain of CommitmentProofs. +// It demonstrates membership or non-membership for an element or set of +// elements, verifiable in conjunction with a known commitment root. Proofs +// should be succinct. 
+// MerkleProofs are ordered from leaf-to-root +type MerkleProof struct { + Proofs []*_go.CommitmentProof `protobuf:"bytes,1,rep,name=proofs,proto3" json:"proofs,omitempty"` +} + +func (m *MerkleProof) Reset() { *m = MerkleProof{} } +func (m *MerkleProof) String() string { return proto.CompactTextString(m) } +func (*MerkleProof) ProtoMessage() {} +func (*MerkleProof) Descriptor() ([]byte, []int) { + return fileDescriptor_eb23d5444771a147, []int{3} +} +func (m *MerkleProof) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *MerkleProof) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_MerkleProof.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *MerkleProof) XXX_Merge(src proto.Message) { + xxx_messageInfo_MerkleProof.Merge(m, src) +} +func (m *MerkleProof) XXX_Size() int { + return m.Size() +} +func (m *MerkleProof) XXX_DiscardUnknown() { + xxx_messageInfo_MerkleProof.DiscardUnknown(m) +} + +var xxx_messageInfo_MerkleProof proto.InternalMessageInfo + +func (m *MerkleProof) GetProofs() []*_go.CommitmentProof { + if m != nil { + return m.Proofs + } + return nil +} + +func init() { + proto.RegisterType((*MerkleRoot)(nil), "ibcgo.core.commitment.v1.MerkleRoot") + proto.RegisterType((*MerklePrefix)(nil), "ibcgo.core.commitment.v1.MerklePrefix") + proto.RegisterType((*MerklePath)(nil), "ibcgo.core.commitment.v1.MerklePath") + proto.RegisterType((*MerkleProof)(nil), "ibcgo.core.commitment.v1.MerkleProof") +} + +func init() { + proto.RegisterFile("ibcgo/core/commitment/v1/commitment.proto", fileDescriptor_eb23d5444771a147) +} + +var fileDescriptor_eb23d5444771a147 = []byte{ + // 329 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x4c, 0x91, 0xbf, 0x4e, 0xc3, 0x30, + 0x10, 0xc6, 0x13, 0x51, 0x15, 0xea, 0x56, 0x42, 0xa4, 0x80, 0xaa, 0x0e, 0x29, 0xca, 0x80, 0xca, + 0x50, 0x5b, 0x6d, 0x99, 0x2a, 0xb1, 0x04, 0x36, 0x84, 0x54, 0x65, 0x64, 0x41, 0x89, 0xe5, 0x24, + 0x56, 0x9b, 0x5e, 0x14, 0x9b, 0x8a, 0xbc, 0x01, 0x23, 0x23, 0x23, 0x8f, 0xc3, 0xd8, 0x91, 0xa9, + 0x42, 0xed, 0x1b, 0xf4, 0x09, 0x90, 0x6d, 0x0a, 0xd9, 0xee, 0x7c, 0xbf, 0xfb, 0xe3, 0xef, 0x43, + 0x57, 0x3c, 0xa2, 0x09, 0x10, 0x0a, 0x05, 0x23, 0x14, 0xb2, 0x8c, 0xcb, 0x8c, 0x2d, 0x24, 0x59, + 0x0e, 0x2b, 0x19, 0xce, 0x0b, 0x90, 0xe0, 0x74, 0x34, 0x8a, 0x15, 0x8a, 0x2b, 0xc5, 0xe5, 0xb0, + 0x7b, 0x9a, 0x40, 0x02, 0x1a, 0x22, 0x2a, 0x32, 0x7c, 0xb7, 0x4d, 0x61, 0x11, 0x73, 0x20, 0x79, + 0x01, 0x10, 0x0b, 0xf3, 0xe8, 0x5d, 0x22, 0xf4, 0xc0, 0x8a, 0xd9, 0x9c, 0x05, 0x00, 0xd2, 0x71, + 0x50, 0x2d, 0x0d, 0x45, 0xda, 0xb1, 0x2f, 0xec, 0x7e, 0x2b, 0xd0, 0xf1, 0xa4, 0xf6, 0xfa, 0xd1, + 0xb3, 0xbc, 0x3b, 0xd4, 0x32, 0xdc, 0xb4, 0x60, 0x31, 0x7f, 0x71, 0xae, 0x11, 0x9a, 0xb1, 0xf2, + 0x29, 0xd7, 0x99, 0xe1, 0xfd, 0xb3, 0xdd, 0xba, 0x77, 0x52, 0x86, 0xd9, 0x7c, 0xe2, 0xfd, 0xd7, + 0xbc, 0xa0, 0x31, 0x63, 0xa5, 0xe9, 0xf2, 0xfc, 0xfd, 0xb6, 0x69, 0x28, 0x53, 0x07, 0xa3, 0x23, + 0xcd, 0x85, 0x52, 0x6d, 0x3c, 0xe8, 0x37, 0xfc, 0xf6, 0x6e, 0xdd, 0x3b, 0xae, 0x4c, 0x08, 0x65, + 0xea, 0x05, 0x87, 0xaa, 0x3f, 0x94, 0xe9, 0xa4, 0xf6, 0xae, 0x2e, 0xb9, 0x41, 0xcd, 0xfd, 0x25, + 0x00, 0xb1, 0x83, 0x51, 0xdd, 0x7c, 0x48, 0x8f, 0x68, 0x8e, 0xce, 0x31, 0xa7, 0x62, 0x34, 0xc6, + 0xb7, 0x7f, 0x8a, 0x68, 0x2e, 0xf8, 0xa5, 0xfc, 0xfb, 0xcf, 0x8d, 0x6b, 0xaf, 0x36, 0xae, 0xfd, + 0xbd, 0x71, 0xed, 0xb7, 0xad, 0x6b, 0xad, 0xb6, 
0xae, 0xf5, 0xb5, 0x75, 0xad, 0xc7, 0x61, 0xc2, + 0x65, 0xfa, 0x1c, 0x29, 0x2d, 0x09, 0x05, 0x91, 0x81, 0x20, 0x3c, 0xa2, 0x83, 0xbd, 0x1b, 0xa3, + 0xf1, 0xa0, 0x62, 0x88, 0x2c, 0x73, 0x26, 0xa2, 0xba, 0x16, 0x71, 0xfc, 0x13, 0x00, 0x00, 0xff, + 0xff, 0xe6, 0x8b, 0xf4, 0x8a, 0xb6, 0x01, 0x00, 0x00, +} + +func (m *MerkleRoot) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *MerkleRoot) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *MerkleRoot) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Hash) > 0 { + i -= len(m.Hash) + copy(dAtA[i:], m.Hash) + i = encodeVarintCommitment(dAtA, i, uint64(len(m.Hash))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *MerklePrefix) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *MerklePrefix) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *MerklePrefix) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.KeyPrefix) > 0 { + i -= len(m.KeyPrefix) + copy(dAtA[i:], m.KeyPrefix) + i = encodeVarintCommitment(dAtA, i, uint64(len(m.KeyPrefix))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *MerklePath) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *MerklePath) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *MerklePath) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.KeyPath) > 0 { + for iNdEx := len(m.KeyPath) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.KeyPath[iNdEx]) + copy(dAtA[i:], m.KeyPath[iNdEx]) + i = encodeVarintCommitment(dAtA, i, uint64(len(m.KeyPath[iNdEx]))) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *MerkleProof) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *MerkleProof) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *MerkleProof) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Proofs) > 0 { + for iNdEx := len(m.Proofs) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Proofs[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintCommitment(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func encodeVarintCommitment(dAtA []byte, offset int, v uint64) int { + offset -= sovCommitment(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *MerkleRoot) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Hash) + if l > 0 { + n += 1 + l + 
sovCommitment(uint64(l)) + } + return n +} + +func (m *MerklePrefix) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.KeyPrefix) + if l > 0 { + n += 1 + l + sovCommitment(uint64(l)) + } + return n +} + +func (m *MerklePath) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.KeyPath) > 0 { + for _, s := range m.KeyPath { + l = len(s) + n += 1 + l + sovCommitment(uint64(l)) + } + } + return n +} + +func (m *MerkleProof) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Proofs) > 0 { + for _, e := range m.Proofs { + l = e.Size() + n += 1 + l + sovCommitment(uint64(l)) + } + } + return n +} + +func sovCommitment(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozCommitment(x uint64) (n int) { + return sovCommitment(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *MerkleRoot) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCommitment + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MerkleRoot: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MerkleRoot: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Hash", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCommitment + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthCommitment + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthCommitment + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Hash = append(m.Hash[:0], dAtA[iNdEx:postIndex]...) 
+ if m.Hash == nil { + m.Hash = []byte{} + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipCommitment(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthCommitment + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *MerklePrefix) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCommitment + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MerklePrefix: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MerklePrefix: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field KeyPrefix", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCommitment + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthCommitment + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthCommitment + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.KeyPrefix = append(m.KeyPrefix[:0], dAtA[iNdEx:postIndex]...) + if m.KeyPrefix == nil { + m.KeyPrefix = []byte{} + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipCommitment(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthCommitment + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *MerklePath) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCommitment + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MerklePath: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MerklePath: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field KeyPath", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCommitment + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthCommitment + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthCommitment + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.KeyPath = append(m.KeyPath, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + 
default: + iNdEx = preIndex + skippy, err := skipCommitment(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthCommitment + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *MerkleProof) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCommitment + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MerkleProof: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MerkleProof: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Proofs", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCommitment + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthCommitment + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthCommitment + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Proofs = append(m.Proofs, &_go.CommitmentProof{}) + if err := m.Proofs[len(m.Proofs)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipCommitment(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthCommitment + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipCommitment(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowCommitment + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowCommitment + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowCommitment + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthCommitment + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupCommitment + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthCommitment + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthCommitment = 
fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowCommitment = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupCommitment = fmt.Errorf("proto: unexpected end of group") +) diff --git a/core/23-commitment/types/commitment_test.go b/core/23-commitment/types/commitment_test.go new file mode 100644 index 0000000000..932599e539 --- /dev/null +++ b/core/23-commitment/types/commitment_test.go @@ -0,0 +1,37 @@ +package types_test + +import ( + "testing" + + "github.com/stretchr/testify/suite" + + "github.com/cosmos/cosmos-sdk/store/iavl" + "github.com/cosmos/cosmos-sdk/store/rootmulti" + storetypes "github.com/cosmos/cosmos-sdk/store/types" + + dbm "github.com/tendermint/tm-db" +) + +type MerkleTestSuite struct { + suite.Suite + + store *rootmulti.Store + storeKey *storetypes.KVStoreKey + iavlStore *iavl.Store +} + +func (suite *MerkleTestSuite) SetupTest() { + db := dbm.NewMemDB() + suite.store = rootmulti.NewStore(db) + + suite.storeKey = storetypes.NewKVStoreKey("iavlStoreKey") + + suite.store.MountStoreWithDB(suite.storeKey, storetypes.StoreTypeIAVL, nil) + suite.store.LoadVersion(0) + + suite.iavlStore = suite.store.GetCommitStore(suite.storeKey).(*iavl.Store) +} + +func TestMerkleTestSuite(t *testing.T) { + suite.Run(t, new(MerkleTestSuite)) +} diff --git a/core/23-commitment/types/errors.go b/core/23-commitment/types/errors.go new file mode 100644 index 0000000000..7191baef1c --- /dev/null +++ b/core/23-commitment/types/errors.go @@ -0,0 +1,15 @@ +package types + +import ( + sdkerrors "github.com/cosmos/cosmos-sdk/types/errors" +) + +// SubModuleName is the error codespace +const SubModuleName string = "commitment" + +// IBC connection sentinel errors +var ( + ErrInvalidProof = sdkerrors.Register(SubModuleName, 2, "invalid proof") + ErrInvalidPrefix = sdkerrors.Register(SubModuleName, 3, "invalid prefix") + ErrInvalidMerkleProof = sdkerrors.Register(SubModuleName, 4, "invalid merkle proof") +) diff --git a/core/23-commitment/types/merkle.go b/core/23-commitment/types/merkle.go new file mode 100644 index 0000000000..e90fccc34b --- /dev/null +++ b/core/23-commitment/types/merkle.go @@ -0,0 +1,312 @@ +package types + +import ( + "bytes" + "fmt" + "net/url" + + ics23 "github.com/confio/ics23/go" + "github.com/gogo/protobuf/proto" + tmcrypto "github.com/tendermint/tendermint/proto/tendermint/crypto" + + sdkerrors "github.com/cosmos/cosmos-sdk/types/errors" + "github.com/cosmos/cosmos-sdk/x/ibc/core/exported" +) + +// var representing the proofspecs for a SDK chain +var sdkSpecs = []*ics23.ProofSpec{ics23.IavlSpec, ics23.TendermintSpec} + +// ICS 023 Merkle Types Implementation +// +// This file defines Merkle commitment types that implements ICS 023. 
+ +// Merkle proof implementation of the Proof interface +// Applied on SDK-based IBC implementation +var _ exported.Root = (*MerkleRoot)(nil) + +// GetSDKSpecs is a getter function for the proofspecs of an sdk chain +func GetSDKSpecs() []*ics23.ProofSpec { + return sdkSpecs +} + +// NewMerkleRoot constructs a new MerkleRoot +func NewMerkleRoot(hash []byte) MerkleRoot { + return MerkleRoot{ + Hash: hash, + } +} + +// GetHash implements RootI interface +func (mr MerkleRoot) GetHash() []byte { + return mr.Hash +} + +// Empty returns true if the root is empty +func (mr MerkleRoot) Empty() bool { + return len(mr.GetHash()) == 0 +} + +var _ exported.Prefix = (*MerklePrefix)(nil) + +// NewMerklePrefix constructs new MerklePrefix instance +func NewMerklePrefix(keyPrefix []byte) MerklePrefix { + return MerklePrefix{ + KeyPrefix: keyPrefix, + } +} + +// Bytes returns the key prefix bytes +func (mp MerklePrefix) Bytes() []byte { + return mp.KeyPrefix +} + +// Empty returns true if the prefix is empty +func (mp MerklePrefix) Empty() bool { + return len(mp.Bytes()) == 0 +} + +var _ exported.Path = (*MerklePath)(nil) + +// NewMerklePath creates a new MerklePath instance +// The keys must be passed in from root-to-leaf order +func NewMerklePath(keyPath ...string) MerklePath { + return MerklePath{ + KeyPath: keyPath, + } +} + +// String implements fmt.Stringer. +// This represents the path in the same way the tendermint KeyPath will +// represent a key path. The backslashes partition the key path into +// the respective stores they belong to. +func (mp MerklePath) String() string { + pathStr := "" + for _, k := range mp.KeyPath { + pathStr += "/" + url.PathEscape(k) + } + return pathStr +} + +// Pretty returns the unescaped path of the URL string. +// This function will unescape any backslash within a particular store key. +// This makes the keypath more human-readable while removing information +// about the exact partitions in the key path. +func (mp MerklePath) Pretty() string { + path, err := url.PathUnescape(mp.String()) + if err != nil { + panic(err) + } + return path +} + +// GetKey will return a byte representation of the key +// after URL escaping the key element +func (mp MerklePath) GetKey(i uint64) ([]byte, error) { + if i >= uint64(len(mp.KeyPath)) { + return nil, fmt.Errorf("index out of range. %d (index) >= %d (len)", i, len(mp.KeyPath)) + } + key, err := url.PathUnescape(mp.KeyPath[i]) + if err != nil { + return nil, err + } + return []byte(key), nil +} + +// Empty returns true if the path is empty +func (mp MerklePath) Empty() bool { + return len(mp.KeyPath) == 0 +} + +// ApplyPrefix constructs a new commitment path from the arguments. It prepends the prefix key +// with the given path. +func ApplyPrefix(prefix exported.Prefix, path MerklePath) (MerklePath, error) { + if prefix == nil || prefix.Empty() { + return MerklePath{}, sdkerrors.Wrap(ErrInvalidPrefix, "prefix can't be empty") + } + return NewMerklePath(append([]string{string(prefix.Bytes())}, path.KeyPath...)...), nil +} + +var _ exported.Proof = (*MerkleProof)(nil) + +// VerifyMembership verifies the membership pf a merkle proof against the given root, path, and value. 
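+// For the standard SDK configuration the arguments are expected to line up as follows
+// (a descriptive sketch mirroring the ordering rules on verifyChainedMembershipProof below):
+// specs comes from GetSDKSpecs, path carries one key per spec ordered root-to-leaf (store
+// name first, then the key inside that store), value is the raw committed bytes, and
+// proof.Proofs is ordered leaf-to-root, so Proofs[0] is the IAVL existence proof verified
+// against the final path element and Proofs[1] chains that subroot into the multistore root.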
+func (proof MerkleProof) VerifyMembership(specs []*ics23.ProofSpec, root exported.Root, path exported.Path, value []byte) error { + if err := proof.validateVerificationArgs(specs, root); err != nil { + return err + } + + // VerifyMembership specific argument validation + mpath, ok := path.(MerklePath) + if !ok { + return sdkerrors.Wrapf(ErrInvalidProof, "path %v is not of type MerklePath", path) + } + if len(mpath.KeyPath) != len(specs) { + return sdkerrors.Wrapf(ErrInvalidProof, "path length %d not same as proof %d", + len(mpath.KeyPath), len(specs)) + } + if len(value) == 0 { + return sdkerrors.Wrap(ErrInvalidProof, "empty value in membership proof") + } + + // Since every proof in chain is a membership proof we can use verifyChainedMembershipProof from index 0 + // to validate entire proof + if err := verifyChainedMembershipProof(root.GetHash(), specs, proof.Proofs, mpath, value, 0); err != nil { + return err + } + return nil +} + +// VerifyNonMembership verifies the absence of a merkle proof against the given root and path. +// VerifyNonMembership verifies a chained proof where the absence of a given path is proven +// at the lowest subtree and then each subtree's inclusion is proved up to the final root. +func (proof MerkleProof) VerifyNonMembership(specs []*ics23.ProofSpec, root exported.Root, path exported.Path) error { + if err := proof.validateVerificationArgs(specs, root); err != nil { + return err + } + + // VerifyNonMembership specific argument validation + mpath, ok := path.(MerklePath) + if !ok { + return sdkerrors.Wrapf(ErrInvalidProof, "path %v is not of type MerkleProof", path) + } + if len(mpath.KeyPath) != len(specs) { + return sdkerrors.Wrapf(ErrInvalidProof, "path length %d not same as proof %d", + len(mpath.KeyPath), len(specs)) + } + + switch proof.Proofs[0].Proof.(type) { + case *ics23.CommitmentProof_Nonexist: + // VerifyNonMembership will verify the absence of key in lowest subtree, and then chain inclusion proofs + // of all subroots up to final root + subroot, err := proof.Proofs[0].Calculate() + if err != nil { + return sdkerrors.Wrapf(ErrInvalidProof, "could not calculate root for proof index 0, merkle tree is likely empty. %v", err) + } + key, err := mpath.GetKey(uint64(len(mpath.KeyPath) - 1)) + if err != nil { + return sdkerrors.Wrapf(ErrInvalidProof, "could not retrieve key bytes for key: %s", mpath.KeyPath[len(mpath.KeyPath)-1]) + } + if ok := ics23.VerifyNonMembership(specs[0], subroot, proof.Proofs[0], key); !ok { + return sdkerrors.Wrapf(ErrInvalidProof, "could not verify absence of key %s. Please ensure that the path is correct.", string(key)) + } + + // Verify chained membership proof starting from index 1 with value = subroot + if err := verifyChainedMembershipProof(root.GetHash(), specs, proof.Proofs, mpath, subroot, 1); err != nil { + return err + } + case *ics23.CommitmentProof_Exist: + return sdkerrors.Wrapf(ErrInvalidProof, + "got ExistenceProof in VerifyNonMembership. 
If this is unexpected, please ensure that proof was queried with the correct key.") + default: + return sdkerrors.Wrapf(ErrInvalidProof, + "expected proof type: %T, got: %T", &ics23.CommitmentProof_Exist{}, proof.Proofs[0].Proof) + } + return nil +} + +// BatchVerifyMembership verifies a group of key value pairs against the given root +// NOTE: Currently left unimplemented as it is unused +func (proof MerkleProof) BatchVerifyMembership(specs []*ics23.ProofSpec, root exported.Root, path exported.Path, items map[string][]byte) error { + return sdkerrors.Wrap(ErrInvalidProof, "batch proofs are currently unsupported") +} + +// BatchVerifyNonMembership verifies absence of a group of keys against the given root +// NOTE: Currently left unimplemented as it is unused +func (proof MerkleProof) BatchVerifyNonMembership(specs []*ics23.ProofSpec, root exported.Root, path exported.Path, items [][]byte) error { + return sdkerrors.Wrap(ErrInvalidProof, "batch proofs are currently unsupported") +} + +// verifyChainedMembershipProof takes a list of proofs and specs and verifies each proof sequentially ensuring that the value is committed to +// by first proof and each subsequent subroot is committed to by the next subroot and checking that the final calculated root is equal to the given roothash. +// The proofs and specs are passed in from lowest subtree to the highest subtree, but the keys are passed in from highest subtree to lowest. +// The index specifies what index to start chaining the membership proofs, this is useful since the lowest proof may not be a membership proof, thus we +// will want to start the membership proof chaining from index 1 with value being the lowest subroot +func verifyChainedMembershipProof(root []byte, specs []*ics23.ProofSpec, proofs []*ics23.CommitmentProof, keys MerklePath, value []byte, index int) error { + var ( + subroot []byte + err error + ) + // Initialize subroot to value since the proofs list may be empty. + // This may happen if this call is verifying intermediate proofs after the lowest proof has been executed. + // In this case, there may be no intermediate proofs to verify and we just check that lowest proof root equals final root + subroot = value + for i := index; i < len(proofs); i++ { + switch proofs[i].Proof.(type) { + case *ics23.CommitmentProof_Exist: + subroot, err = proofs[i].Calculate() + if err != nil { + return sdkerrors.Wrapf(ErrInvalidProof, "could not calculate proof root at index %d, merkle tree may be empty. %v", i, err) + } + // Since keys are passed in from highest to lowest, we must grab their indices in reverse order + // from the proofs and specs which are lowest to highest + key, err := keys.GetKey(uint64(len(keys.KeyPath) - 1 - i)) + if err != nil { + return sdkerrors.Wrapf(ErrInvalidProof, "could not retrieve key bytes for key %s: %v", keys.KeyPath[len(keys.KeyPath)-1-i], err) + } + + // verify membership of the proof at this index with appropriate key and value + if ok := ics23.VerifyMembership(specs[i], subroot, proofs[i], key, value); !ok { + return sdkerrors.Wrapf(ErrInvalidProof, + "chained membership proof failed to verify membership of value: %X in subroot %X at index %d. Please ensure the path and value are both correct.", + value, subroot, i) + } + // Set value to subroot so that we verify next proof in chain commits to this subroot + value = subroot + case *ics23.CommitmentProof_Nonexist: + return sdkerrors.Wrapf(ErrInvalidProof, + "chained membership proof contains nonexistence proof at index %d. 
If this is unexpected, please ensure that proof was queried from the height that contained the value in store and was queried with the correct key.", + i) + default: + return sdkerrors.Wrapf(ErrInvalidProof, + "expected proof type: %T, got: %T", &ics23.CommitmentProof_Exist{}, proofs[i].Proof) + } + } + // Check that chained proof root equals passed-in root + if !bytes.Equal(root, subroot) { + return sdkerrors.Wrapf(ErrInvalidProof, + "proof did not commit to expected root: %X, got: %X. Please ensure proof was submitted with correct proofHeight and to the correct chain.", + root, subroot) + } + return nil +} + +// blankMerkleProof and blankProofOps will be used to compare against their zero values, +// and are declared as globals to avoid having to unnecessarily re-allocate on every comparison. +var blankMerkleProof = &MerkleProof{} +var blankProofOps = &tmcrypto.ProofOps{} + +// Empty returns true if the root is empty +func (proof *MerkleProof) Empty() bool { + return proof == nil || proto.Equal(proof, blankMerkleProof) || proto.Equal(proof, blankProofOps) +} + +// ValidateBasic checks if the proof is empty. +func (proof MerkleProof) ValidateBasic() error { + if proof.Empty() { + return ErrInvalidProof + } + return nil +} + +// validateVerificationArgs verifies the proof arguments are valid +func (proof MerkleProof) validateVerificationArgs(specs []*ics23.ProofSpec, root exported.Root) error { + if proof.Empty() { + return sdkerrors.Wrap(ErrInvalidMerkleProof, "proof cannot be empty") + } + + if root == nil || root.Empty() { + return sdkerrors.Wrap(ErrInvalidMerkleProof, "root cannot be empty") + } + + if len(specs) != len(proof.Proofs) { + return sdkerrors.Wrapf(ErrInvalidMerkleProof, + "length of specs: %d not equal to length of proof: %d", + len(specs), len(proof.Proofs)) + } + + for i, spec := range specs { + if spec == nil { + return sdkerrors.Wrapf(ErrInvalidProof, "spec at position %d is nil", i) + } + } + return nil +} diff --git a/core/23-commitment/types/merkle_test.go b/core/23-commitment/types/merkle_test.go new file mode 100644 index 0000000000..3c53847fad --- /dev/null +++ b/core/23-commitment/types/merkle_test.go @@ -0,0 +1,172 @@ +package types_test + +import ( + "fmt" + "testing" + + "github.com/stretchr/testify/require" + abci "github.com/tendermint/tendermint/abci/types" + + "github.com/cosmos/cosmos-sdk/x/ibc/core/23-commitment/types" +) + +func (suite *MerkleTestSuite) TestVerifyMembership() { + suite.iavlStore.Set([]byte("MYKEY"), []byte("MYVALUE")) + cid := suite.store.Commit() + + res := suite.store.Query(abci.RequestQuery{ + Path: fmt.Sprintf("/%s/key", suite.storeKey.Name()), // required path to get key/value+proof + Data: []byte("MYKEY"), + Prove: true, + }) + require.NotNil(suite.T(), res.ProofOps) + + proof, err := types.ConvertProofs(res.ProofOps) + require.NoError(suite.T(), err) + + suite.Require().NoError(proof.ValidateBasic()) + suite.Require().Error(types.MerkleProof{}.ValidateBasic()) + + cases := []struct { + name string + root []byte + pathArr []string + value []byte + malleate func() + shouldPass bool + }{ + {"valid proof", cid.Hash, []string{suite.storeKey.Name(), "MYKEY"}, []byte("MYVALUE"), func() {}, true}, // valid proof + {"wrong value", cid.Hash, []string{suite.storeKey.Name(), "MYKEY"}, []byte("WRONGVALUE"), func() {}, false}, // invalid proof with wrong value + {"nil value", cid.Hash, []string{suite.storeKey.Name(), "MYKEY"}, []byte(nil), func() {}, false}, // invalid proof with nil value + {"wrong key", cid.Hash, 
[]string{suite.storeKey.Name(), "NOTMYKEY"}, []byte("MYVALUE"), func() {}, false}, // invalid proof with wrong key + {"wrong path 1", cid.Hash, []string{suite.storeKey.Name(), "MYKEY", "MYKEY"}, []byte("MYVALUE"), func() {}, false}, // invalid proof with wrong path + {"wrong path 2", cid.Hash, []string{suite.storeKey.Name()}, []byte("MYVALUE"), func() {}, false}, // invalid proof with wrong path + {"wrong path 3", cid.Hash, []string{"MYKEY"}, []byte("MYVALUE"), func() {}, false}, // invalid proof with wrong path + {"wrong storekey", cid.Hash, []string{"otherStoreKey", "MYKEY"}, []byte("MYVALUE"), func() {}, false}, // invalid proof with wrong store prefix + {"wrong root", []byte("WRONGROOT"), []string{suite.storeKey.Name(), "MYKEY"}, []byte("MYVALUE"), func() {}, false}, // invalid proof with wrong root + {"nil root", []byte(nil), []string{suite.storeKey.Name(), "MYKEY"}, []byte("MYVALUE"), func() {}, false}, // invalid proof with nil root + {"proof is wrong length", cid.Hash, []string{suite.storeKey.Name(), "MYKEY"}, []byte("MYVALUE"), func() { + proof = types.MerkleProof{ + Proofs: proof.Proofs[1:], + } + }, false}, // invalid proof with wrong length + + } + + for i, tc := range cases { + tc := tc + suite.Run(tc.name, func() { + tc.malleate() + + root := types.NewMerkleRoot(tc.root) + path := types.NewMerklePath(tc.pathArr...) + + err := proof.VerifyMembership(types.GetSDKSpecs(), &root, path, tc.value) + + if tc.shouldPass { + // nolint: scopelint + suite.Require().NoError(err, "test case %d should have passed", i) + } else { + // nolint: scopelint + suite.Require().Error(err, "test case %d should have failed", i) + } + }) + } + +} + +func (suite *MerkleTestSuite) TestVerifyNonMembership() { + suite.iavlStore.Set([]byte("MYKEY"), []byte("MYVALUE")) + cid := suite.store.Commit() + + // Get Proof + res := suite.store.Query(abci.RequestQuery{ + Path: fmt.Sprintf("/%s/key", suite.storeKey.Name()), // required path to get key/value+proof + Data: []byte("MYABSENTKEY"), + Prove: true, + }) + require.NotNil(suite.T(), res.ProofOps) + + proof, err := types.ConvertProofs(res.ProofOps) + require.NoError(suite.T(), err) + + suite.Require().NoError(proof.ValidateBasic()) + + cases := []struct { + name string + root []byte + pathArr []string + malleate func() + shouldPass bool + }{ + {"valid proof", cid.Hash, []string{suite.storeKey.Name(), "MYABSENTKEY"}, func() {}, true}, // valid proof + {"wrong key", cid.Hash, []string{suite.storeKey.Name(), "MYKEY"}, func() {}, false}, // invalid proof with existent key + {"wrong path 1", cid.Hash, []string{suite.storeKey.Name(), "MYKEY", "MYABSENTKEY"}, func() {}, false}, // invalid proof with wrong path + {"wrong path 2", cid.Hash, []string{suite.storeKey.Name(), "MYABSENTKEY", "MYKEY"}, func() {}, false}, // invalid proof with wrong path + {"wrong path 3", cid.Hash, []string{suite.storeKey.Name()}, func() {}, false}, // invalid proof with wrong path + {"wrong path 4", cid.Hash, []string{"MYABSENTKEY"}, func() {}, false}, // invalid proof with wrong path + {"wrong storeKey", cid.Hash, []string{"otherStoreKey", "MYABSENTKEY"}, func() {}, false}, // invalid proof with wrong store prefix + {"wrong root", []byte("WRONGROOT"), []string{suite.storeKey.Name(), "MYABSENTKEY"}, func() {}, false}, // invalid proof with wrong root + {"nil root", []byte(nil), []string{suite.storeKey.Name(), "MYABSENTKEY"}, func() {}, false}, // invalid proof with nil root + {"proof is wrong length", cid.Hash, []string{suite.storeKey.Name(), "MYKEY"}, func() { + proof = types.MerkleProof{ 
+ Proofs: proof.Proofs[1:], + } + }, false}, // invalid proof with wrong length + + } + + for i, tc := range cases { + tc := tc + + suite.Run(tc.name, func() { + tc.malleate() + + root := types.NewMerkleRoot(tc.root) + path := types.NewMerklePath(tc.pathArr...) + + err := proof.VerifyNonMembership(types.GetSDKSpecs(), &root, path) + + if tc.shouldPass { + // nolint: scopelint + suite.Require().NoError(err, "test case %d should have passed", i) + } else { + // nolint: scopelint + suite.Require().Error(err, "test case %d should have failed", i) + } + }) + } + +} + +func TestApplyPrefix(t *testing.T) { + prefix := types.NewMerklePrefix([]byte("storePrefixKey")) + + pathStr := "pathone/pathtwo/paththree/key" + path := types.MerklePath{ + KeyPath: []string{pathStr}, + } + + prefixedPath, err := types.ApplyPrefix(prefix, path) + require.NoError(t, err, "valid prefix returns error") + + require.Equal(t, "/storePrefixKey/"+pathStr, prefixedPath.Pretty(), "Prefixed path incorrect") + require.Equal(t, "/storePrefixKey/pathone%2Fpathtwo%2Fpaththree%2Fkey", prefixedPath.String(), "Prefixed escaped path incorrect") +} + +func TestString(t *testing.T) { + path := types.NewMerklePath("rootKey", "storeKey", "path/to/leaf") + + require.Equal(t, "/rootKey/storeKey/path%2Fto%2Fleaf", path.String(), "path String returns unxpected value") + require.Equal(t, "/rootKey/storeKey/path/to/leaf", path.Pretty(), "path's pretty string representation is incorrect") + + onePath := types.NewMerklePath("path/to/leaf") + + require.Equal(t, "/path%2Fto%2Fleaf", onePath.String(), "one element path does not have correct string representation") + require.Equal(t, "/path/to/leaf", onePath.Pretty(), "one element path has incorrect pretty string representation") + + zeroPath := types.NewMerklePath() + + require.Equal(t, "", zeroPath.String(), "zero element path does not have correct string representation") + require.Equal(t, "", zeroPath.Pretty(), "zero element path does not have correct pretty string representation") +} diff --git a/core/23-commitment/types/utils.go b/core/23-commitment/types/utils.go new file mode 100644 index 0000000000..e662f77265 --- /dev/null +++ b/core/23-commitment/types/utils.go @@ -0,0 +1,28 @@ +package types + +import ( + ics23 "github.com/confio/ics23/go" + crypto "github.com/tendermint/tendermint/proto/tendermint/crypto" + + sdkerrors "github.com/cosmos/cosmos-sdk/types/errors" +) + +// ConvertProofs converts crypto.ProofOps into MerkleProof +func ConvertProofs(tmProof *crypto.ProofOps) (MerkleProof, error) { + if tmProof == nil { + return MerkleProof{}, sdkerrors.Wrapf(ErrInvalidMerkleProof, "tendermint proof is nil") + } + // Unmarshal all proof ops to CommitmentProof + proofs := make([]*ics23.CommitmentProof, len(tmProof.Ops)) + for i, op := range tmProof.Ops { + var p ics23.CommitmentProof + err := p.Unmarshal(op.Data) + if err != nil || p.Proof == nil { + return MerkleProof{}, sdkerrors.Wrapf(ErrInvalidMerkleProof, "could not unmarshal proof op into CommitmentProof at index %d: %v", i, err) + } + proofs[i] = &p + } + return MerkleProof{ + Proofs: proofs, + }, nil +} diff --git a/core/23-commitment/types/utils_test.go b/core/23-commitment/types/utils_test.go new file mode 100644 index 0000000000..f852fb6c2c --- /dev/null +++ b/core/23-commitment/types/utils_test.go @@ -0,0 +1,98 @@ +package types_test + +import ( + "fmt" + + "github.com/stretchr/testify/require" + abci "github.com/tendermint/tendermint/abci/types" + crypto "github.com/tendermint/tendermint/proto/tendermint/crypto" + + 
"github.com/cosmos/cosmos-sdk/x/ibc/core/23-commitment/types" +) + +func (suite *MerkleTestSuite) TestConvertProofs() { + suite.iavlStore.Set([]byte("MYKEY"), []byte("MYVALUE")) + cid := suite.store.Commit() + + root := types.NewMerkleRoot(cid.Hash) + existsPath := types.NewMerklePath(suite.storeKey.Name(), "MYKEY") + nonexistPath := types.NewMerklePath(suite.storeKey.Name(), "NOTMYKEY") + value := []byte("MYVALUE") + + var proofOps *crypto.ProofOps + testcases := []struct { + name string + malleate func() + keyExists bool + expPass bool + }{ + { + "success for ExistenceProof", + func() { + res := suite.store.Query(abci.RequestQuery{ + Path: fmt.Sprintf("/%s/key", suite.storeKey.Name()), // required path to get key/value+proof + Data: []byte("MYKEY"), + Prove: true, + }) + require.NotNil(suite.T(), res.ProofOps) + + proofOps = res.ProofOps + }, + true, true, + }, + { + "success for NonexistenceProof", + func() { + res := suite.store.Query(abci.RequestQuery{ + Path: fmt.Sprintf("/%s/key", suite.storeKey.Name()), // required path to get key/value+proof + Data: []byte("NOTMYKEY"), + Prove: true, + }) + require.NotNil(suite.T(), res.ProofOps) + + proofOps = res.ProofOps + }, + false, true, + }, + { + "nil proofOps", + func() { + proofOps = nil + }, + true, false, + }, + { + "proof op data is nil", + func() { + res := suite.store.Query(abci.RequestQuery{ + Path: fmt.Sprintf("/%s/key", suite.storeKey.Name()), // required path to get key/value+proof + Data: []byte("MYKEY"), + Prove: true, + }) + require.NotNil(suite.T(), res.ProofOps) + + proofOps = res.ProofOps + proofOps.Ops[0].Data = nil + }, + true, false, + }, + } + + for _, tc := range testcases { + tc.malleate() + + proof, err := types.ConvertProofs(proofOps) + if tc.expPass { + suite.Require().NoError(err, "ConvertProofs unexpectedly returned error for case: %s", tc.name) + if tc.keyExists { + err := proof.VerifyMembership(types.GetSDKSpecs(), &root, existsPath, value) + suite.Require().NoError(err, "converted proof failed to verify membership for case: %s", tc.name) + } else { + err := proof.VerifyNonMembership(types.GetSDKSpecs(), &root, nonexistPath) + suite.Require().NoError(err, "converted proof failed to verify membership for case: %s", tc.name) + } + } else { + suite.Require().Error(err, "ConvertProofs passed on invalid case for case: %s", tc.name) + } + } +} diff --git a/core/24-host/errors.go b/core/24-host/errors.go new file mode 100644 index 0000000000..fe8129bde8 --- /dev/null +++ b/core/24-host/errors.go @@ -0,0 +1,15 @@ +package host + +import ( + sdkerrors "github.com/cosmos/cosmos-sdk/types/errors" +) + +// SubModuleName defines the ICS 24 host +const SubModuleName = "host" + +// IBC client sentinel errors +var ( + ErrInvalidID = sdkerrors.Register(SubModuleName, 2, "invalid identifier") + ErrInvalidPath = sdkerrors.Register(SubModuleName, 3, "invalid path") + ErrInvalidPacket = sdkerrors.Register(SubModuleName, 4, "invalid packet") +) diff --git a/core/24-host/keys.go b/core/24-host/keys.go new file mode 100644 index 0000000000..21f4bc4309 --- /dev/null +++ b/core/24-host/keys.go @@ -0,0 +1,235 @@ +package host + +import ( + "fmt" + + "github.com/cosmos/cosmos-sdk/x/ibc/core/exported" +) + +const ( + // ModuleName is the name of the IBC module + ModuleName = "ibc" + + // StoreKey is the string store representation + StoreKey string = ModuleName + + // QuerierRoute is the querier route for the IBC module + QuerierRoute string = ModuleName + + // RouterKey is the msg router key for the IBC module + RouterKey string = 
ModuleName +) + +// KVStore key prefixes for IBC +var ( + KeyClientStorePrefix = []byte("clients") +) + +// KVStore key prefixes for IBC +const ( + KeyClientState = "clientState" + KeyConsensusStatePrefix = "consensusStates" + KeyConnectionPrefix = "connections" + KeyChannelEndPrefix = "channelEnds" + KeyChannelPrefix = "channels" + KeyPortPrefix = "ports" + KeySequencePrefix = "sequences" + KeyChannelCapabilityPrefix = "capabilities" + KeyNextSeqSendPrefix = "nextSequenceSend" + KeyNextSeqRecvPrefix = "nextSequenceRecv" + KeyNextSeqAckPrefix = "nextSequenceAck" + KeyPacketCommitmentPrefix = "commitments" + KeyPacketAckPrefix = "acks" + KeyPacketReceiptPrefix = "receipts" +) + +// FullClientPath returns the full path of a specific client path in the format: +// "clients/{clientID}/{path}" as a string. +func FullClientPath(clientID string, path string) string { + return fmt.Sprintf("%s/%s/%s", KeyClientStorePrefix, clientID, path) +} + +// FullClientKey returns the full path of specific client path in the format: +// "clients/{clientID}/{path}" as a byte array. +func FullClientKey(clientID string, path []byte) []byte { + return []byte(FullClientPath(clientID, string(path))) +} + +// ICS02 +// The following paths are the keys to the store as defined in https://github.com/cosmos/ics/tree/master/spec/ics-002-client-semantics#path-space + +// FullClientStatePath takes a client identifier and returns a Path under which to store a +// particular client state +func FullClientStatePath(clientID string) string { + return FullClientPath(clientID, KeyClientState) +} + +// FullClientStateKey takes a client identifier and returns a Key under which to store a +// particular client state. +func FullClientStateKey(clientID string) []byte { + return FullClientKey(clientID, []byte(KeyClientState)) +} + +// ClientStateKey returns a store key under which a particular client state is stored +// in a client prefixed store +func ClientStateKey() []byte { + return []byte(KeyClientState) +} + +// FullConsensusStatePath takes a client identifier and returns a Path under which to +// store the consensus state of a client. +func FullConsensusStatePath(clientID string, height exported.Height) string { + return FullClientPath(clientID, ConsensusStatePath(height)) +} + +// FullConsensusStateKey returns the store key for the consensus state of a particular +// client. +func FullConsensusStateKey(clientID string, height exported.Height) []byte { + return []byte(FullConsensusStatePath(clientID, height)) +} + +// ConsensusStatePath returns the suffix store key for the consensus state at a +// particular height stored in a client prefixed store. +func ConsensusStatePath(height exported.Height) string { + return fmt.Sprintf("%s/%s", KeyConsensusStatePrefix, height) +} + +// ConsensusStateKey returns the store key for a the consensus state of a particular +// client stored in a client prefixed store. 
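For illustration, an editor's sketch (not part of this patch) of the flat client store paths these helpers produce, assuming the host package name used in this file and the "{revision}-{height}" string form of clienttypes.Height:

    host.FullClientStatePath("07-tendermint-0")
    // "clients/07-tendermint-0/clientState"
    host.FullConsensusStatePath("07-tendermint-0", clienttypes.NewHeight(0, 10))
    // "clients/07-tendermint-0/consensusStates/0-10"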
+func ConsensusStateKey(height exported.Height) []byte { + return []byte(ConsensusStatePath(height)) +} + +// ICS03 +// The following paths are the keys to the store as defined in https://github.com/cosmos/ics/tree/master/spec/ics-003-connection-semantics#store-paths + +// ClientConnectionsPath defines a reverse mapping from clients to a set of connections +func ClientConnectionsPath(clientID string) string { + return FullClientPath(clientID, KeyConnectionPrefix) +} + +// ClientConnectionsKey returns the store key for the connections of a given client +func ClientConnectionsKey(clientID string) []byte { + return []byte(ClientConnectionsPath(clientID)) +} + +// ConnectionPath defines the path under which connection paths are stored +func ConnectionPath(connectionID string) string { + return fmt.Sprintf("%s/%s", KeyConnectionPrefix, connectionID) +} + +// ConnectionKey returns the store key for a particular connection +func ConnectionKey(connectionID string) []byte { + return []byte(ConnectionPath(connectionID)) +} + +// ICS04 +// The following paths are the keys to the store as defined in https://github.com/cosmos/ics/tree/master/spec/ics-004-channel-and-packet-semantics#store-paths + +// ChannelPath defines the path under which channels are stored +func ChannelPath(portID, channelID string) string { + return fmt.Sprintf("%s/%s", KeyChannelEndPrefix, channelPath(portID, channelID)) +} + +// ChannelKey returns the store key for a particular channel +func ChannelKey(portID, channelID string) []byte { + return []byte(ChannelPath(portID, channelID)) +} + +// ChannelCapabilityPath defines the path under which capability keys associated +// with a channel are stored +func ChannelCapabilityPath(portID, channelID string) string { + return fmt.Sprintf("%s/%s", KeyChannelCapabilityPrefix, channelPath(portID, channelID)) +} + +// NextSequenceSendPath defines the next send sequence counter store path +func NextSequenceSendPath(portID, channelID string) string { + return fmt.Sprintf("%s/%s", KeyNextSeqSendPrefix, channelPath(portID, channelID)) +} + +// NextSequenceSendKey returns the store key for the send sequence of a particular +// channel bound to a specific port. +func NextSequenceSendKey(portID, channelID string) []byte { + return []byte(NextSequenceSendPath(portID, channelID)) +} + +// NextSequenceRecvPath defines the next receive sequence counter store path. +func NextSequenceRecvPath(portID, channelID string) string { + return fmt.Sprintf("%s/%s", KeyNextSeqRecvPrefix, channelPath(portID, channelID)) +} + +// NextSequenceRecvKey returns the store key for the receive sequence of a particular +// channel bound to a specific port +func NextSequenceRecvKey(portID, channelID string) []byte { + return []byte(NextSequenceRecvPath(portID, channelID)) +} + +// NextSequenceAckPath defines the next acknowledgement sequence counter store path +func NextSequenceAckPath(portID, channelID string) string { + return fmt.Sprintf("%s/%s", KeyNextSeqAckPrefix, channelPath(portID, channelID)) +} + +// NextSequenceAckKey returns the store key for the acknowledgement sequence of +// a particular channel bound to a specific port. 
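As a quick reference (editor's sketch, not part of this patch), the channel-scoped helpers in this file all expand through channelPath into paths of the form {prefix}/ports/{portID}/channels/{channelID}, for example:

    host.ChannelPath("transfer", "channel-0")
    // "channelEnds/ports/transfer/channels/channel-0"
    host.NextSequenceSendPath("transfer", "channel-0")
    // "nextSequenceSend/ports/transfer/channels/channel-0"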
+func NextSequenceAckKey(portID, channelID string) []byte { + return []byte(NextSequenceAckPath(portID, channelID)) +} + +// PacketCommitmentPath defines the commitments to packet data fields store path +func PacketCommitmentPath(portID, channelID string, sequence uint64) string { + return fmt.Sprintf("%s/%d", PacketCommitmentPrefixPath(portID, channelID), sequence) +} + +// PacketCommitmentKey returns the store key under which a packet commitment +// is stored +func PacketCommitmentKey(portID, channelID string, sequence uint64) []byte { + return []byte(PacketCommitmentPath(portID, channelID, sequence)) +} + +// PacketCommitmentPrefixPath defines the prefix for commitments to packet data fields store path. +func PacketCommitmentPrefixPath(portID, channelID string) string { + return fmt.Sprintf("%s/%s/%s", KeyPacketCommitmentPrefix, channelPath(portID, channelID), KeySequencePrefix) +} + +// PacketAcknowledgementPath defines the packet acknowledgement store path +func PacketAcknowledgementPath(portID, channelID string, sequence uint64) string { + return fmt.Sprintf("%s/%d", PacketAcknowledgementPrefixPath(portID, channelID), sequence) +} + +// PacketAcknowledgementKey returns the store key under which a packet +// acknowledgement is stored +func PacketAcknowledgementKey(portID, channelID string, sequence uint64) []byte { + return []byte(PacketAcknowledgementPath(portID, channelID, sequence)) +} + +// PacketAcknowledgementPrefixPath defines the prefix for the packet acknowledgement store path. +func PacketAcknowledgementPrefixPath(portID, channelID string) string { + return fmt.Sprintf("%s/%s/%s", KeyPacketAckPrefix, channelPath(portID, channelID), KeySequencePrefix) +} + +// PacketReceiptPath defines the packet receipt store path +func PacketReceiptPath(portID, channelID string, sequence uint64) string { + return fmt.Sprintf("%s/%s/%s", KeyPacketReceiptPrefix, channelPath(portID, channelID), sequencePath(sequence)) +} + +// PacketReceiptKey returns the store key under which a packet +// receipt is stored +func PacketReceiptKey(portID, channelID string, sequence uint64) []byte { + return []byte(PacketReceiptPath(portID, channelID, sequence)) +} + +func channelPath(portID, channelID string) string { + return fmt.Sprintf("%s/%s/%s/%s", KeyPortPrefix, portID, KeyChannelPrefix, channelID) +} + +func sequencePath(sequence uint64) string { + return fmt.Sprintf("%s/%d", KeySequencePrefix, sequence) +} + +// ICS05 +// The following paths are the keys to the store as defined in https://github.com/cosmos/ics/tree/master/spec/ics-005-port-allocation#store-paths + +// PortPath defines the path under which port paths are stored on the capability module +func PortPath(portID string) string { + return fmt.Sprintf("%s/%s", KeyPortPrefix, portID) +} diff --git a/core/24-host/parse.go b/core/24-host/parse.go new file mode 100644 index 0000000000..8c3459500d --- /dev/null +++ b/core/24-host/parse.go @@ -0,0 +1,79 @@ +package host + +import ( + "strconv" + "strings" + + sdkerrors "github.com/cosmos/cosmos-sdk/types/errors" +) + +// ParseIdentifier parses the sequence from the identifier using the provided prefix. This function +// does not need to be used by counterparty chains. SDK generated connection and channel identifiers +// are required to use this format. 
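For example (editor's sketch, not part of this patch), an SDK-generated identifier parses as follows, while a doubled prefix fails the two-part split check:

    seq, err := host.ParseIdentifier("connection-3", "connection-")
    // seq == 3, err == nil
    _, err = host.ParseIdentifier("connection-connection-0", "connection-")
    // err != nil: splitting on the prefix yields more than two parts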
+func ParseIdentifier(identifier, prefix string) (uint64, error) { + if !strings.HasPrefix(identifier, prefix) { + return 0, sdkerrors.Wrapf(ErrInvalidID, "identifier doesn't contain prefix `%s`", prefix) + } + + splitStr := strings.Split(identifier, prefix) + if len(splitStr) != 2 { + return 0, sdkerrors.Wrapf(ErrInvalidID, "identifier must be in format: `%s{N}`", prefix) + } + + // sanity check + if splitStr[0] != "" { + return 0, sdkerrors.Wrapf(ErrInvalidID, "identifier must begin with prefix %s", prefix) + } + + sequence, err := strconv.ParseUint(splitStr[1], 10, 64) + if err != nil { + return 0, sdkerrors.Wrap(err, "failed to parse identifier sequence") + } + return sequence, nil +} + +// ParseConnectionPath returns the connection ID from a full path. It returns +// an error if the provided path is invalid. +func ParseConnectionPath(path string) (string, error) { + split := strings.Split(path, "/") + if len(split) != 2 { + return "", sdkerrors.Wrapf(ErrInvalidPath, "cannot parse connection path %s", path) + } + + return split[1], nil +} + +// ParseChannelPath returns the port and channel ID from a full path. It returns +// an error if the provided path is invalid. +func ParseChannelPath(path string) (string, string, error) { + split := strings.Split(path, "/") + if len(split) < 5 { + return "", "", sdkerrors.Wrapf(ErrInvalidPath, "cannot parse channel path %s", path) + } + + if split[1] != KeyPortPrefix || split[3] != KeyChannelPrefix { + return "", "", sdkerrors.Wrapf(ErrInvalidPath, "cannot parse channel path %s", path) + } + + return split[2], split[4], nil +} + +// MustParseConnectionPath returns the connection ID from a full path. Panics +// if the provided path is invalid. +func MustParseConnectionPath(path string) string { + connectionID, err := ParseConnectionPath(path) + if err != nil { + panic(err) + } + return connectionID +} + +// MustParseChannelPath returns the port and channel ID from a full path. Panics +// if the provided path is invalid. 
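An illustrative sketch (editor's addition, not part of this patch) of parsing a full channel store path with the non-panicking counterpart defined above:

    portID, channelID, err := host.ParseChannelPath("channelEnds/ports/transfer/channels/channel-0")
    // portID == "transfer", channelID == "channel-0", err == nil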
+func MustParseChannelPath(path string) (string, string) { + portID, channelID, err := ParseChannelPath(path) + if err != nil { + panic(err) + } + return portID, channelID +} diff --git a/core/24-host/parse_test.go b/core/24-host/parse_test.go new file mode 100644 index 0000000000..9f74bf5f68 --- /dev/null +++ b/core/24-host/parse_test.go @@ -0,0 +1,48 @@ +package host_test + +import ( + "math" + "testing" + + "github.com/stretchr/testify/require" + + connectiontypes "github.com/cosmos/cosmos-sdk/x/ibc/core/03-connection/types" + host "github.com/cosmos/cosmos-sdk/x/ibc/core/24-host" +) + +func TestParseIdentifier(t *testing.T) { + testCases := []struct { + name string + identifier string + prefix string + expSeq uint64 + expPass bool + }{ + {"valid 0", "connection-0", "connection-", 0, true}, + {"valid 1", "connection-1", "connection-", 1, true}, + {"valid large sequence", connectiontypes.FormatConnectionIdentifier(math.MaxUint64), "connection-", math.MaxUint64, true}, + // one above uint64 max + {"invalid uint64", "connection-18446744073709551616", "connection-", 0, false}, + // uint64 == 20 characters + {"invalid large sequence", "connection-2345682193567182931243", "connection-", 0, false}, + {"capital prefix", "Connection-0", "connection-", 0, false}, + {"double prefix", "connection-connection-0", "connection-", 0, false}, + {"doesn't have prefix", "connection-0", "prefix", 0, false}, + {"missing dash", "connection0", "connection-", 0, false}, + {"blank id", " ", "connection-", 0, false}, + {"empty id", "", "connection-", 0, false}, + {"negative sequence", "connection--1", "connection-", 0, false}, + } + + for _, tc := range testCases { + + seq, err := host.ParseIdentifier(tc.identifier, tc.prefix) + require.Equal(t, tc.expSeq, seq) + + if tc.expPass { + require.NoError(t, err, tc.name) + } else { + require.Error(t, err, tc.name) + } + } +} diff --git a/core/24-host/validate.go b/core/24-host/validate.go new file mode 100644 index 0000000000..10458e8d3a --- /dev/null +++ b/core/24-host/validate.go @@ -0,0 +1,114 @@ +package host + +import ( + "regexp" + "strings" + + sdkerrors "github.com/cosmos/cosmos-sdk/types/errors" +) + +// DefaultMaxCharacterLength defines the default maximum character length used +// in validation of identifiers including the client, connection, port and +// channel identifiers. +// +// NOTE: this restriction is specific to this golang implementation of IBC. If +// your use case demands a higher limit, please open an issue and we will consider +// adjusting this restriction. 
+const DefaultMaxCharacterLength = 64 + +// IsValidID defines a regular expression to check if the string consists of +// characters in one of the following categories only: +// - Alphanumeric +// - `.`, `_`, `+`, `-`, `#` +// - `[`, `]`, `<`, `>` +var IsValidID = regexp.MustCompile(`^[a-zA-Z0-9\.\_\+\-\#\[\]\<\>]+$`).MatchString + +// ICS 024 Identifier and Path Validation Implementation +// +// This file defines ValidateFn to validate identifier and path strings +// The spec for ICS 024 can be located here: +// https://github.com/cosmos/ics/tree/master/spec/ics-024-host-requirements + +// ValidateFn function type to validate path and identifier bytestrings +type ValidateFn func(string) error + +func defaultIdentifierValidator(id string, min, max int) error { //nolint:unparam + if strings.TrimSpace(id) == "" { + return sdkerrors.Wrap(ErrInvalidID, "identifier cannot be blank") + } + // valid id MUST NOT contain "/" separator + if strings.Contains(id, "/") { + return sdkerrors.Wrapf(ErrInvalidID, "identifier %s cannot contain separator '/'", id) + } + // valid id must fit the length requirements + if len(id) < min || len(id) > max { + return sdkerrors.Wrapf(ErrInvalidID, "identifier %s has invalid length: %d, must be between %d-%d characters", id, len(id), min, max) + } + // valid id must contain only alphanumeric or allowed special characters (see IsValidID) + if !IsValidID(id) { + return sdkerrors.Wrapf( + ErrInvalidID, + "identifier %s must contain only alphanumeric or the following characters: '.', '_', '+', '-', '#', '[', ']', '<', '>'", + id, + ) + } + return nil +} + +// ClientIdentifierValidator is the default validator function for Client identifiers. +// A valid Identifier must be between 9-64 characters and only contain alphanumeric and some allowed +// special characters (see IsValidID). +func ClientIdentifierValidator(id string) error { + return defaultIdentifierValidator(id, 9, DefaultMaxCharacterLength) +} + +// ConnectionIdentifierValidator is the default validator function for Connection identifiers. +// A valid Identifier must be between 10-64 characters and only contain alphanumeric and some allowed +// special characters (see IsValidID). +func ConnectionIdentifierValidator(id string) error { + return defaultIdentifierValidator(id, 10, DefaultMaxCharacterLength) +} + +// ChannelIdentifierValidator is the default validator function for Channel identifiers. +// A valid Identifier must be between 8-64 characters and only contain alphanumeric and some allowed +// special characters (see IsValidID). +func ChannelIdentifierValidator(id string) error { + return defaultIdentifierValidator(id, 8, DefaultMaxCharacterLength) +} + +// PortIdentifierValidator is the default validator function for Port identifiers. +// A valid Identifier must be between 2-64 characters and only contain alphanumeric and some allowed +// special characters (see IsValidID). +func PortIdentifierValidator(id string) error { + return defaultIdentifierValidator(id, 2, DefaultMaxCharacterLength) +} + +// NewPathValidator takes in an Identifier Validator function and returns +// a Path Validator function which requires path to consist of `/`-separated valid identifiers, +// where a valid identifier is between 1-64 characters, contains only alphanumeric and some allowed +// special characters (see IsValidID), and satisfies the custom `idValidator` function. 
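A minimal usage sketch (editor's addition, not part of this patch), composing the path validator with one of the identifier validators defined above:

    validatePath := host.NewPathValidator(host.PortIdentifierValidator)
    err := validatePath("transfer/channel-0") // nil: every path element is a valid identifier
    err = validatePath("transfer")            // error: the path contains no '/' separator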
+func NewPathValidator(idValidator ValidateFn) ValidateFn { + return func(path string) error { + pathArr := strings.Split(path, "/") + if len(pathArr) > 0 && pathArr[0] == path { + return sdkerrors.Wrapf(ErrInvalidPath, "path %s doesn't contain any separator '/'", path) + } + + for _, p := range pathArr { + // a path beginning or ending in a separator returns empty string elements. + if p == "" { + return sdkerrors.Wrapf(ErrInvalidPath, "path %s cannot begin or end with '/'", path) + } + + if err := idValidator(p); err != nil { + return err + } + // Each path element must either be a valid identifier or constant number + if err := defaultIdentifierValidator(p, 1, DefaultMaxCharacterLength); err != nil { + return sdkerrors.Wrapf(err, "path %s contains an invalid identifier: '%s'", path, p) + } + } + + return nil + } +} diff --git a/core/24-host/validate_test.go b/core/24-host/validate_test.go new file mode 100644 index 0000000000..40987bd157 --- /dev/null +++ b/core/24-host/validate_test.go @@ -0,0 +1,119 @@ +package host + +import ( + "fmt" + "strings" + "testing" + + "github.com/stretchr/testify/require" +) + +type testCase struct { + msg string + id string + expPass bool +} + +func TestDefaultIdentifierValidator(t *testing.T) { + testCases := []testCase{ + {"valid lowercase", "lowercaseid", true}, + {"valid id special chars", "._+-#[]<>._+-#[]<>", true}, + {"valid id lower and special chars", "lower._+-#[]<>", true}, + {"numeric id", "1234567890", true}, + {"uppercase id", "NOTLOWERCASE", true}, + {"numeric id", "1234567890", true}, + {"blank id", " ", false}, + {"id length out of range", "1", false}, + {"id is too long", "this identifier is too long to be used as a valid identifier", false}, + {"path-like id", "lower/case/id", false}, + {"invalid id", "(clientid)", false}, + {"empty string", "", false}, + } + + for _, tc := range testCases { + + err := ClientIdentifierValidator(tc.id) + err1 := ConnectionIdentifierValidator(tc.id) + err2 := ChannelIdentifierValidator(tc.id) + err3 := PortIdentifierValidator(tc.id) + if tc.expPass { + require.NoError(t, err, tc.msg) + require.NoError(t, err1, tc.msg) + require.NoError(t, err2, tc.msg) + require.NoError(t, err3, tc.msg) + } else { + require.Error(t, err, tc.msg) + require.Error(t, err1, tc.msg) + require.Error(t, err2, tc.msg) + require.Error(t, err3, tc.msg) + } + } +} + +func TestPathValidator(t *testing.T) { + testCases := []testCase{ + {"valid lowercase", "p/lowercaseid", true}, + {"numeric path", "p/239123", true}, + {"valid id special chars", "p/._+-#[]<>._+-#[]<>", true}, + {"valid id lower and special chars", "lower/._+-#[]<>", true}, + {"id length out of range", "p/l", true}, + {"uppercase id", "p/NOTLOWERCASE", true}, + {"invalid path", "lowercaseid", false}, + {"blank id", "p/ ", false}, + {"id length out of range", "p/12345678901234567890123456789012345678901234567890123456789012345", false}, + {"invalid id", "p/(clientid)", false}, + {"empty string", "", false}, + {"separators only", "////", false}, + {"just separator", "/", false}, + {"begins with separator", "/id", false}, + {"blank before separator", " /id", false}, + {"ends with separator", "id/", false}, + {"blank after separator", "id/ ", false}, + {"blanks with separator", " / ", false}, + } + + for _, tc := range testCases { + f := NewPathValidator(func(path string) error { + return nil + }) + + err := f(tc.id) + + if tc.expPass { + seps := strings.Count(tc.id, "/") + require.Equal(t, 1, seps) + require.NoError(t, err, tc.msg) + } else { + require.Error(t, err, 
tc.msg) + } + } +} + +func TestCustomPathValidator(t *testing.T) { + validateFn := NewPathValidator(func(path string) error { + if !strings.HasPrefix(path, "id_") { + return fmt.Errorf("identifier %s must start with 'id_'", path) + } + return nil + }) + + testCases := []testCase{ + {"valid custom path", "id_client/id_one", true}, + {"invalid path", "client", false}, + {"invalid custom path", "id_one/client", false}, + {"invalid identifier", "id_client/id_1234567890123456789012345678901234567890123457890123456789012345", false}, + {"separators only", "////", false}, + {"just separator", "/", false}, + {"ends with separator", "id_client/id_one/", false}, + {"begins with separator", "/id_client/id_one", false}, + } + + for _, tc := range testCases { + err := validateFn(tc.id) + if tc.expPass { + require.NoError(t, err, tc.msg) + } else { + require.Error(t, err, tc.msg) + } + } +} diff --git a/core/client/cli/cli.go b/core/client/cli/cli.go new file mode 100644 index 0000000000..bda4123be0 --- /dev/null +++ b/core/client/cli/cli.go @@ -0,0 +1,50 @@ +package cli + +import ( + "github.com/spf13/cobra" + + "github.com/cosmos/cosmos-sdk/client" + ibcclient "github.com/cosmos/cosmos-sdk/x/ibc/core/02-client" + connection "github.com/cosmos/cosmos-sdk/x/ibc/core/03-connection" + channel "github.com/cosmos/cosmos-sdk/x/ibc/core/04-channel" + host "github.com/cosmos/cosmos-sdk/x/ibc/core/24-host" +) + +// GetTxCmd returns the transaction commands for this module +func GetTxCmd() *cobra.Command { + ibcTxCmd := &cobra.Command{ + Use: host.ModuleName, + Short: "IBC transaction subcommands", + DisableFlagParsing: true, + SuggestionsMinimumDistance: 2, + RunE: client.ValidateCmd, + } + + ibcTxCmd.AddCommand( + ibcclient.GetTxCmd(), + connection.GetTxCmd(), + channel.GetTxCmd(), + ) + + return ibcTxCmd +} + +// GetQueryCmd returns the cli query commands for this module +func GetQueryCmd() *cobra.Command { + // Group ibc queries under a subcommand + ibcQueryCmd := &cobra.Command{ + Use: host.ModuleName, + Short: "Querying commands for the IBC module", + DisableFlagParsing: true, + SuggestionsMinimumDistance: 2, + RunE: client.ValidateCmd, + } + + ibcQueryCmd.AddCommand( + ibcclient.GetQueryCmd(), + connection.GetQueryCmd(), + channel.GetQueryCmd(), + ) + + return ibcQueryCmd +} diff --git a/core/client/query.go b/core/client/query.go new file mode 100644 index 0000000000..7055f1c740 --- /dev/null +++ b/core/client/query.go @@ -0,0 +1,67 @@ +package client + +import ( + "fmt" + + abci "github.com/tendermint/tendermint/abci/types" + + "github.com/cosmos/cosmos-sdk/client" + "github.com/cosmos/cosmos-sdk/codec" + clienttypes "github.com/cosmos/cosmos-sdk/x/ibc/core/02-client/types" + commitmenttypes "github.com/cosmos/cosmos-sdk/x/ibc/core/23-commitment/types" + host "github.com/cosmos/cosmos-sdk/x/ibc/core/24-host" +) + +// QueryTendermintProof performs an ABCI query with the given key and returns +// the value of the query, the proto encoded merkle proof, and the height of +// the Tendermint block containing the state root. The desired tendermint height +// to perform the query should be set in the client context. The query will be +// performed at one below this height (at the IAVL version) in order to obtain +// the correct merkle proof. Proof queries at height less than or equal to 2 are +// not supported. Queries with a client context height of 0 will perform a query +// at the latest state available. 
+// Issue: https://github.com/cosmos/cosmos-sdk/issues/6567 +func QueryTendermintProof(clientCtx client.Context, key []byte) ([]byte, []byte, clienttypes.Height, error) { + height := clientCtx.Height + + // ABCI queries at heights 1, 2 or less than or equal to 0 are not supported. + // Base app does not support queries for height less than or equal to 1. + // Therefore, a query at height 2 would be equivalent to a query at height 3. + // A height of 0 will query with the latest state. + if height != 0 && height <= 2 { + return nil, nil, clienttypes.Height{}, fmt.Errorf("proof queries at height <= 2 are not supported") + } + + // Use the IAVL height if a valid tendermint height is passed in. + // A height of 0 will query with the latest state. + if height != 0 { + height-- + } + + req := abci.RequestQuery{ + Path: fmt.Sprintf("store/%s/key", host.StoreKey), + Height: height, + Data: key, + Prove: true, + } + + res, err := clientCtx.QueryABCI(req) + if err != nil { + return nil, nil, clienttypes.Height{}, err + } + + merkleProof, err := commitmenttypes.ConvertProofs(res.ProofOps) + if err != nil { + return nil, nil, clienttypes.Height{}, err + } + + cdc := codec.NewProtoCodec(clientCtx.InterfaceRegistry) + + proofBz, err := cdc.MarshalBinaryBare(&merkleProof) + if err != nil { + return nil, nil, clienttypes.Height{}, err + } + + revision := clienttypes.ParseChainID(clientCtx.ChainID) + return res.Value, proofBz, clienttypes.NewHeight(revision, uint64(res.Height)+1), nil +} diff --git a/core/exported/channel.go b/core/exported/channel.go new file mode 100644 index 0000000000..6a0d542c1e --- /dev/null +++ b/core/exported/channel.go @@ -0,0 +1,32 @@ +package exported + +// ChannelI defines the standard interface for a channel end. +type ChannelI interface { + GetState() int32 + GetOrdering() int32 + GetCounterparty() CounterpartyChannelI + GetConnectionHops() []string + GetVersion() string + ValidateBasic() error +} + +// CounterpartyChannelI defines the standard interface for a channel end's +// counterparty. +type CounterpartyChannelI interface { + GetPortID() string + GetChannelID() string + ValidateBasic() error +} + +// PacketI defines the standard interface for IBC packets +type PacketI interface { + GetSequence() uint64 + GetTimeoutHeight() Height + GetTimeoutTimestamp() uint64 + GetSourcePort() string + GetSourceChannel() string + GetDestPort() string + GetDestChannel() string + GetData() []byte + ValidateBasic() error +} diff --git a/core/exported/client.go b/core/exported/client.go new file mode 100644 index 0000000000..3d552b0772 --- /dev/null +++ b/core/exported/client.go @@ -0,0 +1,223 @@ +package exported + +import ( + ics23 "github.com/confio/ics23/go" + proto "github.com/gogo/protobuf/proto" + + "github.com/cosmos/cosmos-sdk/codec" + sdk "github.com/cosmos/cosmos-sdk/types" +) + +const ( + // TypeClientMisbehaviour is the shared evidence misbehaviour type + TypeClientMisbehaviour string = "client_misbehaviour" + + // Solomachine is used to indicate that the light client is a solo machine. + Solomachine string = "06-solomachine" + + // Tendermint is used to indicate that the client uses the Tendermint Consensus Algorithm. + Tendermint string = "07-tendermint" + + // Localhost is the client type for a localhost client. It is also used as the clientID + // for the localhost client. + Localhost string = "09-localhost" +) + +// ClientState defines the required common functions for light clients. 
+type ClientState interface { + proto.Message + + ClientType() string + GetLatestHeight() Height + IsFrozen() bool + GetFrozenHeight() Height + Validate() error + GetProofSpecs() []*ics23.ProofSpec + + // Initialization function + // Clients must validate the initial consensus state, and may store any client-specific metadata + // necessary for correct light client operation + Initialize(sdk.Context, codec.BinaryMarshaler, sdk.KVStore, ConsensusState) error + + // Genesis function + ExportMetadata(sdk.KVStore) []GenesisMetadata + + // Update and Misbehaviour functions + + CheckHeaderAndUpdateState(sdk.Context, codec.BinaryMarshaler, sdk.KVStore, Header) (ClientState, ConsensusState, error) + CheckMisbehaviourAndUpdateState(sdk.Context, codec.BinaryMarshaler, sdk.KVStore, Misbehaviour) (ClientState, error) + CheckSubstituteAndUpdateState(ctx sdk.Context, cdc codec.BinaryMarshaler, subjectClientStore, substituteClientStore sdk.KVStore, substituteClient ClientState, height Height) (ClientState, error) + + // Upgrade functions + // NOTE: proof heights are not included as upgrade to a new revision is expected to pass only on the last + // height committed by the current revision. Clients are responsible for ensuring that the planned last + // height of the current revision is somehow encoded in the proof verification process. + // This is to ensure that no premature upgrades occur, since upgrade plans committed to by the counterparty + // may be cancelled or modified before the last planned height. + VerifyUpgradeAndUpdateState( + ctx sdk.Context, + cdc codec.BinaryMarshaler, + store sdk.KVStore, + newClient ClientState, + newConsState ConsensusState, + proofUpgradeClient, + proofUpgradeConsState []byte, + ) (ClientState, ConsensusState, error) + // Utility function that zeroes out any client customizable fields in client state + // Ledger enforced fields are maintained while all custom fields are zero values + // Used to verify upgrades + ZeroCustomFields() ClientState + + // State verification functions + + VerifyClientState( + store sdk.KVStore, + cdc codec.BinaryMarshaler, + height Height, + prefix Prefix, + counterpartyClientIdentifier string, + proof []byte, + clientState ClientState, + ) error + VerifyClientConsensusState( + store sdk.KVStore, + cdc codec.BinaryMarshaler, + height Height, + counterpartyClientIdentifier string, + consensusHeight Height, + prefix Prefix, + proof []byte, + consensusState ConsensusState, + ) error + VerifyConnectionState( + store sdk.KVStore, + cdc codec.BinaryMarshaler, + height Height, + prefix Prefix, + proof []byte, + connectionID string, + connectionEnd ConnectionI, + ) error + VerifyChannelState( + store sdk.KVStore, + cdc codec.BinaryMarshaler, + height Height, + prefix Prefix, + proof []byte, + portID, + channelID string, + channel ChannelI, + ) error + VerifyPacketCommitment( + store sdk.KVStore, + cdc codec.BinaryMarshaler, + height Height, + currentTimestamp uint64, + delayPeriod uint64, + prefix Prefix, + proof []byte, + portID, + channelID string, + sequence uint64, + commitmentBytes []byte, + ) error + VerifyPacketAcknowledgement( + store sdk.KVStore, + cdc codec.BinaryMarshaler, + height Height, + currentTimestamp uint64, + delayPeriod uint64, + prefix Prefix, + proof []byte, + portID, + channelID string, + sequence uint64, + acknowledgement []byte, + ) error + VerifyPacketReceiptAbsence( + store sdk.KVStore, + cdc codec.BinaryMarshaler, + height Height, + currentTimestamp uint64, + delayPeriod uint64, + prefix Prefix, + proof []byte, + 
portID, + channelID string, + sequence uint64, + ) error + VerifyNextSequenceRecv( + store sdk.KVStore, + cdc codec.BinaryMarshaler, + height Height, + currentTimestamp uint64, + delayPeriod uint64, + prefix Prefix, + proof []byte, + portID, + channelID string, + nextSequenceRecv uint64, + ) error +} + +// ConsensusState is the state of the consensus process +type ConsensusState interface { + proto.Message + + ClientType() string // Consensus kind + + // GetRoot returns the commitment root of the consensus state, + // which is used for key-value pair verification. + GetRoot() Root + + // GetTimestamp returns the timestamp (in nanoseconds) of the consensus state + GetTimestamp() uint64 + + ValidateBasic() error +} + +// Misbehaviour defines counterparty misbehaviour for a specific consensus type +type Misbehaviour interface { + proto.Message + + ClientType() string + GetClientID() string + ValidateBasic() error + + // Height at which the infraction occurred + GetHeight() Height +} + +// Header is the consensus state update information +type Header interface { + proto.Message + + ClientType() string + GetHeight() Height + ValidateBasic() error +} + +// Height is a wrapper interface over clienttypes.Height +// all clients must use the concrete implementation in types +type Height interface { + IsZero() bool + LT(Height) bool + LTE(Height) bool + EQ(Height) bool + GT(Height) bool + GTE(Height) bool + GetRevisionNumber() uint64 + GetRevisionHeight() uint64 + Increment() Height + Decrement() (Height, bool) + String() string +} + +// GenesisMetadata is a wrapper interface over clienttypes.GenesisMetadata +// all clients must use the concrete implementation in types +type GenesisMetadata interface { + // return store key that contains metadata without clientID-prefix + GetKey() []byte + // returns metadata value + GetValue() []byte +} diff --git a/core/exported/commitment.go b/core/exported/commitment.go new file mode 100644 index 0000000000..b4f2c0c18f --- /dev/null +++ b/core/exported/commitment.go @@ -0,0 +1,45 @@ +package exported + +import ics23 "github.com/confio/ics23/go" + +// ICS 023 Types Implementation +// +// This file includes types defined under +// https://github.com/cosmos/ics/tree/master/spec/ics-023-vector-commitments + +// spec:Path and spec:Value are defined as bytestring + +// Root implements spec:CommitmentRoot. +// A root is constructed from a set of key-value pairs, +// and the inclusion or non-inclusion of an arbitrary key-value pair +// can be proven with the proof. +type Root interface { + GetHash() []byte + Empty() bool +} + +// Prefix implements spec:CommitmentPrefix. +// Prefix represents the common "prefix" that a set of keys shares. +type Prefix interface { + Bytes() []byte + Empty() bool +} + +// Path implements spec:CommitmentPath. +// A path is the additional information provided to the verification function. +type Path interface { + String() string + Empty() bool +} + +// Proof implements spec:CommitmentProof. +// Proof can prove whether the key-value pair is a part of the Root or not. +// Each proof has designated key-value pair it is able to prove. +// Proofs includes key but value is provided dynamically at the verification time. 
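A sketch (editor's addition, not part of this patch) of how the concrete MerkleProof implementation of this interface from 23-commitment is exercised; res, appHash and value are assumed to come from an ABCI query issued with Prove set to true:

    merkleProof, err := commitmenttypes.ConvertProofs(res.ProofOps)
    if err != nil {
        return err
    }
    root := commitmenttypes.NewMerkleRoot(appHash)
    path := commitmenttypes.NewMerklePath("ibc", "clients/07-tendermint-0/clientState")
    // the proof is chained: the IAVL leaf is verified first, then the store root against appHash
    err = merkleProof.VerifyMembership(commitmenttypes.GetSDKSpecs(), &root, path, value)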
+type Proof interface { + VerifyMembership([]*ics23.ProofSpec, Root, Path, []byte) error + VerifyNonMembership([]*ics23.ProofSpec, Root, Path) error + Empty() bool + + ValidateBasic() error +} diff --git a/core/exported/connection.go b/core/exported/connection.go new file mode 100644 index 0000000000..8f705daff1 --- /dev/null +++ b/core/exported/connection.go @@ -0,0 +1,26 @@ +package exported + +// ConnectionI describes the required methods for a connection. +type ConnectionI interface { + GetClientID() string + GetState() int32 + GetCounterparty() CounterpartyConnectionI + GetVersions() []Version + GetDelayPeriod() uint64 + ValidateBasic() error +} + +// CounterpartyConnectionI describes the required methods for a counterparty connection. +type CounterpartyConnectionI interface { + GetClientID() string + GetConnectionID() string + GetPrefix() Prefix + ValidateBasic() error +} + +// Version defines an IBC version used in connection handshake negotiation. +type Version interface { + GetIdentifier() string + GetFeatures() []string + VerifyProposedVersion(Version) error +} diff --git a/core/genesis.go b/core/genesis.go new file mode 100644 index 0000000000..7d5d60b934 --- /dev/null +++ b/core/genesis.go @@ -0,0 +1,27 @@ +package ibc + +import ( + sdk "github.com/cosmos/cosmos-sdk/types" + client "github.com/cosmos/cosmos-sdk/x/ibc/core/02-client" + connection "github.com/cosmos/cosmos-sdk/x/ibc/core/03-connection" + channel "github.com/cosmos/cosmos-sdk/x/ibc/core/04-channel" + "github.com/cosmos/cosmos-sdk/x/ibc/core/keeper" + "github.com/cosmos/cosmos-sdk/x/ibc/core/types" +) + +// InitGenesis initializes the ibc state from a provided genesis +// state. +func InitGenesis(ctx sdk.Context, k keeper.Keeper, createLocalhost bool, gs *types.GenesisState) { + client.InitGenesis(ctx, k.ClientKeeper, gs.ClientGenesis) + connection.InitGenesis(ctx, k.ConnectionKeeper, gs.ConnectionGenesis) + channel.InitGenesis(ctx, k.ChannelKeeper, gs.ChannelGenesis) +} + +// ExportGenesis returns the ibc exported genesis. 
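For context (editor's sketch, not part of this patch), the InitGenesis/ExportGenesis pair in this file is expected to round-trip, which the genesis tests later in this diff exercise; ctx and app are assumed to be a sdk.Context and a simapp instance:

    gs := ibc.ExportGenesis(ctx, *app.IBCKeeper)
    // re-initializing from the exported state should not panic and should restore the same state
    ibc.InitGenesis(ctx, *app.IBCKeeper, true, gs)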
+func ExportGenesis(ctx sdk.Context, k keeper.Keeper) *types.GenesisState { + return &types.GenesisState{ + ClientGenesis: client.ExportGenesis(ctx, k.ClientKeeper), + ConnectionGenesis: connection.ExportGenesis(ctx, k.ConnectionKeeper), + ChannelGenesis: channel.ExportGenesis(ctx, k.ChannelKeeper), + } +} diff --git a/core/genesis_test.go b/core/genesis_test.go new file mode 100644 index 0000000000..c29feef7f8 --- /dev/null +++ b/core/genesis_test.go @@ -0,0 +1,370 @@ +package ibc_test + +import ( + "fmt" + "testing" + + "github.com/stretchr/testify/suite" + tmproto "github.com/tendermint/tendermint/proto/tendermint/types" + + "github.com/cosmos/cosmos-sdk/codec" + "github.com/cosmos/cosmos-sdk/simapp" + ibc "github.com/cosmos/cosmos-sdk/x/ibc/core" + clienttypes "github.com/cosmos/cosmos-sdk/x/ibc/core/02-client/types" + connectiontypes "github.com/cosmos/cosmos-sdk/x/ibc/core/03-connection/types" + channeltypes "github.com/cosmos/cosmos-sdk/x/ibc/core/04-channel/types" + commitmenttypes "github.com/cosmos/cosmos-sdk/x/ibc/core/23-commitment/types" + "github.com/cosmos/cosmos-sdk/x/ibc/core/exported" + "github.com/cosmos/cosmos-sdk/x/ibc/core/types" + ibctmtypes "github.com/cosmos/cosmos-sdk/x/ibc/light-clients/07-tendermint/types" + localhosttypes "github.com/cosmos/cosmos-sdk/x/ibc/light-clients/09-localhost/types" + ibctesting "github.com/cosmos/cosmos-sdk/x/ibc/testing" +) + +const ( + connectionID = "connection-0" + clientID = "07-tendermint-0" + connectionID2 = "connection-1" + clientID2 = "07-tendermin-1" + localhostID = exported.Localhost + "-1" + + port1 = "firstport" + port2 = "secondport" + + channel1 = "channel-0" + channel2 = "channel-1" +) + +var clientHeight = clienttypes.NewHeight(0, 10) + +type IBCTestSuite struct { + suite.Suite + + coordinator *ibctesting.Coordinator + + chainA *ibctesting.TestChain + chainB *ibctesting.TestChain +} + +// SetupTest creates a coordinator with 2 test chains. 
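As an editor's note (not part of this patch), tests built on this suite typically wire the two chains together through the coordinator before exercising genesis, as TestExportGenesis below does:

    suite.coordinator.Setup(suite.chainA, suite.chainB, channeltypes.UNORDERED)
    // extra clients can be created on demand
    suite.coordinator.CreateClient(suite.chainA, suite.chainB, exported.Tendermint)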
+func (suite *IBCTestSuite) SetupTest() { + suite.coordinator = ibctesting.NewCoordinator(suite.T(), 2) + + suite.chainA = suite.coordinator.GetChain(ibctesting.GetChainID(0)) + suite.chainB = suite.coordinator.GetChain(ibctesting.GetChainID(1)) +} + +func TestIBCTestSuite(t *testing.T) { + suite.Run(t, new(IBCTestSuite)) +} + +func (suite *IBCTestSuite) TestValidateGenesis() { + header := suite.chainA.CreateTMClientHeader(suite.chainA.ChainID, suite.chainA.CurrentHeader.Height, clienttypes.NewHeight(0, uint64(suite.chainA.CurrentHeader.Height-1)), suite.chainA.CurrentHeader.Time, suite.chainA.Vals, suite.chainA.Vals, suite.chainA.Signers) + + testCases := []struct { + name string + genState *types.GenesisState + expPass bool + }{ + { + name: "default", + genState: types.DefaultGenesisState(), + expPass: true, + }, + { + name: "valid genesis", + genState: &types.GenesisState{ + ClientGenesis: clienttypes.NewGenesisState( + []clienttypes.IdentifiedClientState{ + clienttypes.NewIdentifiedClientState( + clientID, ibctmtypes.NewClientState(suite.chainA.ChainID, ibctmtypes.DefaultTrustLevel, ibctesting.TrustingPeriod, ibctesting.UnbondingPeriod, ibctesting.MaxClockDrift, clientHeight, commitmenttypes.GetSDKSpecs(), ibctesting.UpgradePath, false, false), + ), + clienttypes.NewIdentifiedClientState( + localhostID, localhosttypes.NewClientState("chaindID", clientHeight), + ), + }, + []clienttypes.ClientConsensusStates{ + clienttypes.NewClientConsensusStates( + clientID, + []clienttypes.ConsensusStateWithHeight{ + clienttypes.NewConsensusStateWithHeight( + header.GetHeight().(clienttypes.Height), + ibctmtypes.NewConsensusState( + header.GetTime(), commitmenttypes.NewMerkleRoot(header.Header.AppHash), header.Header.NextValidatorsHash, + ), + ), + }, + ), + }, + []clienttypes.IdentifiedGenesisMetadata{ + clienttypes.NewIdentifiedGenesisMetadata( + clientID, + []clienttypes.GenesisMetadata{ + clienttypes.NewGenesisMetadata([]byte("key1"), []byte("val1")), + clienttypes.NewGenesisMetadata([]byte("key2"), []byte("val2")), + }, + ), + }, + clienttypes.NewParams(exported.Tendermint, exported.Localhost), + true, + 2, + ), + ConnectionGenesis: connectiontypes.NewGenesisState( + []connectiontypes.IdentifiedConnection{ + connectiontypes.NewIdentifiedConnection(connectionID, connectiontypes.NewConnectionEnd(connectiontypes.INIT, clientID, connectiontypes.NewCounterparty(clientID2, connectionID2, commitmenttypes.NewMerklePrefix([]byte("prefix"))), []*connectiontypes.Version{ibctesting.ConnectionVersion}, 0)), + }, + []connectiontypes.ConnectionPaths{ + connectiontypes.NewConnectionPaths(clientID, []string{connectionID}), + }, + 0, + ), + ChannelGenesis: channeltypes.NewGenesisState( + []channeltypes.IdentifiedChannel{ + channeltypes.NewIdentifiedChannel( + port1, channel1, channeltypes.NewChannel( + channeltypes.INIT, channeltypes.ORDERED, + channeltypes.NewCounterparty(port2, channel2), []string{connectionID}, ibctesting.DefaultChannelVersion, + ), + ), + }, + []channeltypes.PacketState{ + channeltypes.NewPacketState(port2, channel2, 1, []byte("ack")), + }, + []channeltypes.PacketState{ + channeltypes.NewPacketState(port2, channel2, 1, []byte("")), + }, + []channeltypes.PacketState{ + channeltypes.NewPacketState(port1, channel1, 1, []byte("commit_hash")), + }, + []channeltypes.PacketSequence{ + channeltypes.NewPacketSequence(port1, channel1, 1), + }, + []channeltypes.PacketSequence{ + channeltypes.NewPacketSequence(port2, channel2, 1), + }, + []channeltypes.PacketSequence{ + 
channeltypes.NewPacketSequence(port2, channel2, 1), + }, + 0, + ), + }, + expPass: true, + }, + { + name: "invalid client genesis", + genState: &types.GenesisState{ + ClientGenesis: clienttypes.NewGenesisState( + []clienttypes.IdentifiedClientState{ + clienttypes.NewIdentifiedClientState( + clientID, ibctmtypes.NewClientState(suite.chainA.ChainID, ibctmtypes.DefaultTrustLevel, ibctesting.TrustingPeriod, ibctesting.UnbondingPeriod, ibctesting.MaxClockDrift, clientHeight, commitmenttypes.GetSDKSpecs(), ibctesting.UpgradePath, false, false), + ), + clienttypes.NewIdentifiedClientState( + localhostID, localhosttypes.NewClientState("(chaindID)", clienttypes.ZeroHeight()), + ), + }, + nil, + []clienttypes.IdentifiedGenesisMetadata{ + clienttypes.NewIdentifiedGenesisMetadata( + clientID, + []clienttypes.GenesisMetadata{ + clienttypes.NewGenesisMetadata([]byte(""), []byte("val1")), + clienttypes.NewGenesisMetadata([]byte("key2"), []byte("")), + }, + ), + }, + clienttypes.NewParams(exported.Tendermint), + false, + 2, + ), + ConnectionGenesis: connectiontypes.DefaultGenesisState(), + }, + expPass: false, + }, + { + name: "invalid connection genesis", + genState: &types.GenesisState{ + ClientGenesis: clienttypes.DefaultGenesisState(), + ConnectionGenesis: connectiontypes.NewGenesisState( + []connectiontypes.IdentifiedConnection{ + connectiontypes.NewIdentifiedConnection(connectionID, connectiontypes.NewConnectionEnd(connectiontypes.INIT, "(CLIENTIDONE)", connectiontypes.NewCounterparty(clientID, connectionID2, commitmenttypes.NewMerklePrefix([]byte("prefix"))), []*connectiontypes.Version{connectiontypes.NewVersion("1.1", nil)}, 0)), + }, + []connectiontypes.ConnectionPaths{ + connectiontypes.NewConnectionPaths(clientID, []string{connectionID}), + }, + 0, + ), + }, + expPass: false, + }, + { + name: "invalid channel genesis", + genState: &types.GenesisState{ + ClientGenesis: clienttypes.DefaultGenesisState(), + ConnectionGenesis: connectiontypes.DefaultGenesisState(), + ChannelGenesis: channeltypes.GenesisState{ + Acknowledgements: []channeltypes.PacketState{ + channeltypes.NewPacketState("(portID)", channel1, 1, []byte("ack")), + }, + }, + }, + expPass: false, + }, + } + + for _, tc := range testCases { + tc := tc + err := tc.genState.Validate() + if tc.expPass { + suite.Require().NoError(err, tc.name) + } else { + suite.Require().Error(err, tc.name) + } + } +} + +func (suite *IBCTestSuite) TestInitGenesis() { + header := suite.chainA.CreateTMClientHeader(suite.chainA.ChainID, suite.chainA.CurrentHeader.Height, clienttypes.NewHeight(0, uint64(suite.chainA.CurrentHeader.Height-1)), suite.chainA.CurrentHeader.Time, suite.chainA.Vals, suite.chainA.Vals, suite.chainA.Signers) + + testCases := []struct { + name string + genState *types.GenesisState + }{ + { + name: "default", + genState: types.DefaultGenesisState(), + }, + { + name: "valid genesis", + genState: &types.GenesisState{ + ClientGenesis: clienttypes.NewGenesisState( + []clienttypes.IdentifiedClientState{ + clienttypes.NewIdentifiedClientState( + clientID, ibctmtypes.NewClientState(suite.chainA.ChainID, ibctmtypes.DefaultTrustLevel, ibctesting.TrustingPeriod, ibctesting.UnbondingPeriod, ibctesting.MaxClockDrift, clientHeight, commitmenttypes.GetSDKSpecs(), ibctesting.UpgradePath, false, false), + ), + clienttypes.NewIdentifiedClientState( + exported.Localhost, localhosttypes.NewClientState("chaindID", clientHeight), + ), + }, + []clienttypes.ClientConsensusStates{ + clienttypes.NewClientConsensusStates( + clientID, + 
[]clienttypes.ConsensusStateWithHeight{ + clienttypes.NewConsensusStateWithHeight( + header.GetHeight().(clienttypes.Height), + ibctmtypes.NewConsensusState( + header.GetTime(), commitmenttypes.NewMerkleRoot(header.Header.AppHash), header.Header.NextValidatorsHash, + ), + ), + }, + ), + }, + []clienttypes.IdentifiedGenesisMetadata{ + clienttypes.NewIdentifiedGenesisMetadata( + clientID, + []clienttypes.GenesisMetadata{ + clienttypes.NewGenesisMetadata([]byte("key1"), []byte("val1")), + clienttypes.NewGenesisMetadata([]byte("key2"), []byte("val2")), + }, + ), + }, + clienttypes.NewParams(exported.Tendermint, exported.Localhost), + true, + 0, + ), + ConnectionGenesis: connectiontypes.NewGenesisState( + []connectiontypes.IdentifiedConnection{ + connectiontypes.NewIdentifiedConnection(connectionID, connectiontypes.NewConnectionEnd(connectiontypes.INIT, clientID, connectiontypes.NewCounterparty(clientID2, connectionID2, commitmenttypes.NewMerklePrefix([]byte("prefix"))), []*connectiontypes.Version{ibctesting.ConnectionVersion}, 0)), + }, + []connectiontypes.ConnectionPaths{ + connectiontypes.NewConnectionPaths(clientID, []string{connectionID}), + }, + 0, + ), + ChannelGenesis: channeltypes.NewGenesisState( + []channeltypes.IdentifiedChannel{ + channeltypes.NewIdentifiedChannel( + port1, channel1, channeltypes.NewChannel( + channeltypes.INIT, channeltypes.ORDERED, + channeltypes.NewCounterparty(port2, channel2), []string{connectionID}, ibctesting.DefaultChannelVersion, + ), + ), + }, + []channeltypes.PacketState{ + channeltypes.NewPacketState(port2, channel2, 1, []byte("ack")), + }, + []channeltypes.PacketState{ + channeltypes.NewPacketState(port2, channel2, 1, []byte("")), + }, + []channeltypes.PacketState{ + channeltypes.NewPacketState(port1, channel1, 1, []byte("commit_hash")), + }, + []channeltypes.PacketSequence{ + channeltypes.NewPacketSequence(port1, channel1, 1), + }, + []channeltypes.PacketSequence{ + channeltypes.NewPacketSequence(port2, channel2, 1), + }, + []channeltypes.PacketSequence{ + channeltypes.NewPacketSequence(port2, channel2, 1), + }, + 0, + ), + }, + }, + } + + for _, tc := range testCases { + app := simapp.Setup(false) + + suite.NotPanics(func() { + ibc.InitGenesis(app.BaseApp.NewContext(false, tmproto.Header{Height: 1}), *app.IBCKeeper, true, tc.genState) + }) + } +} + +func (suite *IBCTestSuite) TestExportGenesis() { + testCases := []struct { + msg string + malleate func() + }{ + { + "success", + func() { + // creates clients + suite.coordinator.Setup(suite.chainA, suite.chainB, channeltypes.UNORDERED) + // create extra clients + suite.coordinator.CreateClient(suite.chainA, suite.chainB, exported.Tendermint) + suite.coordinator.CreateClient(suite.chainA, suite.chainB, exported.Tendermint) + }, + }, + } + + for _, tc := range testCases { + suite.Run(fmt.Sprintf("Case %s", tc.msg), func() { + suite.SetupTest() + + tc.malleate() + + var gs *types.GenesisState + suite.NotPanics(func() { + gs = ibc.ExportGenesis(suite.chainA.GetContext(), *suite.chainA.App.IBCKeeper) + }) + + // init genesis based on export + suite.NotPanics(func() { + ibc.InitGenesis(suite.chainA.GetContext(), *suite.chainA.App.IBCKeeper, true, gs) + }) + + suite.NotPanics(func() { + cdc := codec.NewProtoCodec(suite.chainA.App.InterfaceRegistry()) + genState := cdc.MustMarshalJSON(gs) + cdc.MustUnmarshalJSON(genState, gs) + }) + + // init genesis based on marshal and unmarshal + suite.NotPanics(func() { + ibc.InitGenesis(suite.chainA.GetContext(), *suite.chainA.App.IBCKeeper, true, gs) + }) + }) + } +} diff 
--git a/core/handler.go b/core/handler.go new file mode 100644 index 0000000000..c8e4dfc898 --- /dev/null +++ b/core/handler.go @@ -0,0 +1,98 @@ +package ibc + +import ( + sdk "github.com/cosmos/cosmos-sdk/types" + sdkerrors "github.com/cosmos/cosmos-sdk/types/errors" + clienttypes "github.com/cosmos/cosmos-sdk/x/ibc/core/02-client/types" + connectiontypes "github.com/cosmos/cosmos-sdk/x/ibc/core/03-connection/types" + channeltypes "github.com/cosmos/cosmos-sdk/x/ibc/core/04-channel/types" + "github.com/cosmos/cosmos-sdk/x/ibc/core/keeper" +) + +// NewHandler defines the IBC handler +func NewHandler(k keeper.Keeper) sdk.Handler { + return func(ctx sdk.Context, msg sdk.Msg) (*sdk.Result, error) { + ctx = ctx.WithEventManager(sdk.NewEventManager()) + + switch msg := msg.(type) { + // IBC client msg interface types + case *clienttypes.MsgCreateClient: + res, err := k.CreateClient(sdk.WrapSDKContext(ctx), msg) + return sdk.WrapServiceResult(ctx, res, err) + + case *clienttypes.MsgUpdateClient: + res, err := k.UpdateClient(sdk.WrapSDKContext(ctx), msg) + return sdk.WrapServiceResult(ctx, res, err) + + case *clienttypes.MsgUpgradeClient: + res, err := k.UpgradeClient(sdk.WrapSDKContext(ctx), msg) + return sdk.WrapServiceResult(ctx, res, err) + + case *clienttypes.MsgSubmitMisbehaviour: + res, err := k.SubmitMisbehaviour(sdk.WrapSDKContext(ctx), msg) + return sdk.WrapServiceResult(ctx, res, err) + + // IBC connection msgs + case *connectiontypes.MsgConnectionOpenInit: + res, err := k.ConnectionOpenInit(sdk.WrapSDKContext(ctx), msg) + return sdk.WrapServiceResult(ctx, res, err) + + case *connectiontypes.MsgConnectionOpenTry: + res, err := k.ConnectionOpenTry(sdk.WrapSDKContext(ctx), msg) + return sdk.WrapServiceResult(ctx, res, err) + + case *connectiontypes.MsgConnectionOpenAck: + res, err := k.ConnectionOpenAck(sdk.WrapSDKContext(ctx), msg) + return sdk.WrapServiceResult(ctx, res, err) + + case *connectiontypes.MsgConnectionOpenConfirm: + res, err := k.ConnectionOpenConfirm(sdk.WrapSDKContext(ctx), msg) + return sdk.WrapServiceResult(ctx, res, err) + + // IBC channel msgs + case *channeltypes.MsgChannelOpenInit: + res, err := k.ChannelOpenInit(sdk.WrapSDKContext(ctx), msg) + return sdk.WrapServiceResult(ctx, res, err) + + case *channeltypes.MsgChannelOpenTry: + res, err := k.ChannelOpenTry(sdk.WrapSDKContext(ctx), msg) + return sdk.WrapServiceResult(ctx, res, err) + + case *channeltypes.MsgChannelOpenAck: + res, err := k.ChannelOpenAck(sdk.WrapSDKContext(ctx), msg) + return sdk.WrapServiceResult(ctx, res, err) + + case *channeltypes.MsgChannelOpenConfirm: + res, err := k.ChannelOpenConfirm(sdk.WrapSDKContext(ctx), msg) + return sdk.WrapServiceResult(ctx, res, err) + + case *channeltypes.MsgChannelCloseInit: + res, err := k.ChannelCloseInit(sdk.WrapSDKContext(ctx), msg) + return sdk.WrapServiceResult(ctx, res, err) + + case *channeltypes.MsgChannelCloseConfirm: + res, err := k.ChannelCloseConfirm(sdk.WrapSDKContext(ctx), msg) + return sdk.WrapServiceResult(ctx, res, err) + + // IBC packet msgs get routed to the appropriate module callback + case *channeltypes.MsgRecvPacket: + res, err := k.RecvPacket(sdk.WrapSDKContext(ctx), msg) + return sdk.WrapServiceResult(ctx, res, err) + + case *channeltypes.MsgAcknowledgement: + res, err := k.Acknowledgement(sdk.WrapSDKContext(ctx), msg) + return sdk.WrapServiceResult(ctx, res, err) + + case *channeltypes.MsgTimeout: + res, err := k.Timeout(sdk.WrapSDKContext(ctx), msg) + return sdk.WrapServiceResult(ctx, res, err) + + case *channeltypes.MsgTimeoutOnClose: 
+ res, err := k.TimeoutOnClose(sdk.WrapSDKContext(ctx), msg) + return sdk.WrapServiceResult(ctx, res, err) + + default: + return nil, sdkerrors.Wrapf(sdkerrors.ErrUnknownRequest, "unrecognized IBC message type: %T", msg) + } + } +} diff --git a/core/keeper/grpc_query.go b/core/keeper/grpc_query.go new file mode 100644 index 0000000000..f406d2e86f --- /dev/null +++ b/core/keeper/grpc_query.go @@ -0,0 +1,124 @@ +package keeper + +import ( + "context" + + clienttypes "github.com/cosmos/cosmos-sdk/x/ibc/core/02-client/types" + connectiontypes "github.com/cosmos/cosmos-sdk/x/ibc/core/03-connection/types" + channeltypes "github.com/cosmos/cosmos-sdk/x/ibc/core/04-channel/types" +) + +// ClientState implements the IBC QueryServer interface +func (q Keeper) ClientState(c context.Context, req *clienttypes.QueryClientStateRequest) (*clienttypes.QueryClientStateResponse, error) { + return q.ClientKeeper.ClientState(c, req) +} + +// ClientStates implements the IBC QueryServer interface +func (q Keeper) ClientStates(c context.Context, req *clienttypes.QueryClientStatesRequest) (*clienttypes.QueryClientStatesResponse, error) { + return q.ClientKeeper.ClientStates(c, req) +} + +// ConsensusState implements the IBC QueryServer interface +func (q Keeper) ConsensusState(c context.Context, req *clienttypes.QueryConsensusStateRequest) (*clienttypes.QueryConsensusStateResponse, error) { + return q.ClientKeeper.ConsensusState(c, req) +} + +// ConsensusStates implements the IBC QueryServer interface +func (q Keeper) ConsensusStates(c context.Context, req *clienttypes.QueryConsensusStatesRequest) (*clienttypes.QueryConsensusStatesResponse, error) { + return q.ClientKeeper.ConsensusStates(c, req) +} + +// ClientParams implements the IBC QueryServer interface +func (q Keeper) ClientParams(c context.Context, req *clienttypes.QueryClientParamsRequest) (*clienttypes.QueryClientParamsResponse, error) { + return q.ClientKeeper.ClientParams(c, req) +} + +// Connection implements the IBC QueryServer interface +func (q Keeper) Connection(c context.Context, req *connectiontypes.QueryConnectionRequest) (*connectiontypes.QueryConnectionResponse, error) { + return q.ConnectionKeeper.Connection(c, req) +} + +// Connections implements the IBC QueryServer interface +func (q Keeper) Connections(c context.Context, req *connectiontypes.QueryConnectionsRequest) (*connectiontypes.QueryConnectionsResponse, error) { + return q.ConnectionKeeper.Connections(c, req) +} + +// ClientConnections implements the IBC QueryServer interface +func (q Keeper) ClientConnections(c context.Context, req *connectiontypes.QueryClientConnectionsRequest) (*connectiontypes.QueryClientConnectionsResponse, error) { + return q.ConnectionKeeper.ClientConnections(c, req) +} + +// ConnectionClientState implements the IBC QueryServer interface +func (q Keeper) ConnectionClientState(c context.Context, req *connectiontypes.QueryConnectionClientStateRequest) (*connectiontypes.QueryConnectionClientStateResponse, error) { + return q.ConnectionKeeper.ConnectionClientState(c, req) +} + +// ConnectionConsensusState implements the IBC QueryServer interface +func (q Keeper) ConnectionConsensusState(c context.Context, req *connectiontypes.QueryConnectionConsensusStateRequest) (*connectiontypes.QueryConnectionConsensusStateResponse, error) { + return q.ConnectionKeeper.ConnectionConsensusState(c, req) +} + +// Channel implements the IBC QueryServer interface +func (q Keeper) Channel(c context.Context, req *channeltypes.QueryChannelRequest) (*channeltypes.QueryChannelResponse, 
error) { + return q.ChannelKeeper.Channel(c, req) +} + +// Channels implements the IBC QueryServer interface +func (q Keeper) Channels(c context.Context, req *channeltypes.QueryChannelsRequest) (*channeltypes.QueryChannelsResponse, error) { + return q.ChannelKeeper.Channels(c, req) +} + +// ConnectionChannels implements the IBC QueryServer interface +func (q Keeper) ConnectionChannels(c context.Context, req *channeltypes.QueryConnectionChannelsRequest) (*channeltypes.QueryConnectionChannelsResponse, error) { + return q.ChannelKeeper.ConnectionChannels(c, req) +} + +// ChannelClientState implements the IBC QueryServer interface +func (q Keeper) ChannelClientState(c context.Context, req *channeltypes.QueryChannelClientStateRequest) (*channeltypes.QueryChannelClientStateResponse, error) { + return q.ChannelKeeper.ChannelClientState(c, req) +} + +// ChannelConsensusState implements the IBC QueryServer interface +func (q Keeper) ChannelConsensusState(c context.Context, req *channeltypes.QueryChannelConsensusStateRequest) (*channeltypes.QueryChannelConsensusStateResponse, error) { + return q.ChannelKeeper.ChannelConsensusState(c, req) +} + +// PacketCommitment implements the IBC QueryServer interface +func (q Keeper) PacketCommitment(c context.Context, req *channeltypes.QueryPacketCommitmentRequest) (*channeltypes.QueryPacketCommitmentResponse, error) { + return q.ChannelKeeper.PacketCommitment(c, req) +} + +// PacketCommitments implements the IBC QueryServer interface +func (q Keeper) PacketCommitments(c context.Context, req *channeltypes.QueryPacketCommitmentsRequest) (*channeltypes.QueryPacketCommitmentsResponse, error) { + return q.ChannelKeeper.PacketCommitments(c, req) +} + +// PacketReceipt implements the IBC QueryServer interface +func (q Keeper) PacketReceipt(c context.Context, req *channeltypes.QueryPacketReceiptRequest) (*channeltypes.QueryPacketReceiptResponse, error) { + return q.ChannelKeeper.PacketReceipt(c, req) +} + +// PacketAcknowledgement implements the IBC QueryServer interface +func (q Keeper) PacketAcknowledgement(c context.Context, req *channeltypes.QueryPacketAcknowledgementRequest) (*channeltypes.QueryPacketAcknowledgementResponse, error) { + return q.ChannelKeeper.PacketAcknowledgement(c, req) +} + +// PacketAcknowledgements implements the IBC QueryServer interface +func (q Keeper) PacketAcknowledgements(c context.Context, req *channeltypes.QueryPacketAcknowledgementsRequest) (*channeltypes.QueryPacketAcknowledgementsResponse, error) { + return q.ChannelKeeper.PacketAcknowledgements(c, req) +} + +// UnreceivedPackets implements the IBC QueryServer interface +func (q Keeper) UnreceivedPackets(c context.Context, req *channeltypes.QueryUnreceivedPacketsRequest) (*channeltypes.QueryUnreceivedPacketsResponse, error) { + return q.ChannelKeeper.UnreceivedPackets(c, req) +} + +// UnreceivedAcks implements the IBC QueryServer interface +func (q Keeper) UnreceivedAcks(c context.Context, req *channeltypes.QueryUnreceivedAcksRequest) (*channeltypes.QueryUnreceivedAcksResponse, error) { + return q.ChannelKeeper.UnreceivedAcks(c, req) +} + +// NextSequenceReceive implements the IBC QueryServer interface +func (q Keeper) NextSequenceReceive(c context.Context, req *channeltypes.QueryNextSequenceReceiveRequest) (*channeltypes.QueryNextSequenceReceiveResponse, error) { + return q.ChannelKeeper.NextSequenceReceive(c, req) +} diff --git a/core/keeper/keeper.go b/core/keeper/keeper.go new file mode 100644 index 0000000000..5f9abc382e --- /dev/null +++ b/core/keeper/keeper.go @@ -0,0 +1,65 
@@ +package keeper + +import ( + "github.com/cosmos/cosmos-sdk/codec" + sdk "github.com/cosmos/cosmos-sdk/types" + capabilitykeeper "github.com/cosmos/cosmos-sdk/x/capability/keeper" + clientkeeper "github.com/cosmos/cosmos-sdk/x/ibc/core/02-client/keeper" + clienttypes "github.com/cosmos/cosmos-sdk/x/ibc/core/02-client/types" + connectionkeeper "github.com/cosmos/cosmos-sdk/x/ibc/core/03-connection/keeper" + channelkeeper "github.com/cosmos/cosmos-sdk/x/ibc/core/04-channel/keeper" + portkeeper "github.com/cosmos/cosmos-sdk/x/ibc/core/05-port/keeper" + porttypes "github.com/cosmos/cosmos-sdk/x/ibc/core/05-port/types" + "github.com/cosmos/cosmos-sdk/x/ibc/core/types" + paramtypes "github.com/cosmos/cosmos-sdk/x/params/types" +) + +var _ types.QueryServer = (*Keeper)(nil) + +// Keeper defines each ICS keeper for IBC +type Keeper struct { + // implements gRPC QueryServer interface + types.QueryServer + + cdc codec.BinaryMarshaler + + ClientKeeper clientkeeper.Keeper + ConnectionKeeper connectionkeeper.Keeper + ChannelKeeper channelkeeper.Keeper + PortKeeper portkeeper.Keeper + Router *porttypes.Router +} + +// NewKeeper creates a new ibc Keeper +func NewKeeper( + cdc codec.BinaryMarshaler, key sdk.StoreKey, paramSpace paramtypes.Subspace, + stakingKeeper clienttypes.StakingKeeper, scopedKeeper capabilitykeeper.ScopedKeeper, +) *Keeper { + clientKeeper := clientkeeper.NewKeeper(cdc, key, paramSpace, stakingKeeper) + connectionKeeper := connectionkeeper.NewKeeper(cdc, key, clientKeeper) + portKeeper := portkeeper.NewKeeper(scopedKeeper) + channelKeeper := channelkeeper.NewKeeper(cdc, key, clientKeeper, connectionKeeper, portKeeper, scopedKeeper) + + return &Keeper{ + cdc: cdc, + ClientKeeper: clientKeeper, + ConnectionKeeper: connectionKeeper, + ChannelKeeper: channelKeeper, + PortKeeper: portKeeper, + } +} + +// Codec returns the IBC module codec. +func (k Keeper) Codec() codec.BinaryMarshaler { + return k.cdc +} + +// SetRouter sets the Router in IBC Keeper and seals it. The method panics if +// there is an existing router that's already sealed. +func (k *Keeper) SetRouter(rtr *porttypes.Router) { + if k.Router != nil && k.Router.Sealed() { + panic("cannot reset a sealed router") + } + k.Router = rtr + k.Router.Seal() +} diff --git a/core/keeper/msg_server.go b/core/keeper/msg_server.go new file mode 100644 index 0000000000..dcddcaed16 --- /dev/null +++ b/core/keeper/msg_server.go @@ -0,0 +1,616 @@ +package keeper + +import ( + "context" + + "github.com/armon/go-metrics" + + "github.com/cosmos/cosmos-sdk/telemetry" + sdk "github.com/cosmos/cosmos-sdk/types" + sdkerrors "github.com/cosmos/cosmos-sdk/types/errors" + clienttypes "github.com/cosmos/cosmos-sdk/x/ibc/core/02-client/types" + connectiontypes "github.com/cosmos/cosmos-sdk/x/ibc/core/03-connection/types" + channel "github.com/cosmos/cosmos-sdk/x/ibc/core/04-channel" + channeltypes "github.com/cosmos/cosmos-sdk/x/ibc/core/04-channel/types" + porttypes "github.com/cosmos/cosmos-sdk/x/ibc/core/05-port/types" +) + +var _ clienttypes.MsgServer = Keeper{} +var _ connectiontypes.MsgServer = Keeper{} +var _ channeltypes.MsgServer = Keeper{} + +// CreateClient defines a rpc handler method for MsgCreateClient. 
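+// It unpacks the Any-encoded client and consensus states, creates the client through the 02-client keeper, and emits the client creation and message events.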
+func (k Keeper) CreateClient(goCtx context.Context, msg *clienttypes.MsgCreateClient) (*clienttypes.MsgCreateClientResponse, error) { + ctx := sdk.UnwrapSDKContext(goCtx) + + clientState, err := clienttypes.UnpackClientState(msg.ClientState) + if err != nil { + return nil, err + } + + consensusState, err := clienttypes.UnpackConsensusState(msg.ConsensusState) + if err != nil { + return nil, err + } + + clientID, err := k.ClientKeeper.CreateClient(ctx, clientState, consensusState) + if err != nil { + return nil, err + } + + ctx.EventManager().EmitEvents(sdk.Events{ + sdk.NewEvent( + clienttypes.EventTypeCreateClient, + sdk.NewAttribute(clienttypes.AttributeKeyClientID, clientID), + sdk.NewAttribute(clienttypes.AttributeKeyClientType, clientState.ClientType()), + sdk.NewAttribute(clienttypes.AttributeKeyConsensusHeight, clientState.GetLatestHeight().String()), + ), + sdk.NewEvent( + sdk.EventTypeMessage, + sdk.NewAttribute(sdk.AttributeKeyModule, clienttypes.AttributeValueCategory), + ), + }) + + return &clienttypes.MsgCreateClientResponse{}, nil +} + +// UpdateClient defines a rpc handler method for MsgUpdateClient. +func (k Keeper) UpdateClient(goCtx context.Context, msg *clienttypes.MsgUpdateClient) (*clienttypes.MsgUpdateClientResponse, error) { + ctx := sdk.UnwrapSDKContext(goCtx) + + header, err := clienttypes.UnpackHeader(msg.Header) + if err != nil { + return nil, err + } + + if err = k.ClientKeeper.UpdateClient(ctx, msg.ClientId, header); err != nil { + return nil, err + } + + ctx.EventManager().EmitEvent( + sdk.NewEvent( + sdk.EventTypeMessage, + sdk.NewAttribute(sdk.AttributeKeyModule, clienttypes.AttributeValueCategory), + ), + ) + + return &clienttypes.MsgUpdateClientResponse{}, nil +} + +// UpgradeClient defines a rpc handler method for MsgUpgradeClient. +func (k Keeper) UpgradeClient(goCtx context.Context, msg *clienttypes.MsgUpgradeClient) (*clienttypes.MsgUpgradeClientResponse, error) { + ctx := sdk.UnwrapSDKContext(goCtx) + + upgradedClient, err := clienttypes.UnpackClientState(msg.ClientState) + if err != nil { + return nil, err + } + upgradedConsState, err := clienttypes.UnpackConsensusState(msg.ConsensusState) + if err != nil { + return nil, err + } + + if err = k.ClientKeeper.UpgradeClient(ctx, msg.ClientId, upgradedClient, upgradedConsState, + msg.ProofUpgradeClient, msg.ProofUpgradeConsensusState); err != nil { + return nil, err + } + + ctx.EventManager().EmitEvent( + sdk.NewEvent( + sdk.EventTypeMessage, + sdk.NewAttribute(sdk.AttributeKeyModule, clienttypes.AttributeValueCategory), + ), + ) + + return &clienttypes.MsgUpgradeClientResponse{}, nil +} + +// SubmitMisbehaviour defines a rpc handler method for MsgSubmitMisbehaviour. 
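+// It unpacks the misbehaviour evidence and passes it to the 02-client keeper, which verifies the evidence and updates the client state, before a misbehaviour event is emitted.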
+func (k Keeper) SubmitMisbehaviour(goCtx context.Context, msg *clienttypes.MsgSubmitMisbehaviour) (*clienttypes.MsgSubmitMisbehaviourResponse, error) { + ctx := sdk.UnwrapSDKContext(goCtx) + + misbehaviour, err := clienttypes.UnpackMisbehaviour(msg.Misbehaviour) + if err != nil { + return nil, err + } + + if err := k.ClientKeeper.CheckMisbehaviourAndUpdateState(ctx, misbehaviour); err != nil { + return nil, sdkerrors.Wrap(err, "failed to process misbehaviour for IBC client") + } + + ctx.EventManager().EmitEvent( + sdk.NewEvent( + clienttypes.EventTypeSubmitMisbehaviour, + sdk.NewAttribute(clienttypes.AttributeKeyClientID, msg.ClientId), + sdk.NewAttribute(clienttypes.AttributeKeyClientType, misbehaviour.ClientType()), + sdk.NewAttribute(clienttypes.AttributeKeyConsensusHeight, misbehaviour.GetHeight().String()), + ), + ) + + return &clienttypes.MsgSubmitMisbehaviourResponse{}, nil +} + +// ConnectionOpenInit defines a rpc handler method for MsgConnectionOpenInit. +func (k Keeper) ConnectionOpenInit(goCtx context.Context, msg *connectiontypes.MsgConnectionOpenInit) (*connectiontypes.MsgConnectionOpenInitResponse, error) { + ctx := sdk.UnwrapSDKContext(goCtx) + + connectionID, err := k.ConnectionKeeper.ConnOpenInit(ctx, msg.ClientId, msg.Counterparty, msg.Version, msg.DelayPeriod) + if err != nil { + return nil, sdkerrors.Wrap(err, "connection handshake open init failed") + } + + ctx.EventManager().EmitEvents(sdk.Events{ + sdk.NewEvent( + connectiontypes.EventTypeConnectionOpenInit, + sdk.NewAttribute(connectiontypes.AttributeKeyConnectionID, connectionID), + sdk.NewAttribute(connectiontypes.AttributeKeyClientID, msg.ClientId), + sdk.NewAttribute(connectiontypes.AttributeKeyCounterpartyClientID, msg.Counterparty.ClientId), + sdk.NewAttribute(connectiontypes.AttributeKeyCounterpartyConnectionID, msg.Counterparty.ConnectionId), + ), + sdk.NewEvent( + sdk.EventTypeMessage, + sdk.NewAttribute(sdk.AttributeKeyModule, connectiontypes.AttributeValueCategory), + ), + }) + + return &connectiontypes.MsgConnectionOpenInitResponse{}, nil +} + +// ConnectionOpenTry defines a rpc handler method for MsgConnectionOpenTry. +func (k Keeper) ConnectionOpenTry(goCtx context.Context, msg *connectiontypes.MsgConnectionOpenTry) (*connectiontypes.MsgConnectionOpenTryResponse, error) { + ctx := sdk.UnwrapSDKContext(goCtx) + + targetClient, err := clienttypes.UnpackClientState(msg.ClientState) + if err != nil { + return nil, sdkerrors.Wrapf(err, "client in msg is not exported.ClientState. 
invalid client: %v.", targetClient) + } + + connectionID, err := k.ConnectionKeeper.ConnOpenTry( + ctx, msg.PreviousConnectionId, msg.Counterparty, msg.DelayPeriod, msg.ClientId, targetClient, + connectiontypes.ProtoVersionsToExported(msg.CounterpartyVersions), msg.ProofInit, msg.ProofClient, msg.ProofConsensus, + msg.ProofHeight, msg.ConsensusHeight, + ) + if err != nil { + return nil, sdkerrors.Wrap(err, "connection handshake open try failed") + } + + ctx.EventManager().EmitEvents(sdk.Events{ + sdk.NewEvent( + connectiontypes.EventTypeConnectionOpenTry, + sdk.NewAttribute(connectiontypes.AttributeKeyConnectionID, connectionID), + sdk.NewAttribute(connectiontypes.AttributeKeyClientID, msg.ClientId), + sdk.NewAttribute(connectiontypes.AttributeKeyCounterpartyClientID, msg.Counterparty.ClientId), + sdk.NewAttribute(connectiontypes.AttributeKeyCounterpartyConnectionID, msg.Counterparty.ConnectionId), + ), + sdk.NewEvent( + sdk.EventTypeMessage, + sdk.NewAttribute(sdk.AttributeKeyModule, connectiontypes.AttributeValueCategory), + ), + }) + + return &connectiontypes.MsgConnectionOpenTryResponse{}, nil +} + +// ConnectionOpenAck defines a rpc handler method for MsgConnectionOpenAck. +func (k Keeper) ConnectionOpenAck(goCtx context.Context, msg *connectiontypes.MsgConnectionOpenAck) (*connectiontypes.MsgConnectionOpenAckResponse, error) { + ctx := sdk.UnwrapSDKContext(goCtx) + targetClient, err := clienttypes.UnpackClientState(msg.ClientState) + if err != nil { + return nil, sdkerrors.Wrapf(err, "client in msg is not exported.ClientState. invalid client: %v", targetClient) + } + + if err := k.ConnectionKeeper.ConnOpenAck( + ctx, msg.ConnectionId, targetClient, msg.Version, msg.CounterpartyConnectionId, + msg.ProofTry, msg.ProofClient, msg.ProofConsensus, + msg.ProofHeight, msg.ConsensusHeight, + ); err != nil { + return nil, sdkerrors.Wrap(err, "connection handshake open ack failed") + } + + connectionEnd, _ := k.ConnectionKeeper.GetConnection(ctx, msg.ConnectionId) + + ctx.EventManager().EmitEvents(sdk.Events{ + sdk.NewEvent( + connectiontypes.EventTypeConnectionOpenAck, + sdk.NewAttribute(connectiontypes.AttributeKeyConnectionID, msg.ConnectionId), + sdk.NewAttribute(connectiontypes.AttributeKeyClientID, connectionEnd.ClientId), + sdk.NewAttribute(connectiontypes.AttributeKeyCounterpartyClientID, connectionEnd.Counterparty.ClientId), + sdk.NewAttribute(connectiontypes.AttributeKeyCounterpartyConnectionID, connectionEnd.Counterparty.ConnectionId), + ), + sdk.NewEvent( + sdk.EventTypeMessage, + sdk.NewAttribute(sdk.AttributeKeyModule, connectiontypes.AttributeValueCategory), + ), + }) + + return &connectiontypes.MsgConnectionOpenAckResponse{}, nil +} + +// ConnectionOpenConfirm defines a rpc handler method for MsgConnectionOpenConfirm. 
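+// It delegates proof verification to the 03-connection keeper's ConnOpenConfirm and emits the corresponding handshake and message events on success.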
+func (k Keeper) ConnectionOpenConfirm(goCtx context.Context, msg *connectiontypes.MsgConnectionOpenConfirm) (*connectiontypes.MsgConnectionOpenConfirmResponse, error) { + ctx := sdk.UnwrapSDKContext(goCtx) + + if err := k.ConnectionKeeper.ConnOpenConfirm( + ctx, msg.ConnectionId, msg.ProofAck, msg.ProofHeight, + ); err != nil { + return nil, sdkerrors.Wrap(err, "connection handshake open confirm failed") + } + + connectionEnd, _ := k.ConnectionKeeper.GetConnection(ctx, msg.ConnectionId) + + ctx.EventManager().EmitEvents(sdk.Events{ + sdk.NewEvent( + connectiontypes.EventTypeConnectionOpenConfirm, + sdk.NewAttribute(connectiontypes.AttributeKeyConnectionID, msg.ConnectionId), + sdk.NewAttribute(connectiontypes.AttributeKeyClientID, connectionEnd.ClientId), + sdk.NewAttribute(connectiontypes.AttributeKeyCounterpartyClientID, connectionEnd.Counterparty.ClientId), + sdk.NewAttribute(connectiontypes.AttributeKeyCounterpartyConnectionID, connectionEnd.Counterparty.ConnectionId), + ), + sdk.NewEvent( + sdk.EventTypeMessage, + sdk.NewAttribute(sdk.AttributeKeyModule, connectiontypes.AttributeValueCategory), + ), + }) + + return &connectiontypes.MsgConnectionOpenConfirmResponse{}, nil +} + +// ChannelOpenInit defines a rpc handler method for MsgChannelOpenInit. +func (k Keeper) ChannelOpenInit(goCtx context.Context, msg *channeltypes.MsgChannelOpenInit) (*channeltypes.MsgChannelOpenInitResponse, error) { + ctx := sdk.UnwrapSDKContext(goCtx) + + // Lookup module by port capability + module, portCap, err := k.PortKeeper.LookupModuleByPort(ctx, msg.PortId) + if err != nil { + return nil, sdkerrors.Wrap(err, "could not retrieve module from port-id") + } + + _, channelID, cap, err := channel.HandleMsgChannelOpenInit(ctx, k.ChannelKeeper, portCap, msg) + if err != nil { + return nil, err + } + + // Retrieve callbacks from router + cbs, ok := k.Router.GetRoute(module) + if !ok { + return nil, sdkerrors.Wrapf(porttypes.ErrInvalidRoute, "route not found to module: %s", module) + } + + if err = cbs.OnChanOpenInit(ctx, msg.Channel.Ordering, msg.Channel.ConnectionHops, msg.PortId, channelID, cap, msg.Channel.Counterparty, msg.Channel.Version); err != nil { + return nil, sdkerrors.Wrap(err, "channel open init callback failed") + } + + return &channeltypes.MsgChannelOpenInitResponse{}, nil +} + +// ChannelOpenTry defines a rpc handler method for MsgChannelOpenTry. +func (k Keeper) ChannelOpenTry(goCtx context.Context, msg *channeltypes.MsgChannelOpenTry) (*channeltypes.MsgChannelOpenTryResponse, error) { + ctx := sdk.UnwrapSDKContext(goCtx) + // Lookup module by port capability + module, portCap, err := k.PortKeeper.LookupModuleByPort(ctx, msg.PortId) + if err != nil { + return nil, sdkerrors.Wrap(err, "could not retrieve module from port-id") + } + + _, channelID, cap, err := channel.HandleMsgChannelOpenTry(ctx, k.ChannelKeeper, portCap, msg) + if err != nil { + return nil, err + } + + // Retrieve callbacks from router + cbs, ok := k.Router.GetRoute(module) + if !ok { + return nil, sdkerrors.Wrapf(porttypes.ErrInvalidRoute, "route not found to module: %s", module) + } + + if err = cbs.OnChanOpenTry(ctx, msg.Channel.Ordering, msg.Channel.ConnectionHops, msg.PortId, channelID, cap, msg.Channel.Counterparty, msg.Channel.Version, msg.CounterpartyVersion); err != nil { + return nil, sdkerrors.Wrap(err, "channel open try callback failed") + } + + return &channeltypes.MsgChannelOpenTryResponse{}, nil +} + +// ChannelOpenAck defines a rpc handler method for MsgChannelOpenAck. 
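+// It resolves the owning module via the channel capability, executes the core handshake logic, and then invokes the module's OnChanOpenAck callback.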
+func (k Keeper) ChannelOpenAck(goCtx context.Context, msg *channeltypes.MsgChannelOpenAck) (*channeltypes.MsgChannelOpenAckResponse, error) { + ctx := sdk.UnwrapSDKContext(goCtx) + + // Lookup module by channel capability + module, cap, err := k.ChannelKeeper.LookupModuleByChannel(ctx, msg.PortId, msg.ChannelId) + if err != nil { + return nil, sdkerrors.Wrap(err, "could not retrieve module from port-id") + } + + // Retrieve callbacks from router + cbs, ok := k.Router.GetRoute(module) + if !ok { + return nil, sdkerrors.Wrapf(porttypes.ErrInvalidRoute, "route not found to module: %s", module) + } + + _, err = channel.HandleMsgChannelOpenAck(ctx, k.ChannelKeeper, cap, msg) + if err != nil { + return nil, err + } + + if err = cbs.OnChanOpenAck(ctx, msg.PortId, msg.ChannelId, msg.CounterpartyVersion); err != nil { + return nil, sdkerrors.Wrap(err, "channel open ack callback failed") + } + + return &channeltypes.MsgChannelOpenAckResponse{}, nil +} + +// ChannelOpenConfirm defines a rpc handler method for MsgChannelOpenConfirm. +func (k Keeper) ChannelOpenConfirm(goCtx context.Context, msg *channeltypes.MsgChannelOpenConfirm) (*channeltypes.MsgChannelOpenConfirmResponse, error) { + ctx := sdk.UnwrapSDKContext(goCtx) + + // Lookup module by channel capability + module, cap, err := k.ChannelKeeper.LookupModuleByChannel(ctx, msg.PortId, msg.ChannelId) + if err != nil { + return nil, sdkerrors.Wrap(err, "could not retrieve module from port-id") + } + + // Retrieve callbacks from router + cbs, ok := k.Router.GetRoute(module) + if !ok { + return nil, sdkerrors.Wrapf(porttypes.ErrInvalidRoute, "route not found to module: %s", module) + } + + _, err = channel.HandleMsgChannelOpenConfirm(ctx, k.ChannelKeeper, cap, msg) + if err != nil { + return nil, err + } + + if err = cbs.OnChanOpenConfirm(ctx, msg.PortId, msg.ChannelId); err != nil { + return nil, sdkerrors.Wrap(err, "channel open confirm callback failed") + } + + return &channeltypes.MsgChannelOpenConfirmResponse{}, nil +} + +// ChannelCloseInit defines a rpc handler method for MsgChannelCloseInit. +func (k Keeper) ChannelCloseInit(goCtx context.Context, msg *channeltypes.MsgChannelCloseInit) (*channeltypes.MsgChannelCloseInitResponse, error) { + ctx := sdk.UnwrapSDKContext(goCtx) + // Lookup module by channel capability + module, cap, err := k.ChannelKeeper.LookupModuleByChannel(ctx, msg.PortId, msg.ChannelId) + if err != nil { + return nil, sdkerrors.Wrap(err, "could not retrieve module from port-id") + } + + // Retrieve callbacks from router + cbs, ok := k.Router.GetRoute(module) + if !ok { + return nil, sdkerrors.Wrapf(porttypes.ErrInvalidRoute, "route not found to module: %s", module) + } + + if err = cbs.OnChanCloseInit(ctx, msg.PortId, msg.ChannelId); err != nil { + return nil, sdkerrors.Wrap(err, "channel close init callback failed") + } + + _, err = channel.HandleMsgChannelCloseInit(ctx, k.ChannelKeeper, cap, msg) + if err != nil { + return nil, err + } + + return &channeltypes.MsgChannelCloseInitResponse{}, nil +} + +// ChannelCloseConfirm defines a rpc handler method for MsgChannelCloseConfirm. 
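+// It resolves the owning module via the channel capability, invokes the module's OnChanCloseConfirm callback, and then executes the core close-handshake logic.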
+func (k Keeper) ChannelCloseConfirm(goCtx context.Context, msg *channeltypes.MsgChannelCloseConfirm) (*channeltypes.MsgChannelCloseConfirmResponse, error) { + ctx := sdk.UnwrapSDKContext(goCtx) + + // Lookup module by channel capability + module, cap, err := k.ChannelKeeper.LookupModuleByChannel(ctx, msg.PortId, msg.ChannelId) + if err != nil { + return nil, sdkerrors.Wrap(err, "could not retrieve module from port-id") + } + + // Retrieve callbacks from router + cbs, ok := k.Router.GetRoute(module) + if !ok { + return nil, sdkerrors.Wrapf(porttypes.ErrInvalidRoute, "route not found to module: %s", module) + } + + if err = cbs.OnChanCloseConfirm(ctx, msg.PortId, msg.ChannelId); err != nil { + return nil, sdkerrors.Wrap(err, "channel close confirm callback failed") + } + + _, err = channel.HandleMsgChannelCloseConfirm(ctx, k.ChannelKeeper, cap, msg) + if err != nil { + return nil, err + } + + return &channeltypes.MsgChannelCloseConfirmResponse{}, nil +} + +// RecvPacket defines a rpc handler method for MsgRecvPacket. +func (k Keeper) RecvPacket(goCtx context.Context, msg *channeltypes.MsgRecvPacket) (*channeltypes.MsgRecvPacketResponse, error) { + ctx := sdk.UnwrapSDKContext(goCtx) + + // Lookup module by channel capability + module, cap, err := k.ChannelKeeper.LookupModuleByChannel(ctx, msg.Packet.DestinationPort, msg.Packet.DestinationChannel) + if err != nil { + return nil, sdkerrors.Wrap(err, "could not retrieve module from port-id") + } + + // Retrieve callbacks from router + cbs, ok := k.Router.GetRoute(module) + if !ok { + return nil, sdkerrors.Wrapf(porttypes.ErrInvalidRoute, "route not found to module: %s", module) + } + + // Perform TAO verification + if err := k.ChannelKeeper.RecvPacket(ctx, cap, msg.Packet, msg.ProofCommitment, msg.ProofHeight); err != nil { + return nil, sdkerrors.Wrap(err, "receive packet verification failed") + } + + // Perform application logic callback + _, ack, err := cbs.OnRecvPacket(ctx, msg.Packet) + if err != nil { + return nil, sdkerrors.Wrap(err, "receive packet callback failed") + } + + // Set packet acknowledgement only if the acknowledgement is not nil. + // NOTE: IBC applications modules may call the WriteAcknowledgement asynchronously if the + // acknowledgement is nil. + if ack != nil { + if err := k.ChannelKeeper.WriteAcknowledgement(ctx, cap, msg.Packet, ack); err != nil { + return nil, err + } + } + + defer func() { + telemetry.IncrCounterWithLabels( + []string{"tx", "msg", "ibc", msg.Type()}, + 1, + []metrics.Label{ + telemetry.NewLabel("source-port", msg.Packet.SourcePort), + telemetry.NewLabel("source-channel", msg.Packet.SourceChannel), + telemetry.NewLabel("destination-port", msg.Packet.DestinationPort), + telemetry.NewLabel("destination-channel", msg.Packet.DestinationChannel), + }, + ) + }() + + return &channeltypes.MsgRecvPacketResponse{}, nil +} + +// Timeout defines a rpc handler method for MsgTimeout. 
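+// It performs TAO verification of the timeout proof, invokes the module's OnTimeoutPacket callback, and deletes the packet commitment via TimeoutExecuted.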
+func (k Keeper) Timeout(goCtx context.Context, msg *channeltypes.MsgTimeout) (*channeltypes.MsgTimeoutResponse, error) { + ctx := sdk.UnwrapSDKContext(goCtx) + // Lookup module by channel capability + module, cap, err := k.ChannelKeeper.LookupModuleByChannel(ctx, msg.Packet.SourcePort, msg.Packet.SourceChannel) + if err != nil { + return nil, sdkerrors.Wrap(err, "could not retrieve module from port-id") + } + + // Retrieve callbacks from router + cbs, ok := k.Router.GetRoute(module) + if !ok { + return nil, sdkerrors.Wrapf(porttypes.ErrInvalidRoute, "route not found to module: %s", module) + } + + // Perform TAO verification + if err := k.ChannelKeeper.TimeoutPacket(ctx, msg.Packet, msg.ProofUnreceived, msg.ProofHeight, msg.NextSequenceRecv); err != nil { + return nil, sdkerrors.Wrap(err, "timeout packet verification failed") + } + + // Perform application logic callback + _, err = cbs.OnTimeoutPacket(ctx, msg.Packet) + if err != nil { + return nil, sdkerrors.Wrap(err, "timeout packet callback failed") + } + + // Delete packet commitment + if err = k.ChannelKeeper.TimeoutExecuted(ctx, cap, msg.Packet); err != nil { + return nil, err + } + + defer func() { + telemetry.IncrCounterWithLabels( + []string{"ibc", "timeout", "packet"}, + 1, + []metrics.Label{ + telemetry.NewLabel("source-port", msg.Packet.SourcePort), + telemetry.NewLabel("source-channel", msg.Packet.SourceChannel), + telemetry.NewLabel("destination-port", msg.Packet.DestinationPort), + telemetry.NewLabel("destination-channel", msg.Packet.DestinationChannel), + telemetry.NewLabel("timeout-type", "height"), + }, + ) + }() + + return &channeltypes.MsgTimeoutResponse{}, nil +} + +// TimeoutOnClose defines a rpc handler method for MsgTimeoutOnClose. +func (k Keeper) TimeoutOnClose(goCtx context.Context, msg *channeltypes.MsgTimeoutOnClose) (*channeltypes.MsgTimeoutOnCloseResponse, error) { + ctx := sdk.UnwrapSDKContext(goCtx) + + // Lookup module by channel capability + module, cap, err := k.ChannelKeeper.LookupModuleByChannel(ctx, msg.Packet.SourcePort, msg.Packet.SourceChannel) + if err != nil { + return nil, sdkerrors.Wrap(err, "could not retrieve module from port-id") + } + + // Retrieve callbacks from router + cbs, ok := k.Router.GetRoute(module) + if !ok { + return nil, sdkerrors.Wrapf(porttypes.ErrInvalidRoute, "route not found to module: %s", module) + } + + // Perform TAO verification + if err := k.ChannelKeeper.TimeoutOnClose(ctx, cap, msg.Packet, msg.ProofUnreceived, msg.ProofClose, msg.ProofHeight, msg.NextSequenceRecv); err != nil { + return nil, sdkerrors.Wrap(err, "timeout on close packet verification failed") + } + + // Perform application logic callback + // NOTE: MsgTimeout and MsgTimeoutOnClose use the same "OnTimeoutPacket" + // application logic callback. 
+ _, err = cbs.OnTimeoutPacket(ctx, msg.Packet) + if err != nil { + return nil, sdkerrors.Wrap(err, "timeout packet callback failed") + } + + // Delete packet commitment + if err = k.ChannelKeeper.TimeoutExecuted(ctx, cap, msg.Packet); err != nil { + return nil, err + } + + defer func() { + telemetry.IncrCounterWithLabels( + []string{"ibc", "timeout", "packet"}, + 1, + []metrics.Label{ + telemetry.NewLabel("source-port", msg.Packet.SourcePort), + telemetry.NewLabel("source-channel", msg.Packet.SourceChannel), + telemetry.NewLabel("destination-port", msg.Packet.DestinationPort), + telemetry.NewLabel("destination-channel", msg.Packet.DestinationChannel), + telemetry.NewLabel("timeout-type", "channel-closed"), + }, + ) + }() + + return &channeltypes.MsgTimeoutOnCloseResponse{}, nil +} + +// Acknowledgement defines a rpc handler method for MsgAcknowledgement. +func (k Keeper) Acknowledgement(goCtx context.Context, msg *channeltypes.MsgAcknowledgement) (*channeltypes.MsgAcknowledgementResponse, error) { + ctx := sdk.UnwrapSDKContext(goCtx) + + // Lookup module by channel capability + module, cap, err := k.ChannelKeeper.LookupModuleByChannel(ctx, msg.Packet.SourcePort, msg.Packet.SourceChannel) + if err != nil { + return nil, sdkerrors.Wrap(err, "could not retrieve module from port-id") + } + + // Retrieve callbacks from router + cbs, ok := k.Router.GetRoute(module) + if !ok { + return nil, sdkerrors.Wrapf(porttypes.ErrInvalidRoute, "route not found to module: %s", module) + } + + // Perform TAO verification + if err := k.ChannelKeeper.AcknowledgePacket(ctx, cap, msg.Packet, msg.Acknowledgement, msg.ProofAcked, msg.ProofHeight); err != nil { + return nil, sdkerrors.Wrap(err, "acknowledge packet verification failed") + } + + // Perform application logic callback + _, err = cbs.OnAcknowledgementPacket(ctx, msg.Packet, msg.Acknowledgement) + if err != nil { + return nil, sdkerrors.Wrap(err, "acknowledge packet callback failed") + } + + defer func() { + telemetry.IncrCounterWithLabels( + []string{"tx", "msg", "ibc", msg.Type()}, + 1, + []metrics.Label{ + telemetry.NewLabel("source-port", msg.Packet.SourcePort), + telemetry.NewLabel("source-channel", msg.Packet.SourceChannel), + telemetry.NewLabel("destination-port", msg.Packet.DestinationPort), + telemetry.NewLabel("destination-channel", msg.Packet.DestinationChannel), + }, + ) + }() + + return &channeltypes.MsgAcknowledgementResponse{}, nil +} diff --git a/core/keeper/msg_server_test.go b/core/keeper/msg_server_test.go new file mode 100644 index 0000000000..1af4cdc18e --- /dev/null +++ b/core/keeper/msg_server_test.go @@ -0,0 +1,714 @@ +package keeper_test + +import ( + "testing" + + "github.com/stretchr/testify/suite" + + sdk "github.com/cosmos/cosmos-sdk/types" + clienttypes "github.com/cosmos/cosmos-sdk/x/ibc/core/02-client/types" + channeltypes "github.com/cosmos/cosmos-sdk/x/ibc/core/04-channel/types" + commitmenttypes "github.com/cosmos/cosmos-sdk/x/ibc/core/23-commitment/types" + host "github.com/cosmos/cosmos-sdk/x/ibc/core/24-host" + "github.com/cosmos/cosmos-sdk/x/ibc/core/exported" + "github.com/cosmos/cosmos-sdk/x/ibc/core/keeper" + ibctmtypes "github.com/cosmos/cosmos-sdk/x/ibc/light-clients/07-tendermint/types" + ibctesting "github.com/cosmos/cosmos-sdk/x/ibc/testing" + ibcmock "github.com/cosmos/cosmos-sdk/x/ibc/testing/mock" + upgradetypes "github.com/cosmos/cosmos-sdk/x/upgrade/types" +) + +const height = 10 + +var ( + timeoutHeight = clienttypes.NewHeight(0, 10000) + maxSequence = uint64(10) +) + +type KeeperTestSuite struct { + 
suite.Suite + + coordinator *ibctesting.Coordinator + + chainA *ibctesting.TestChain + chainB *ibctesting.TestChain +} + +// SetupTest creates a coordinator with 2 test chains. +func (suite *KeeperTestSuite) SetupTest() { + suite.coordinator = ibctesting.NewCoordinator(suite.T(), 2) + + suite.chainA = suite.coordinator.GetChain(ibctesting.GetChainID(0)) + suite.chainB = suite.coordinator.GetChain(ibctesting.GetChainID(1)) + // commit some blocks so that QueryProof returns valid proof (cannot return valid query if height <= 1) + suite.coordinator.CommitNBlocks(suite.chainA, 2) + suite.coordinator.CommitNBlocks(suite.chainB, 2) +} + +func TestIBCTestSuite(t *testing.T) { + suite.Run(t, new(KeeperTestSuite)) +} + +// tests the IBC handler receiving a packet on ordered and unordered channels. +// It verifies that the storing of an acknowledgement on success occurs. It +// tests high level properties like ordering and basic sanity checks. More +// rigorous testing of 'RecvPacket' can be found in the +// 04-channel/keeper/packet_test.go. +func (suite *KeeperTestSuite) TestHandleRecvPacket() { + var ( + packet channeltypes.Packet + ) + + testCases := []struct { + name string + malleate func() + expPass bool + }{ + {"success: ORDERED", func() { + _, clientB, _, _, channelA, channelB := suite.coordinator.Setup(suite.chainA, suite.chainB, channeltypes.ORDERED) + packet = channeltypes.NewPacket(ibctesting.MockCommitment, 1, channelA.PortID, channelA.ID, channelB.PortID, channelB.ID, timeoutHeight, 0) + + err := suite.coordinator.SendPacket(suite.chainA, suite.chainB, packet, clientB) + suite.Require().NoError(err) + }, true}, + {"success: UNORDERED", func() { + _, clientB, _, _, channelA, channelB := suite.coordinator.Setup(suite.chainA, suite.chainB, channeltypes.UNORDERED) + packet = channeltypes.NewPacket(ibctesting.MockCommitment, 1, channelA.PortID, channelA.ID, channelB.PortID, channelB.ID, timeoutHeight, 0) + + err := suite.coordinator.SendPacket(suite.chainA, suite.chainB, packet, clientB) + suite.Require().NoError(err) + }, true}, + {"success: UNORDERED out of order packet", func() { + // setup uses an UNORDERED channel + _, clientB, _, _, channelA, channelB := suite.coordinator.Setup(suite.chainA, suite.chainB, channeltypes.UNORDERED) + + // attempts to receive packet with sequence 10 without receiving packet with sequence 1 + for i := uint64(1); i < 10; i++ { + packet = channeltypes.NewPacket(ibctesting.MockCommitment, i, channelA.PortID, channelA.ID, channelB.PortID, channelB.ID, timeoutHeight, 0) + + err := suite.coordinator.SendPacket(suite.chainA, suite.chainB, packet, clientB) + suite.Require().NoError(err) + } + }, true}, + {"failure: ORDERED out of order packet", func() { + _, clientB, _, _, channelA, channelB := suite.coordinator.Setup(suite.chainA, suite.chainB, channeltypes.ORDERED) + + // attempts to receive packet with sequence 10 without receiving packet with sequence 1 + for i := uint64(1); i < 10; i++ { + packet = channeltypes.NewPacket(ibctesting.MockCommitment, i, channelA.PortID, channelA.ID, channelB.PortID, channelB.ID, timeoutHeight, 0) + + err := suite.coordinator.SendPacket(suite.chainA, suite.chainB, packet, clientB) + suite.Require().NoError(err) + } + }, false}, + {"channel does not exist", func() { + // any non-nil value of packet is valid + suite.Require().NotNil(packet) + }, false}, + {"packet not sent", func() { + _, _, _, _, channelA, channelB := suite.coordinator.Setup(suite.chainA, suite.chainB, channeltypes.UNORDERED) + packet = 
channeltypes.NewPacket(ibctesting.MockCommitment, 1, channelA.PortID, channelA.ID, channelB.PortID, channelB.ID, timeoutHeight, 0) + }, false}, + {"ORDERED: packet already received (replay)", func() { + clientA, clientB, _, _, channelA, channelB := suite.coordinator.Setup(suite.chainA, suite.chainB, channeltypes.ORDERED) + packet = channeltypes.NewPacket(ibctesting.MockCommitment, 1, channelA.PortID, channelA.ID, channelB.PortID, channelB.ID, timeoutHeight, 0) + + err := suite.coordinator.SendPacket(suite.chainA, suite.chainB, packet, clientB) + suite.Require().NoError(err) + + err = suite.coordinator.RecvPacket(suite.chainA, suite.chainB, clientA, packet) + suite.Require().NoError(err) + }, false}, + {"UNORDERED: packet already received (replay)", func() { + clientA, clientB, _, _, channelA, channelB := suite.coordinator.Setup(suite.chainA, suite.chainB, channeltypes.UNORDERED) + + packet = channeltypes.NewPacket(ibctesting.MockCommitment, 1, channelA.PortID, channelA.ID, channelB.PortID, channelB.ID, timeoutHeight, 0) + + err := suite.coordinator.SendPacket(suite.chainA, suite.chainB, packet, clientB) + suite.Require().NoError(err) + + err = suite.coordinator.RecvPacket(suite.chainA, suite.chainB, clientA, packet) + suite.Require().NoError(err) + }, false}, + } + + for _, tc := range testCases { + tc := tc + + suite.Run(tc.name, func() { + suite.SetupTest() // reset + + tc.malleate() + + // get proof of packet commitment from chainA + packetKey := host.PacketCommitmentKey(packet.GetSourcePort(), packet.GetSourceChannel(), packet.GetSequence()) + proof, proofHeight := suite.chainA.QueryProof(packetKey) + + msg := channeltypes.NewMsgRecvPacket(packet, proof, proofHeight, suite.chainB.SenderAccount.GetAddress()) + + // ante-handle RecvPacket + _, err := keeper.Keeper.RecvPacket(*suite.chainB.App.IBCKeeper, sdk.WrapSDKContext(suite.chainB.GetContext()), msg) + + if tc.expPass { + suite.Require().NoError(err) + + // replay should fail since state changes occur + _, err := keeper.Keeper.RecvPacket(*suite.chainB.App.IBCKeeper, sdk.WrapSDKContext(suite.chainB.GetContext()), msg) + suite.Require().Error(err) + + // verify ack was written + ack, found := suite.chainB.App.IBCKeeper.ChannelKeeper.GetPacketAcknowledgement(suite.chainB.GetContext(), packet.GetDestPort(), packet.GetDestChannel(), packet.GetSequence()) + suite.Require().NotNil(ack) + suite.Require().True(found) + } else { + suite.Require().Error(err) + } + }) + } +} + +// tests the IBC handler acknowledgement of a packet on ordered and unordered + // channels. It verifies that the deletion of packet commitments from state + // occurs. It tests high level properties like ordering and basic sanity + // checks. More rigorous testing of 'AcknowledgePacket' + // can be found in the 04-channel/keeper/packet_test.go.
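+// Each case builds a MsgAcknowledgement from a proof queried on chainB and exercises the keeper's Acknowledgement handler on chainA.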
+func (suite *KeeperTestSuite) TestHandleAcknowledgePacket() { + var ( + packet channeltypes.Packet + ) + + testCases := []struct { + name string + malleate func() + expPass bool + }{ + {"success: ORDERED", func() { + clientA, clientB, _, _, channelA, channelB := suite.coordinator.Setup(suite.chainA, suite.chainB, channeltypes.ORDERED) + packet = channeltypes.NewPacket(ibctesting.MockCommitment, 1, channelA.PortID, channelA.ID, channelB.PortID, channelB.ID, timeoutHeight, 0) + + err := suite.coordinator.SendPacket(suite.chainA, suite.chainB, packet, clientB) + suite.Require().NoError(err) + + err = suite.coordinator.RecvPacket(suite.chainA, suite.chainB, clientA, packet) + suite.Require().NoError(err) + }, true}, + {"success: UNORDERED", func() { + clientA, clientB, _, _, channelA, channelB := suite.coordinator.Setup(suite.chainA, suite.chainB, channeltypes.UNORDERED) + packet = channeltypes.NewPacket(ibctesting.MockCommitment, 1, channelA.PortID, channelA.ID, channelB.PortID, channelB.ID, timeoutHeight, 0) + + err := suite.coordinator.SendPacket(suite.chainA, suite.chainB, packet, clientB) + suite.Require().NoError(err) + + err = suite.coordinator.RecvPacket(suite.chainA, suite.chainB, clientA, packet) + suite.Require().NoError(err) + }, true}, + {"success: UNORDERED acknowledge out of order packet", func() { + // setup uses an UNORDERED channel + clientA, clientB, _, _, channelA, channelB := suite.coordinator.Setup(suite.chainA, suite.chainB, channeltypes.UNORDERED) + + // attempts to acknowledge ack with sequence 10 without acknowledging ack with sequence 1 (removing packet commitment) + for i := uint64(1); i < 10; i++ { + packet = channeltypes.NewPacket(ibctesting.MockCommitment, i, channelA.PortID, channelA.ID, channelB.PortID, channelB.ID, timeoutHeight, 0) + + err := suite.coordinator.SendPacket(suite.chainA, suite.chainB, packet, clientB) + suite.Require().NoError(err) + + err = suite.coordinator.RecvPacket(suite.chainA, suite.chainB, clientA, packet) + suite.Require().NoError(err) + } + }, true}, + {"failure: ORDERED acknowledge out of order packet", func() { + clientA, clientB, _, _, channelA, channelB := suite.coordinator.Setup(suite.chainA, suite.chainB, channeltypes.ORDERED) + + // attempts to acknowledge ack with sequence 10 without acknowledging ack with sequence 1 (removing packet commitment + for i := uint64(1); i < 10; i++ { + packet = channeltypes.NewPacket(ibctesting.MockCommitment, i, channelA.PortID, channelA.ID, channelB.PortID, channelB.ID, timeoutHeight, 0) + + err := suite.coordinator.SendPacket(suite.chainA, suite.chainB, packet, clientB) + suite.Require().NoError(err) + + err = suite.coordinator.RecvPacket(suite.chainA, suite.chainB, clientA, packet) + suite.Require().NoError(err) + } + }, false}, + {"channel does not exist", func() { + // any non-nil value of packet is valid + suite.Require().NotNil(packet) + }, false}, + {"packet not received", func() { + _, clientB, _, _, channelA, channelB := suite.coordinator.Setup(suite.chainA, suite.chainB, channeltypes.UNORDERED) + packet = channeltypes.NewPacket(ibctesting.MockCommitment, 1, channelA.PortID, channelA.ID, channelB.PortID, channelB.ID, timeoutHeight, 0) + + err := suite.coordinator.SendPacket(suite.chainA, suite.chainB, packet, clientB) + suite.Require().NoError(err) + }, false}, + {"ORDERED: packet already acknowledged (replay)", func() { + clientA, clientB, _, _, channelA, channelB := suite.coordinator.Setup(suite.chainA, suite.chainB, channeltypes.ORDERED) + packet = 
channeltypes.NewPacket(ibctesting.MockCommitment, 1, channelA.PortID, channelA.ID, channelB.PortID, channelB.ID, timeoutHeight, 0) + + err := suite.coordinator.SendPacket(suite.chainA, suite.chainB, packet, clientB) + suite.Require().NoError(err) + + err = suite.coordinator.RecvPacket(suite.chainA, suite.chainB, clientA, packet) + suite.Require().NoError(err) + + err = suite.coordinator.AcknowledgePacket(suite.chainA, suite.chainB, clientB, packet, ibctesting.TestHash) + suite.Require().NoError(err) + }, false}, + {"UNORDERED: packet already acknowledged (replay)", func() { + clientA, clientB, _, _, channelA, channelB := suite.coordinator.Setup(suite.chainA, suite.chainB, channeltypes.UNORDERED) + + packet = channeltypes.NewPacket(ibctesting.MockCommitment, 1, channelA.PortID, channelA.ID, channelB.PortID, channelB.ID, timeoutHeight, 0) + + err := suite.coordinator.SendPacket(suite.chainA, suite.chainB, packet, clientB) + suite.Require().NoError(err) + + err = suite.coordinator.RecvPacket(suite.chainA, suite.chainB, clientA, packet) + suite.Require().NoError(err) + + err = suite.coordinator.AcknowledgePacket(suite.chainA, suite.chainB, clientB, packet, ibctesting.TestHash) + suite.Require().NoError(err) + }, false}, + } + + for _, tc := range testCases { + tc := tc + + suite.Run(tc.name, func() { + suite.SetupTest() // reset + ibctesting.TestHash = ibctesting.MockAcknowledgement + + tc.malleate() + + packetKey := host.PacketAcknowledgementKey(packet.GetDestPort(), packet.GetDestChannel(), packet.GetSequence()) + proof, proofHeight := suite.chainB.QueryProof(packetKey) + + msg := channeltypes.NewMsgAcknowledgement(packet, ibcmock.MockAcknowledgement, proof, proofHeight, suite.chainA.SenderAccount.GetAddress()) + + _, err := keeper.Keeper.Acknowledgement(*suite.chainA.App.IBCKeeper, sdk.WrapSDKContext(suite.chainA.GetContext()), msg) + + if tc.expPass { + suite.Require().NoError(err) + + // replay should return an error + _, err := keeper.Keeper.Acknowledgement(*suite.chainA.App.IBCKeeper, sdk.WrapSDKContext(suite.chainA.GetContext()), msg) + suite.Require().Error(err) + + // verify packet commitment was deleted on source chain + has := suite.chainA.App.IBCKeeper.ChannelKeeper.HasPacketCommitment(suite.chainA.GetContext(), packet.GetSourcePort(), packet.GetSourceChannel(), packet.GetSequence()) + suite.Require().False(has) + + } else { + suite.Require().Error(err) + } + }) + } +} + +// tests the IBC handler timing out a packet on ordered and unordered channels. + // It verifies that the deletion of a packet commitment occurs. It tests + // high level properties like ordering and basic sanity checks. More + // rigorous testing of 'TimeoutPacket' and 'TimeoutExecuted' can be found in + // the 04-channel/keeper/timeout_test.go.
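+// Each case builds a MsgTimeout from a proof queried on chainB at the receipt or next-sequence-recv key and exercises the keeper's Timeout handler on chainA.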
+func (suite *KeeperTestSuite) TestHandleTimeoutPacket() { + var ( + packet channeltypes.Packet + packetKey []byte + ) + + testCases := []struct { + name string + malleate func() + expPass bool + }{ + {"success: ORDERED", func() { + clientA, clientB, _, _, channelA, channelB := suite.coordinator.Setup(suite.chainA, suite.chainB, channeltypes.ORDERED) + packet = channeltypes.NewPacket(ibctesting.MockCommitment, 1, channelA.PortID, channelA.ID, channelB.PortID, channelB.ID, clienttypes.GetSelfHeight(suite.chainB.GetContext()), uint64(suite.chainB.GetContext().BlockTime().UnixNano())) + + // create packet commitment + err := suite.coordinator.SendPacket(suite.chainA, suite.chainB, packet, clientB) + suite.Require().NoError(err) + + // need to update chainA client to prove missing ack + suite.coordinator.UpdateClient(suite.chainA, suite.chainB, clientA, exported.Tendermint) + + packetKey = host.NextSequenceRecvKey(packet.GetDestPort(), packet.GetDestChannel()) + }, true}, + {"success: UNORDERED", func() { + clientA, clientB, _, _, channelA, channelB := suite.coordinator.Setup(suite.chainA, suite.chainB, channeltypes.UNORDERED) + packet = channeltypes.NewPacket(ibctesting.MockCommitment, 1, channelA.PortID, channelA.ID, channelB.PortID, channelB.ID, clienttypes.GetSelfHeight(suite.chainB.GetContext()), uint64(suite.chainB.GetContext().BlockTime().UnixNano())) + + // create packet commitment + err := suite.coordinator.SendPacket(suite.chainA, suite.chainB, packet, clientB) + suite.Require().NoError(err) + + // need to update chainA client to prove missing ack + suite.coordinator.UpdateClient(suite.chainA, suite.chainB, clientA, exported.Tendermint) + + packetKey = host.PacketReceiptKey(packet.GetDestPort(), packet.GetDestChannel(), packet.GetSequence()) + }, true}, + {"success: UNORDERED timeout out of order packet", func() { + // setup uses an UNORDERED channel + clientA, clientB, _, _, channelA, channelB := suite.coordinator.Setup(suite.chainA, suite.chainB, channeltypes.UNORDERED) + + // attempts to timeout the last packet sent without timing out the first packet + // packet sequences begin at 1 + for i := uint64(1); i < maxSequence; i++ { + packet = channeltypes.NewPacket(ibctesting.MockCommitment, i, channelA.PortID, channelA.ID, channelB.PortID, channelB.ID, clienttypes.GetSelfHeight(suite.chainB.GetContext()), 0) + + // create packet commitment + err := suite.coordinator.SendPacket(suite.chainA, suite.chainB, packet, clientB) + suite.Require().NoError(err) + } + + suite.coordinator.UpdateClient(suite.chainA, suite.chainB, clientA, exported.Tendermint) + packetKey = host.PacketReceiptKey(packet.GetDestPort(), packet.GetDestChannel(), packet.GetSequence()) + }, true}, + {"success: ORDERED timeout out of order packet", func() { + clientA, clientB, _, _, channelA, channelB := suite.coordinator.Setup(suite.chainA, suite.chainB, channeltypes.ORDERED) + + // attempts to timeout the last packet sent without timing out the first packet + // packet sequences begin at 1 + for i := uint64(1); i < maxSequence; i++ { + packet = channeltypes.NewPacket(ibctesting.MockCommitment, i, channelA.PortID, channelA.ID, channelB.PortID, channelB.ID, clienttypes.GetSelfHeight(suite.chainB.GetContext()), 0) + + // create packet commitment + err := suite.coordinator.SendPacket(suite.chainA, suite.chainB, packet, clientB) + suite.Require().NoError(err) + } + + suite.coordinator.UpdateClient(suite.chainA, suite.chainB, clientA, exported.Tendermint) + packetKey = host.NextSequenceRecvKey(packet.GetDestPort(), 
packet.GetDestChannel()) + + }, true}, + {"channel does not exist", func() { + // any non-nil value of packet is valid + suite.Require().NotNil(packet) + + packetKey = host.NextSequenceRecvKey(packet.GetDestPort(), packet.GetDestChannel()) + }, false}, + {"UNORDERED: packet not sent", func() { + _, _, _, _, channelA, channelB := suite.coordinator.Setup(suite.chainA, suite.chainB, channeltypes.UNORDERED) + packet = channeltypes.NewPacket(ibctesting.MockCommitment, 1, channelA.PortID, channelA.ID, channelB.PortID, channelB.ID, timeoutHeight, 0) + packetKey = host.PacketReceiptKey(packet.GetDestPort(), packet.GetDestChannel(), packet.GetSequence()) + }, false}, + } + + for _, tc := range testCases { + tc := tc + + suite.Run(tc.name, func() { + suite.SetupTest() // reset + + tc.malleate() + + proof, proofHeight := suite.chainB.QueryProof(packetKey) + + msg := channeltypes.NewMsgTimeout(packet, 1, proof, proofHeight, suite.chainA.SenderAccount.GetAddress()) + + _, err := keeper.Keeper.Timeout(*suite.chainA.App.IBCKeeper, sdk.WrapSDKContext(suite.chainA.GetContext()), msg) + + if tc.expPass { + suite.Require().NoError(err) + + // replay should return an error + _, err := keeper.Keeper.Timeout(*suite.chainA.App.IBCKeeper, sdk.WrapSDKContext(suite.chainA.GetContext()), msg) + suite.Require().Error(err) + + // verify packet commitment was deleted on source chain + has := suite.chainA.App.IBCKeeper.ChannelKeeper.HasPacketCommitment(suite.chainA.GetContext(), packet.GetSourcePort(), packet.GetSourceChannel(), packet.GetSequence()) + suite.Require().False(has) + + } else { + suite.Require().Error(err) + } + }) + } +} + +// tests the IBC handler timing out a packet via channel closure on ordered +// and unordered channels. It verifies that the deletion of a packet +// commitment occurs. It tests high level properties like ordering and basic +// sanity checks. More rigorous testing of 'TimeoutOnClose' and +//'TimeoutExecuted' can be found in the 04-channel/keeper/timeout_test.go. 
+func (suite *KeeperTestSuite) TestHandleTimeoutOnClosePacket() { + var ( + packet channeltypes.Packet + packetKey []byte + counterpartyChannel ibctesting.TestChannel + ) + + testCases := []struct { + name string + malleate func() + expPass bool + }{ + {"success: ORDERED", func() { + clientA, clientB, _, _, channelA, channelB := suite.coordinator.Setup(suite.chainA, suite.chainB, channeltypes.ORDERED) + packet = channeltypes.NewPacket(ibctesting.MockCommitment, 1, channelA.PortID, channelA.ID, channelB.PortID, channelB.ID, timeoutHeight, 0) + counterpartyChannel = ibctesting.TestChannel{ + PortID: channelB.PortID, + ID: channelB.ID, + CounterpartyClientID: clientA, + } + + // create packet commitment + err := suite.coordinator.SendPacket(suite.chainA, suite.chainB, packet, clientB) + suite.Require().NoError(err) + + // need to update chainA client to prove missing ack + suite.coordinator.UpdateClient(suite.chainA, suite.chainB, clientA, exported.Tendermint) + + packetKey = host.NextSequenceRecvKey(packet.GetDestPort(), packet.GetDestChannel()) + + // close counterparty channel + suite.coordinator.SetChannelClosed(suite.chainB, suite.chainA, counterpartyChannel) + }, true}, + {"success: UNORDERED", func() { + clientA, clientB, _, _, channelA, channelB := suite.coordinator.Setup(suite.chainA, suite.chainB, channeltypes.UNORDERED) + packet = channeltypes.NewPacket(ibctesting.MockCommitment, 1, channelA.PortID, channelA.ID, channelB.PortID, channelB.ID, timeoutHeight, 0) + counterpartyChannel = ibctesting.TestChannel{ + PortID: channelB.PortID, + ID: channelB.ID, + CounterpartyClientID: clientA, + } + + // create packet commitment + err := suite.coordinator.SendPacket(suite.chainA, suite.chainB, packet, clientB) + suite.Require().NoError(err) + + // need to update chainA client to prove missing ack + suite.coordinator.UpdateClient(suite.chainA, suite.chainB, clientA, exported.Tendermint) + + packetKey = host.PacketReceiptKey(packet.GetDestPort(), packet.GetDestChannel(), packet.GetSequence()) + + // close counterparty channel + suite.coordinator.SetChannelClosed(suite.chainB, suite.chainA, counterpartyChannel) + }, true}, + {"success: UNORDERED timeout out of order packet", func() { + // setup uses an UNORDERED channel + clientA, clientB, _, _, channelA, channelB := suite.coordinator.Setup(suite.chainA, suite.chainB, channeltypes.UNORDERED) + counterpartyChannel = ibctesting.TestChannel{ + PortID: channelB.PortID, + ID: channelB.ID, + CounterpartyClientID: clientA, + } + + // attempts to timeout the last packet sent without timing out the first packet + // packet sequences begin at 1 + for i := uint64(1); i < maxSequence; i++ { + packet = channeltypes.NewPacket(ibctesting.MockCommitment, i, channelA.PortID, channelA.ID, channelB.PortID, channelB.ID, timeoutHeight, 0) + + // create packet commitment + err := suite.coordinator.SendPacket(suite.chainA, suite.chainB, packet, clientB) + suite.Require().NoError(err) + } + + suite.coordinator.UpdateClient(suite.chainA, suite.chainB, clientA, exported.Tendermint) + packetKey = host.PacketReceiptKey(packet.GetDestPort(), packet.GetDestChannel(), packet.GetSequence()) + + // close counterparty channel + suite.coordinator.SetChannelClosed(suite.chainB, suite.chainA, counterpartyChannel) + }, true}, + {"success: ORDERED timeout out of order packet", func() { + clientA, clientB, _, _, channelA, channelB := suite.coordinator.Setup(suite.chainA, suite.chainB, channeltypes.ORDERED) + counterpartyChannel = ibctesting.TestChannel{ + PortID: channelB.PortID, + 
ID: channelB.ID, + CounterpartyClientID: clientA, + } + + // attempts to timeout the last packet sent without timing out the first packet + // packet sequences begin at 1 + for i := uint64(1); i < maxSequence; i++ { + packet = channeltypes.NewPacket(ibctesting.MockCommitment, i, channelA.PortID, channelA.ID, channelB.PortID, channelB.ID, timeoutHeight, 0) + + // create packet commitment + err := suite.coordinator.SendPacket(suite.chainA, suite.chainB, packet, clientB) + suite.Require().NoError(err) + } + + suite.coordinator.UpdateClient(suite.chainA, suite.chainB, clientA, exported.Tendermint) + packetKey = host.NextSequenceRecvKey(packet.GetDestPort(), packet.GetDestChannel()) + + // close counterparty channel + suite.coordinator.SetChannelClosed(suite.chainB, suite.chainA, counterpartyChannel) + }, true}, + {"channel does not exist", func() { + // any non-nil value of packet is valid + suite.Require().NotNil(packet) + + packetKey = host.NextSequenceRecvKey(packet.GetDestPort(), packet.GetDestChannel()) + }, false}, + {"UNORDERED: packet not sent", func() { + clientA, _, _, _, channelA, channelB := suite.coordinator.Setup(suite.chainA, suite.chainB, channeltypes.UNORDERED) + packet = channeltypes.NewPacket(ibctesting.MockCommitment, 1, channelA.PortID, channelA.ID, channelB.PortID, channelB.ID, timeoutHeight, 0) + packetKey = host.PacketAcknowledgementKey(packet.GetDestPort(), packet.GetDestChannel(), packet.GetSequence()) + counterpartyChannel = ibctesting.TestChannel{ + PortID: channelB.PortID, + ID: channelB.ID, + CounterpartyClientID: clientA, + } + + // close counterparty channel + suite.coordinator.SetChannelClosed(suite.chainB, suite.chainA, counterpartyChannel) + }, false}, + {"ORDERED: channel not closed", func() { + clientA, clientB, _, _, channelA, channelB := suite.coordinator.Setup(suite.chainA, suite.chainB, channeltypes.ORDERED) + packet = channeltypes.NewPacket(ibctesting.MockCommitment, 1, channelA.PortID, channelA.ID, channelB.PortID, channelB.ID, timeoutHeight, 0) + counterpartyChannel = ibctesting.TestChannel{ + PortID: channelB.PortID, + ID: channelB.ID, + CounterpartyClientID: clientA, + } + + // create packet commitment + err := suite.coordinator.SendPacket(suite.chainA, suite.chainB, packet, clientB) + suite.Require().NoError(err) + + // need to update chainA client to prove missing ack + suite.coordinator.UpdateClient(suite.chainA, suite.chainB, clientA, exported.Tendermint) + + packetKey = host.NextSequenceRecvKey(packet.GetDestPort(), packet.GetDestChannel()) + }, false}, + } + + for _, tc := range testCases { + tc := tc + + suite.Run(tc.name, func() { + suite.SetupTest() // reset + + tc.malleate() + + proof, proofHeight := suite.chainB.QueryProof(packetKey) + + channelKey := host.ChannelKey(counterpartyChannel.PortID, counterpartyChannel.ID) + proofClosed, _ := suite.chainB.QueryProof(channelKey) + + msg := channeltypes.NewMsgTimeoutOnClose(packet, 1, proof, proofClosed, proofHeight, suite.chainA.SenderAccount.GetAddress()) + + _, err := keeper.Keeper.TimeoutOnClose(*suite.chainA.App.IBCKeeper, sdk.WrapSDKContext(suite.chainA.GetContext()), msg) + + if tc.expPass { + suite.Require().NoError(err) + + // replay should return an error + _, err := keeper.Keeper.TimeoutOnClose(*suite.chainA.App.IBCKeeper, sdk.WrapSDKContext(suite.chainA.GetContext()), msg) + suite.Require().Error(err) + + // verify packet commitment was deleted on source chain + has := suite.chainA.App.IBCKeeper.ChannelKeeper.HasPacketCommitment(suite.chainA.GetContext(), packet.GetSourcePort(), 
packet.GetSourceChannel(), packet.GetSequence()) + suite.Require().False(has) + + } else { + suite.Require().Error(err) + } + }) + } +} + +func (suite *KeeperTestSuite) TestUpgradeClient() { + var ( + clientA string + upgradedClient exported.ClientState + upgradedConsState exported.ConsensusState + lastHeight exported.Height + msg *clienttypes.MsgUpgradeClient + ) + + newClientHeight := clienttypes.NewHeight(1, 1) + + cases := []struct { + name string + setup func() + expPass bool + }{ + { + name: "successful upgrade", + setup: func() { + + upgradedClient = ibctmtypes.NewClientState("newChainId", ibctmtypes.DefaultTrustLevel, ibctesting.TrustingPeriod, ibctesting.UnbondingPeriod+ibctesting.TrustingPeriod, ibctesting.MaxClockDrift, newClientHeight, commitmenttypes.GetSDKSpecs(), ibctesting.UpgradePath, false, false) + // Call ZeroCustomFields on upgraded clients to clear any client-chosen parameters in test-case upgradedClient + upgradedClient = upgradedClient.ZeroCustomFields() + + upgradedConsState = &ibctmtypes.ConsensusState{ + NextValidatorsHash: []byte("nextValsHash"), + } + + // last Height is at next block + lastHeight = clienttypes.NewHeight(0, uint64(suite.chainB.GetContext().BlockHeight()+1)) + + // zero custom fields and store in upgrade store + suite.chainB.App.UpgradeKeeper.SetUpgradedClient(suite.chainB.GetContext(), int64(lastHeight.GetRevisionHeight()), upgradedClient) + suite.chainB.App.UpgradeKeeper.SetUpgradedConsensusState(suite.chainB.GetContext(), int64(lastHeight.GetRevisionHeight()), upgradedConsState) + + // commit upgrade store changes and update clients + + suite.coordinator.CommitBlock(suite.chainB) + err := suite.coordinator.UpdateClient(suite.chainA, suite.chainB, clientA, exported.Tendermint) + suite.Require().NoError(err) + + cs, found := suite.chainA.App.IBCKeeper.ClientKeeper.GetClientState(suite.chainA.GetContext(), clientA) + suite.Require().True(found) + + proofUpgradeClient, _ := suite.chainB.QueryUpgradeProof(upgradetypes.UpgradedClientKey(int64(lastHeight.GetRevisionHeight())), cs.GetLatestHeight().GetRevisionHeight()) + proofUpgradedConsState, _ := suite.chainB.QueryUpgradeProof(upgradetypes.UpgradedConsStateKey(int64(lastHeight.GetRevisionHeight())), cs.GetLatestHeight().GetRevisionHeight()) + + msg, err = clienttypes.NewMsgUpgradeClient(clientA, upgradedClient, upgradedConsState, + proofUpgradeClient, proofUpgradedConsState, suite.chainA.SenderAccount.GetAddress()) + suite.Require().NoError(err) + }, + expPass: true, + }, + { + name: "VerifyUpgrade fails", + setup: func() { + + upgradedClient = ibctmtypes.NewClientState("newChainId", ibctmtypes.DefaultTrustLevel, ibctesting.TrustingPeriod, ibctesting.UnbondingPeriod+ibctesting.TrustingPeriod, ibctesting.MaxClockDrift, newClientHeight, commitmenttypes.GetSDKSpecs(), ibctesting.UpgradePath, false, false) + // Call ZeroCustomFields on upgraded clients to clear any client-chosen parameters in test-case upgradedClient + upgradedClient = upgradedClient.ZeroCustomFields() + + upgradedConsState = &ibctmtypes.ConsensusState{ + NextValidatorsHash: []byte("nextValsHash"), + } + + // last Height is at next block + lastHeight = clienttypes.NewHeight(0, uint64(suite.chainB.GetContext().BlockHeight()+1)) + + // zero custom fields and store in upgrade store + suite.chainB.App.UpgradeKeeper.SetUpgradedClient(suite.chainB.GetContext(), int64(lastHeight.GetRevisionHeight()), upgradedClient) + suite.chainB.App.UpgradeKeeper.SetUpgradedConsensusState(suite.chainB.GetContext(), int64(lastHeight.GetRevisionHeight()), 
upgradedConsState) + + // commit upgrade store changes and update clients + + suite.coordinator.CommitBlock(suite.chainB) + err := suite.coordinator.UpdateClient(suite.chainA, suite.chainB, clientA, exported.Tendermint) + suite.Require().NoError(err) + + msg, err = clienttypes.NewMsgUpgradeClient(clientA, upgradedClient, upgradedConsState, nil, nil, suite.chainA.SenderAccount.GetAddress()) + suite.Require().NoError(err) + }, + expPass: false, + }, + } + + for _, tc := range cases { + tc := tc + clientA, _ = suite.coordinator.SetupClients(suite.chainA, suite.chainB, exported.Tendermint) + + tc.setup() + + _, err := keeper.Keeper.UpgradeClient(*suite.chainA.App.IBCKeeper, sdk.WrapSDKContext(suite.chainA.GetContext()), msg) + + if tc.expPass { + suite.Require().NoError(err, "upgrade handler failed on valid case: %s", tc.name) + newClient, ok := suite.chainA.App.IBCKeeper.ClientKeeper.GetClientState(suite.chainA.GetContext(), clientA) + suite.Require().True(ok) + newChainSpecifiedClient := newClient.ZeroCustomFields() + suite.Require().Equal(upgradedClient, newChainSpecifiedClient) + } else { + suite.Require().Error(err, "upgrade handler passed on invalid case: %s", tc.name) + } + } +} diff --git a/core/module.go b/core/module.go new file mode 100644 index 0000000000..6527ab71eb --- /dev/null +++ b/core/module.go @@ -0,0 +1,200 @@ +package ibc + +import ( + "context" + "encoding/json" + "fmt" + "math/rand" + + "github.com/gorilla/mux" + "github.com/grpc-ecosystem/grpc-gateway/runtime" + "github.com/spf13/cobra" + + abci "github.com/tendermint/tendermint/abci/types" + + "github.com/cosmos/cosmos-sdk/client" + "github.com/cosmos/cosmos-sdk/codec" + codectypes "github.com/cosmos/cosmos-sdk/codec/types" + sdk "github.com/cosmos/cosmos-sdk/types" + "github.com/cosmos/cosmos-sdk/types/module" + simtypes "github.com/cosmos/cosmos-sdk/types/simulation" + ibcclient "github.com/cosmos/cosmos-sdk/x/ibc/core/02-client" + clienttypes "github.com/cosmos/cosmos-sdk/x/ibc/core/02-client/types" + connectiontypes "github.com/cosmos/cosmos-sdk/x/ibc/core/03-connection/types" + channeltypes "github.com/cosmos/cosmos-sdk/x/ibc/core/04-channel/types" + host "github.com/cosmos/cosmos-sdk/x/ibc/core/24-host" + "github.com/cosmos/cosmos-sdk/x/ibc/core/client/cli" + "github.com/cosmos/cosmos-sdk/x/ibc/core/keeper" + "github.com/cosmos/cosmos-sdk/x/ibc/core/simulation" + "github.com/cosmos/cosmos-sdk/x/ibc/core/types" +) + +var ( + _ module.AppModule = AppModule{} + _ module.AppModuleBasic = AppModuleBasic{} + _ module.AppModuleSimulation = AppModule{} +) + +// AppModuleBasic defines the basic application module used by the ibc module. +type AppModuleBasic struct{} + +var _ module.AppModuleBasic = AppModuleBasic{} + +// Name returns the ibc module's name. +func (AppModuleBasic) Name() string { + return host.ModuleName +} + +// RegisterLegacyAminoCodec does nothing. IBC does not support amino. +func (AppModuleBasic) RegisterLegacyAminoCodec(*codec.LegacyAmino) {} + +// DefaultGenesis returns default genesis state as raw bytes for the ibc +// module. +func (AppModuleBasic) DefaultGenesis(cdc codec.JSONMarshaler) json.RawMessage { + return cdc.MustMarshalJSON(types.DefaultGenesisState()) +} + +// ValidateGenesis performs genesis state validation for the ibc module. 
+func (AppModuleBasic) ValidateGenesis(cdc codec.JSONMarshaler, config client.TxEncodingConfig, bz json.RawMessage) error { + var gs types.GenesisState + if err := cdc.UnmarshalJSON(bz, &gs); err != nil { + return fmt.Errorf("failed to unmarshal %s genesis state: %w", host.ModuleName, err) + } + + return gs.Validate() +} + +// RegisterRESTRoutes does nothing. IBC does not support legacy REST routes. +func (AppModuleBasic) RegisterRESTRoutes(client.Context, *mux.Router) {} + +// RegisterGRPCGatewayRoutes registers the gRPC Gateway routes for the ibc module. +func (AppModuleBasic) RegisterGRPCGatewayRoutes(clientCtx client.Context, mux *runtime.ServeMux) { + clienttypes.RegisterQueryHandlerClient(context.Background(), mux, clienttypes.NewQueryClient(clientCtx)) + connectiontypes.RegisterQueryHandlerClient(context.Background(), mux, connectiontypes.NewQueryClient(clientCtx)) + channeltypes.RegisterQueryHandlerClient(context.Background(), mux, channeltypes.NewQueryClient(clientCtx)) +} + +// GetTxCmd returns the root tx command for the ibc module. +func (AppModuleBasic) GetTxCmd() *cobra.Command { + return cli.GetTxCmd() +} + +// GetQueryCmd returns no root query command for the ibc module. +func (AppModuleBasic) GetQueryCmd() *cobra.Command { + return cli.GetQueryCmd() +} + +// RegisterInterfaces registers module concrete types into protobuf Any. +func (AppModuleBasic) RegisterInterfaces(registry codectypes.InterfaceRegistry) { + types.RegisterInterfaces(registry) +} + +// AppModule implements an application module for the ibc module. +type AppModule struct { + AppModuleBasic + keeper *keeper.Keeper + + // create localhost by default + createLocalhost bool +} + +// NewAppModule creates a new AppModule object +func NewAppModule(k *keeper.Keeper) AppModule { + return AppModule{ + keeper: k, + } +} + +// Name returns the ibc module's name. +func (AppModule) Name() string { + return host.ModuleName +} + +// RegisterInvariants registers the ibc module invariants. +func (am AppModule) RegisterInvariants(ir sdk.InvariantRegistry) { + // TODO: +} + +// Route returns the message routing key for the ibc module. +func (am AppModule) Route() sdk.Route { + return sdk.NewRoute(host.RouterKey, NewHandler(*am.keeper)) +} + +// QuerierRoute returns the ibc module's querier route name. +func (AppModule) QuerierRoute() string { + return host.QuerierRoute +} + +// LegacyQuerierHandler returns nil. IBC does not support the legacy querier. +func (am AppModule) LegacyQuerierHandler(legacyQuerierCdc *codec.LegacyAmino) sdk.Querier { + return nil +} + +// RegisterServices registers module services. +func (am AppModule) RegisterServices(cfg module.Configurator) { + clienttypes.RegisterMsgServer(cfg.MsgServer(), am.keeper) + connectiontypes.RegisterMsgServer(cfg.MsgServer(), am.keeper) + channeltypes.RegisterMsgServer(cfg.MsgServer(), am.keeper) + types.RegisterQueryService(cfg.QueryServer(), am.keeper) +} + +// InitGenesis performs genesis initialization for the ibc module. It returns +// no validator updates. +func (am AppModule) InitGenesis(ctx sdk.Context, cdc codec.JSONMarshaler, bz json.RawMessage) []abci.ValidatorUpdate { + var gs types.GenesisState + err := cdc.UnmarshalJSON(bz, &gs) + if err != nil { + panic(fmt.Sprintf("failed to unmarshal %s genesis state: %s", host.ModuleName, err)) + } + InitGenesis(ctx, *am.keeper, am.createLocalhost, &gs) + return []abci.ValidatorUpdate{} +} + +// ExportGenesis returns the exported genesis state as raw bytes for the ibc +// module. 
+func (am AppModule) ExportGenesis(ctx sdk.Context, cdc codec.JSONMarshaler) json.RawMessage { + return cdc.MustMarshalJSON(ExportGenesis(ctx, *am.keeper)) +} + +// ConsensusVersion implements AppModule/ConsensusVersion. +func (AppModule) ConsensusVersion() uint64 { return 1 } + +// BeginBlock returns the begin blocker for the ibc module. +func (am AppModule) BeginBlock(ctx sdk.Context, req abci.RequestBeginBlock) { + ibcclient.BeginBlocker(ctx, am.keeper.ClientKeeper) +} + +// EndBlock returns the end blocker for the ibc module. It returns no validator +// updates. +func (am AppModule) EndBlock(ctx sdk.Context, req abci.RequestEndBlock) []abci.ValidatorUpdate { + return []abci.ValidatorUpdate{} +} + +//____________________________________________________________________________ + +// AppModuleSimulation functions + +// GenerateGenesisState creates a randomized GenState of the ibc module. +func (AppModule) GenerateGenesisState(simState *module.SimulationState) { + simulation.RandomizedGenState(simState) +} + +// ProposalContents doesn't return any content functions for governance proposals. +func (AppModule) ProposalContents(_ module.SimulationState) []simtypes.WeightedProposalContent { + return nil +} + +// RandomizedParams returns nil since IBC doesn't register parameter changes. +func (AppModule) RandomizedParams(_ *rand.Rand) []simtypes.ParamChange { + return nil +} + +// RegisterStoreDecoder registers a decoder for ibc module's types +func (am AppModule) RegisterStoreDecoder(sdr sdk.StoreDecoderRegistry) { + sdr[host.StoreKey] = simulation.NewDecodeStore(*am.keeper) +} + +// WeightedOperations returns the all the ibc module operations with their respective weights. +func (am AppModule) WeightedOperations(_ module.SimulationState) []simtypes.WeightedOperation { + return nil +} diff --git a/core/simulation/decoder.go b/core/simulation/decoder.go new file mode 100644 index 0000000000..459eebb8f0 --- /dev/null +++ b/core/simulation/decoder.go @@ -0,0 +1,32 @@ +package simulation + +import ( + "fmt" + + "github.com/cosmos/cosmos-sdk/types/kv" + clientsim "github.com/cosmos/cosmos-sdk/x/ibc/core/02-client/simulation" + connectionsim "github.com/cosmos/cosmos-sdk/x/ibc/core/03-connection/simulation" + channelsim "github.com/cosmos/cosmos-sdk/x/ibc/core/04-channel/simulation" + host "github.com/cosmos/cosmos-sdk/x/ibc/core/24-host" + "github.com/cosmos/cosmos-sdk/x/ibc/core/keeper" +) + +// NewDecodeStore returns a decoder function closure that unmarshals the KVPair's +// Value to the corresponding ibc type. 
+func NewDecodeStore(k keeper.Keeper) func(kvA, kvB kv.Pair) string { + return func(kvA, kvB kv.Pair) string { + if res, found := clientsim.NewDecodeStore(k.ClientKeeper, kvA, kvB); found { + return res + } + + if res, found := connectionsim.NewDecodeStore(k.Codec(), kvA, kvB); found { + return res + } + + if res, found := channelsim.NewDecodeStore(k.Codec(), kvA, kvB); found { + return res + } + + panic(fmt.Sprintf("invalid %s key prefix: %s", host.ModuleName, string(kvA.Key))) + } +} diff --git a/core/simulation/decoder_test.go b/core/simulation/decoder_test.go new file mode 100644 index 0000000000..0951572743 --- /dev/null +++ b/core/simulation/decoder_test.go @@ -0,0 +1,80 @@ +package simulation_test + +import ( + "fmt" + "testing" + + "github.com/stretchr/testify/require" + + "github.com/cosmos/cosmos-sdk/simapp" + "github.com/cosmos/cosmos-sdk/types/kv" + clienttypes "github.com/cosmos/cosmos-sdk/x/ibc/core/02-client/types" + connectiontypes "github.com/cosmos/cosmos-sdk/x/ibc/core/03-connection/types" + channeltypes "github.com/cosmos/cosmos-sdk/x/ibc/core/04-channel/types" + host "github.com/cosmos/cosmos-sdk/x/ibc/core/24-host" + "github.com/cosmos/cosmos-sdk/x/ibc/core/simulation" + ibctmtypes "github.com/cosmos/cosmos-sdk/x/ibc/light-clients/07-tendermint/types" +) + +func TestDecodeStore(t *testing.T) { + app := simapp.Setup(false) + dec := simulation.NewDecodeStore(*app.IBCKeeper) + + clientID := "clientidone" + connectionID := "connectionidone" + channelID := "channelidone" + portID := "portidone" + + clientState := &ibctmtypes.ClientState{ + FrozenHeight: clienttypes.NewHeight(0, 10), + } + connection := connectiontypes.ConnectionEnd{ + ClientId: "clientidone", + Versions: []*connectiontypes.Version{connectiontypes.NewVersion("1", nil)}, + } + channel := channeltypes.Channel{ + State: channeltypes.OPEN, + Version: "1.0", + } + + kvPairs := kv.Pairs{ + Pairs: []kv.Pair{ + { + Key: host.FullClientStateKey(clientID), + Value: app.IBCKeeper.ClientKeeper.MustMarshalClientState(clientState), + }, + { + Key: host.ConnectionKey(connectionID), + Value: app.IBCKeeper.Codec().MustMarshalBinaryBare(&connection), + }, + { + Key: host.ChannelKey(portID, channelID), + Value: app.IBCKeeper.Codec().MustMarshalBinaryBare(&channel), + }, + { + Key: []byte{0x99}, + Value: []byte{0x99}, + }, + }, + } + tests := []struct { + name string + expectedLog string + }{ + {"ClientState", fmt.Sprintf("ClientState A: %v\nClientState B: %v", clientState, clientState)}, + {"ConnectionEnd", fmt.Sprintf("ConnectionEnd A: %v\nConnectionEnd B: %v", connection, connection)}, + {"Channel", fmt.Sprintf("Channel A: %v\nChannel B: %v", channel, channel)}, + {"other", ""}, + } + + for i, tt := range tests { + i, tt := i, tt + t.Run(tt.name, func(t *testing.T) { + if i == len(tests)-1 { + require.Panics(t, func() { dec(kvPairs.Pairs[i], kvPairs.Pairs[i]) }, tt.name) + } else { + require.Equal(t, tt.expectedLog, dec(kvPairs.Pairs[i], kvPairs.Pairs[i]), tt.name) + } + }) + } +} diff --git a/core/simulation/genesis.go b/core/simulation/genesis.go new file mode 100644 index 0000000000..d71f449250 --- /dev/null +++ b/core/simulation/genesis.go @@ -0,0 +1,63 @@ +package simulation + +// DONTCOVER + +import ( + "encoding/json" + "fmt" + "math/rand" + + "github.com/cosmos/cosmos-sdk/types/module" + clientsims "github.com/cosmos/cosmos-sdk/x/ibc/core/02-client/simulation" + clienttypes "github.com/cosmos/cosmos-sdk/x/ibc/core/02-client/types" + connectionsims "github.com/cosmos/cosmos-sdk/x/ibc/core/03-connection/simulation" + 
connectiontypes "github.com/cosmos/cosmos-sdk/x/ibc/core/03-connection/types" + channelsims "github.com/cosmos/cosmos-sdk/x/ibc/core/04-channel/simulation" + channeltypes "github.com/cosmos/cosmos-sdk/x/ibc/core/04-channel/types" + host "github.com/cosmos/cosmos-sdk/x/ibc/core/24-host" + "github.com/cosmos/cosmos-sdk/x/ibc/core/types" +) + +// Simulation parameter constants +const ( + clientGenesis = "client_genesis" + connectionGenesis = "connection_genesis" + channelGenesis = "channel_genesis" +) + +// RandomizedGenState generates a random GenesisState for evidence +func RandomizedGenState(simState *module.SimulationState) { + var ( + clientGenesisState clienttypes.GenesisState + connectionGenesisState connectiontypes.GenesisState + channelGenesisState channeltypes.GenesisState + ) + + simState.AppParams.GetOrGenerate( + simState.Cdc, clientGenesis, &clientGenesisState, simState.Rand, + func(r *rand.Rand) { clientGenesisState = clientsims.GenClientGenesis(r, simState.Accounts) }, + ) + + simState.AppParams.GetOrGenerate( + simState.Cdc, connectionGenesis, &connectionGenesisState, simState.Rand, + func(r *rand.Rand) { connectionGenesisState = connectionsims.GenConnectionGenesis(r, simState.Accounts) }, + ) + + simState.AppParams.GetOrGenerate( + simState.Cdc, channelGenesis, &channelGenesisState, simState.Rand, + func(r *rand.Rand) { channelGenesisState = channelsims.GenChannelGenesis(r, simState.Accounts) }, + ) + + ibcGenesis := types.GenesisState{ + ClientGenesis: clientGenesisState, + ConnectionGenesis: connectionGenesisState, + ChannelGenesis: channelGenesisState, + } + + bz, err := json.MarshalIndent(&ibcGenesis, "", " ") + if err != nil { + panic(err) + } + fmt.Printf("Selected randomly generated %s parameters:\n%s\n", host.ModuleName, bz) + simState.GenState[host.ModuleName] = simState.Cdc.MustMarshalJSON(&ibcGenesis) +} diff --git a/core/simulation/genesis_test.go b/core/simulation/genesis_test.go new file mode 100644 index 0000000000..54aff75ad9 --- /dev/null +++ b/core/simulation/genesis_test.go @@ -0,0 +1,49 @@ +package simulation_test + +import ( + "encoding/json" + "math/rand" + "testing" + + "github.com/stretchr/testify/require" + + "github.com/cosmos/cosmos-sdk/codec" + codectypes "github.com/cosmos/cosmos-sdk/codec/types" + "github.com/cosmos/cosmos-sdk/types/module" + simtypes "github.com/cosmos/cosmos-sdk/types/simulation" + host "github.com/cosmos/cosmos-sdk/x/ibc/core/24-host" + "github.com/cosmos/cosmos-sdk/x/ibc/core/simulation" + "github.com/cosmos/cosmos-sdk/x/ibc/core/types" +) + +// TestRandomizedGenState tests the normal scenario of applying RandomizedGenState. +// Abonormal scenarios are not tested here. +func TestRandomizedGenState(t *testing.T) { + interfaceRegistry := codectypes.NewInterfaceRegistry() + cdc := codec.NewProtoCodec(interfaceRegistry) + + s := rand.NewSource(1) + r := rand.New(s) + + simState := module.SimulationState{ + AppParams: make(simtypes.AppParams), + Cdc: cdc, + Rand: r, + NumBonded: 3, + Accounts: simtypes.RandomAccounts(r, 3), + InitialStake: 1000, + GenState: make(map[string]json.RawMessage), + } + + // Remark: the current RandomizedGenState function + // is actually not random as it does not utilize concretely the random value r. + // This tests will pass for any value of r. 
+ simulation.RandomizedGenState(&simState) + + var ibcGenesis types.GenesisState + simState.Cdc.MustUnmarshalJSON(simState.GenState[host.ModuleName], &ibcGenesis) + + require.NotNil(t, ibcGenesis.ClientGenesis) + require.NotNil(t, ibcGenesis.ConnectionGenesis) + require.NotNil(t, ibcGenesis.ChannelGenesis) +} diff --git a/core/spec/01_concepts.md b/core/spec/01_concepts.md new file mode 100644 index 0000000000..4347fb6741 --- /dev/null +++ b/core/spec/01_concepts.md @@ -0,0 +1,405 @@ + + +# Concepts + +> NOTE: if you are not familiar with the IBC terminology and concepts, please read +this [document](https://github.com/cosmos/ics/blob/master/ibc/1_IBC_TERMINOLOGY.md) as prerequisite reading. + +## Client Creation, Updates, and Upgrades + +IBC clients are on-chain light clients. The light client is responsible for verifying +counterparty state. A light client can be created by any user submitting a valid initial +`ClientState` and `ConsensusState`. The client identifier is auto-generated using the +client type and the global client counter appended in the format: `{client-type}-{N}`. +Clients are given a client identifier prefixed store to store their associated client +state and consensus states. Consensus states are stored using their associated height. + +Clients can be updated by any user submitting a valid `Header`. The client state callback +to `CheckHeaderAndUpdateState` is responsible for verifying the header against previously +stored state. The function should also return the updated client state and consensus state +if the header is considered a valid update. A light client, such as Tendermint, may have +client-specific parameters like `TrustLevel` which must be considered valid in relation +to the `Header`. The update height is not necessarily the latest height of the light +client. Updates may fill in missing consensus state heights. + +Clients may be upgraded. The upgrade should be verified using `VerifyUpgrade`. It is not +a requirement to allow for light client upgrades. For example, the solo machine client +will simply return an error on `VerifyUpgrade`. Clients which implement upgrades +are expected to account for, but not necessarily support, planned and unplanned upgrades. + +## Client Misbehaviour + +IBC clients must freeze when the counterparty chain becomes byzantine and +takes actions that could fool the light client into accepting invalid state +transitions. Thus, relayers are able to submit Misbehaviour proofs that prove +that a counterparty chain has signed two Headers for the same height. This +constitutes misbehaviour as the IBC client could have accepted either header +as valid. Upon verifying the misbehaviour the IBC client must freeze at that +height so that any proof verifications for the frozen height or later fail. + +Note, there is a difference between the chain-level Misbehaviour that IBC is +concerned with and the validator-level Evidence that Tendermint is concerned +with. Tendermint must be able to detect, submit, and punish any evidence of +individual validators breaking the Tendermint consensus protocol and attempting +to mount an attack. IBC clients must only act when an attack is successful +and the chain has successfully forked. In this case, valid Headers submitted +to the IBC client can no longer be trusted and the client must freeze. + +Governance may then choose to override a frozen client and provide the correct, +canonical Header so that the client can continue operating after the Misbehaviour +submission.
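As a small, illustrative sketch of the `{client-type}-{N}` identifier format described under client creation above (the helper name below is hypothetical, not necessarily a helper this module exposes):

```go
package main

import "fmt"

// formatClientIdentifier shows how a client identifier of the form
// {client-type}-{N} can be derived from the client type and the global
// client counter. The function name is illustrative only.
func formatClientIdentifier(clientType string, counter uint64) string {
	return fmt.Sprintf("%s-%d", clientType, counter)
}

func main() {
	// e.g. the first Tendermint client created on a chain
	fmt.Println(formatClientIdentifier("07-tendermint", 0)) // prints "07-tendermint-0"
}
```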
+ +## ClientUpdateProposal + +A governance proposal may be passed to update a specified client using another client +known as the "substitute client". This is useful in unfreezing clients or updating +expired clients, thereby making the affected channels active again. Each client is +expected to implement this functionality. A client may choose to disallow an update +by a governance proposal by returning an error in the client state function 'CheckSubstituteAndUpdateState'. + +The localhost client cannot be updated by a governance proposal. + +The solo machine client requires the boolean flag 'AllowUpdateAfterProposal' to be set +to true in order to be updated by a proposal. This is set upon client creation and cannot +be updated later. + +The tendermint client has two update flags, 'AllowUpdateAfterExpiry' and +'AllowUpdateAfterMisbehaviour'. The former flag can only be used to unexpire clients. The +latter flag can be used to unfreeze a client and, if necessary, it will also unexpire the client. +It is best practice to initialize a new substitute client instead of using an existing one. +This avoids potential issues of the substitute becoming frozen due to misbehaviour or the +subject client becoming refrozen due to misbehaviour not being expired at the time the +proposal passes. These boolean flags are set upon client creation and cannot be updated later. + +The `CheckSubstituteAndUpdateState` function provides the light client with its own client +store, the client store of the substitute, the substitute client state, and the initial +height that should be used when referring to the substitute client. Most light client +implementations should copy consensus states from the substitute to the subject, but +are not required to do so. Light clients may copy information as they deem necessary. + +It is not recommended to use a substitute client in normal operations since the subject +light client will be given unrestricted access to the substitute client store. Governance +should not pass votes which would enable byzantine light client modules to modify the state +of the substitute. + +## IBC Client Heights + +IBC Client Heights are represented by the struct: + +```go +type Height struct { + RevisionNumber uint64 + RevisionHeight uint64 +} +``` + +The `RevisionNumber` represents the revision of the chain that the height is representing. +A revision typically represents a continuous, monotonically increasing range of block-heights. +The `RevisionHeight` represents the height of the chain within the given revision. + +On any reset of the `RevisionHeight`, for example, when hard-forking a Tendermint chain, +the `RevisionNumber` will get incremented. This allows IBC clients to distinguish between a +block-height `n` of a previous revision of the chain (at revision `p`) and block-height `n` of the current +revision of the chain (at revision `e`). + +`Heights` that share the same revision number can be compared by simply comparing their respective `RevisionHeights`. +Heights that do not share the same revision number will only be compared using their respective `RevisionNumbers`. +Thus a height `h` with revision number `e+1` will always be greater than a height `g` with revision number `e`, +**REGARDLESS** of the difference in revision heights.
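To make the comparison rule concrete, here is a minimal sketch using the `Height` struct reproduced above (the 02-client types package ships its own comparison methods; this is only an illustration of the rule, not the module's implementation):

```go
// lessThan reports whether height a is strictly lower than height b using the
// rule described above: revision numbers are compared first, and revision
// heights are only compared when the revision numbers are equal.
func lessThan(a, b Height) bool {
	if a.RevisionNumber != b.RevisionNumber {
		return a.RevisionNumber < b.RevisionNumber
	}
	return a.RevisionHeight < b.RevisionHeight
}
```

The concrete example below follows directly from this rule.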
+ +Ex: + +```go +Height{RevisionNumber: 3, RevisionHeight: 0} > Height{RevisionNumber: 2, RevisionHeight: 100000000000} +``` + +When a Tendermint chain is running a particular revision, relayers can simply submit headers and proofs with the revision number +given by the chain's chainID, and the revision height given by the Tendermint block height. When a chain updates using a hard-fork +and resets its block-height, it is responsible for updating its chain-id to increment the revision number. +IBC Tendermint clients then verifies the revision number against their `ChainId` and treat the `RevisionHeight` as the Tendermint block-height. + +Tendermint chains wishing to use revisions to maintain persistent IBC connections even across height-resetting upgrades must format their chain-ids +in the following manner: `{chainID}-{revision_number}`. On any height-resetting upgrade, the chainID **MUST** be updated with a higher revision number +than the previous value. + +Ex: + +- Before upgrade ChainID: `gaiamainnet-3` +- After upgrade ChainID: `gaiamainnet-4` + +Clients that do not require revisions, such as the solo-machine client, simply hardcode `0` into the revision number whenever they +need to return an IBC height when implementing IBC interfaces and use the `RevisionHeight` exclusively. + +Other client-types may implement their own logic to verify the IBC Heights that relayers provide in their `Update`, `Misbehavior`, and +`Verify` functions respectively. + +The IBC interfaces expect an `ibcexported.Height` interface, however all clients should use the concrete implementation provided in +`02-client/types` and reproduced above. + +## Connection Handshake + +The connection handshake occurs in 4 steps as defined in [ICS 03](https://github.com/cosmos/ics/tree/master/spec/ics-003-connection-semantics). + +`ConnOpenInit` is the first attempt to initialize a connection on the executing chain. +The handshake is expected to succeed if the version selected is supported. The connection +identifier for the counterparty connection must be left empty indicating that the counterparty +must select its own identifier. The connection identifier is auto derived in the format: +`connection{N}` where N is the next sequence to be used. The counter begins at 0 and increments +by 1. The connection is set and stored in the INIT state upon success. + +`ConnOpenTry` is a response to a chain executing `ConnOpenInit`. The executing chain will validate +the chain level parameters the counterparty has stored such as its chainID. The executing chain +will also verify that if a previous connection exists for the specified connection identifier +that all the parameters match and its previous state was in INIT. This may occur when both +chains execute `ConnOpenInit` simultaneously. If the connection does not exist then a connection +identifier is generated in the same format done in `ConnOpenInit`. The executing chain will verify +that the counterparty created a connection in INIT state. The executing chain will also verify +The `ClientState` and `ConsensusState` the counterparty stores for the executing chain. The +executing chain will select a version from the intersection of its supported versions and the +versions set by the counterparty. The connection is set and stored in the TRYOPEN state upon +success. + +`ConnOpenAck` may be called on a chain when the counterparty connection has entered TRYOPEN. A +previous connection on the executing chain must exist in either INIT or TRYOPEN. 
The executing +chain will verify the version the counterparty selected. If the counterparty selected its own +connection identifier, it will be validated in the basic validation of a `MsgConnOpenAck`. +The counterparty connection state is verified along with the `ClientState` and `ConsensusState` +stored for the executing chain. The connection is set and stored in the OPEN state upon success. + +`ConnOpenConfirm` is a response to a chain executing `ConnOpenAck`. The executing chain's connection +must be in TRYOPEN. The counterparty connection state is verified to be in the OPEN state. The +connection is set and stored in the OPEN state upon success. + +## Connection Version Negotiation + +During the handshake procedure for connections, a version is agreed +upon between the two parties. This occurs during the first 3 steps of the +handshake. + +During `ConnOpenInit`, party A is expected to set all the versions they wish +to support within their connection state. It is expected that this set of +versions is from most preferred to least preferred. This is not a strict +requirement for the SDK implementation of IBC because the party calling +`ConnOpenTry` will greedily select the latest version it supports that the +counterparty supports as well. A specific version can optionally be passed +as `Version` to ensure that the handshake will either complete with that +version or fail. + +During `ConnOpenTry`, party B will select a version from the counterparty's +supported versions. Priority will be placed on the latest supported version. +If a matching version cannot be found, an error is returned. + +During `ConnOpenAck`, party A will verify that they can support the version +party B selected. If they do not support the selected version, an error is +returned. After this step, the connection version is considered agreed upon. + + +A `Version` is defined as follows: + +```go +type Version struct { + // unique version identifier + Identifier string + // list of features compatible with the specified identifier + Features []string +} +``` + +A version must contain a non-empty identifier. Empty feature sets are allowed, but each +feature must be a non-empty string. + +::: warning +A set of versions should not contain two versions with the same +identifier, but differing feature sets. This will result in undefined behavior +with regards to version selection in `ConnOpenTry`. Each version in a set of +versions should have a unique version identifier. +::: + +## Channel Handshake + +The channel handshake occurs in 4 steps as defined in [ICS 04](https://github.com/cosmos/ics/tree/master/spec/ics-004-channel-and-packet-semantics). + +`ChanOpenInit` is the first attempt to initialize a channel on top of an existing connection. +The handshake is expected to succeed if the version selected for the existing connection is a +supported IBC version. The portID must correspond to a port already bound upon `InitChain`. +The channel identifier for the counterparty channel must be left empty indicating that the +counterparty must select its own identifier. The channel identifier is auto-derived in the +format: `channel{N}` where N is the next sequence to be used. The channel is set and stored +in the INIT state upon success. The channel parameters `NextSequenceSend`, `NextSequenceRecv`, +and `NextSequenceAck` are all set to 1 and a channel capability is created for the given +portID and channelID path. + +`ChanOpenTry` is a response to a chain executing `ChanOpenInit`.
If the executing chain is calling +`ChanOpenTry` after previously executing `ChanOpenInit` then the provided channel parameters must +match the previously selected parameters. If the previous channel does not exist then a channel +identifier is generated in the same format as done in `ChanOpenInit`. The connection the channel +is created on top of must be in the OPEN state and its IBC version must support the desired channel +type being created (ORDERED, UNORDERED, etc.). The executing chain will verify that the channel +state of the counterparty is in INIT. The executing chain will set and store the channel state +in TRYOPEN. The channel parameters `NextSequenceSend`, `NextSequenceRecv`, and `NextSequenceAck` +are all set to 1 and a channel capability is created for the given portID and channelID path only +if the channel did not previously exist. + +`ChanOpenAck` may be called on a chain when the counterparty channel has entered TRYOPEN. A +previous channel on the executing chain must be in either INIT or TRYOPEN state. If the +counterparty selected its own channel identifier, it will be validated in the basic validation +of `MsgChanOpenAck`. The executing chain verifies that the counterparty channel state is in +TRYOPEN. The channel is set and stored in the OPEN state upon success. + +`ChanOpenConfirm` is a response to a chain executing `ChanOpenAck`. The executing chain's +previous channel state must be in TRYOPEN. The executing chain verifies that the counterparty +channel state is OPEN. The channel is set and stored in the OPEN state upon success. + +## Channel Version Negotiation + +During the channel handshake procedure, a version must be agreed upon between +the two parties. The selection process is largely left to the callers and +the verification of valid versioning must be handled by application developers +in the channel handshake callbacks. + +During `ChanOpenInit`, a version string is passed in and set in party A's +channel state. + +During `ChanOpenTry`, a version string for party A and for party B are passed +in. The party A version string must match the version string used in +`ChanOpenInit`, otherwise channel state verification will fail. The party B +version string could be anything (even different than the proposed one by +party A). However, the proposed version by party B is expected to be fully +supported by party A. + +During the `ChanOpenAck` callback, the application module is expected to verify +the version proposed by party B using the `MsgChanOpenAck` `CounterpartyVersion` +field. The application module should throw an error if the version string is +not valid. + +In general, empty version strings are to be considered valid options for an +application module. + +Application modules may implement their own versioning system, such as semantic +versioning, or they may lean upon the versioning system used for connection +version negotiation. To use the connection version semantics, the application +would simply pass the proto-encoded version into each of the handshake calls +and decode the version string into a `Version` instance to do version verification +in the handshake callbacks. + +Implementations which do not feel they would benefit from versioning can do +basic string matching using a single compatible version. + +## Sending, Receiving, Acknowledging Packets + +Terminology: +**Packet Commitment** A hash of the packet stored on the sending chain. +**Packet Receipt** A single bit indicating that a packet has been received. +Used for timeouts.
+**Acknowledgement** Data written to indicate the result of receiving a packet. +Typically conveying either success or failure of the receive. + +A packet may be associated with one of the following states: +- the packet does not exist (i.e. it has not been sent) +- the packet has been sent but not received (the packet commitment exists on the +sending chain, but no receipt exists on the receiving chain) +- the packet has been received but not acknowledged (packet commitment exists +on the sending chain, a receipt exists on the receiving chain, but no acknowledgement +exists on the receiving chain) +- the packet has been acknowledged but the acknowledgement has not been relayed +(the packet commitment exists on the sending chain, the receipt and acknowledgement +exist on the receiving chain) +- the packet has completed its life cycle (the packet commitment does not exist on +the sending chain, but a receipt and acknowledgement exist on the receiving chain) + +Sending of a packet is initiated by a call to the `ChannelKeeper.SendPacket` +function by an application module. Packets being sent will be verified for +correctness (core logic only). If the packet is valid, a hash of the packet +will be stored as a packet commitment using the packet sequence in the key. +Packet commitments are stored on the sending chain. + +A message should be sent to the receiving chain indicating that the packet +has been committed on the sending chain and should be received on the +receiving chain. The light client on the receiving chain, which verifies +the sending chain's state, should be updated to the latest sending chain +state if possible. The verification will fail if the latest state of the +light client does not include the packet commitment. The receiving chain +is responsible for verifying that the counterparty set the hash of the +packet. If verification of the packet to be received is successful, the +receiving chain should store a receipt of the packet and call application +logic if necessary. An acknowledgement may be processed and stored at this time (synchronously) +or at another point in the future (asynchronously). + +Acknowledgements written on the receiving chain may be verified on the +sending chain. If the sending chain successfully verifies the acknowledgement, +then it may delete the packet commitment stored at that sequence. There is +no requirement for acknowledgements to be written. Only the hash of the +acknowledgement is stored on the chain. Application logic may be executed +in conjunction with verifying an acknowledgement. For example, in fungible +cross-chain token transfer, a failed acknowledgement results in locked or +burned funds being refunded. + +Relayers are responsible for reconstructing packets between the sending, +receiving, and acknowledging of packets. + +IBC applications sending and receiving packets are expected to appropriately +handle data contained within a packet. For example, cross-chain token +transfers will unmarshal the data into proto definitions representing +a token transfer. + +Future optimizations may allow for storage cleanup. Stored packet +commitments could be removed from channels which do not write +packet acknowledgements and acknowledgements could be removed +when a packet has completed its life cycle. + +## Timing out Packets + +A packet may be timed out on the receiving chain if the packet timeout height or timestamp has +been surpassed on the receiving chain or the channel has closed.
A timed-out +packet can only occur if the packet has never been received on the receiving +chain. ORDERED channels will verify that the packet sequence is greater than +the `NextSequenceRecv` on the receiving chain. UNORDERED channels will verify +that the packet receipt has not been written on the receiving chain. A timeout +on channel closure will additionally verify that the counterparty channel has +been closed. A successful timeout may execute application logic as appropriate. + +Both the packet's timeout timestamp and the timeout height must have been +surpassed on the receiving chain for a timeout to be valid. A timeout timestamp +or timeout height with a 0 value indicates the timeout field may be ignored. +Each packet is required to have at least one valid timeout field. + +## Closing Channels + +Closing a channel occurs in 2 handshake steps as defined in [ICS 04](https://github.com/cosmos/ics/tree/master/spec/ics-004-channel-and-packet-semantics). + +`ChanCloseInit` will close a channel on the executing chain if the channel exists, it is not +already closed, and the connection it exists upon is OPEN. Channels can only be closed by a +calling module or in the case of a packet timeout on an ORDERED channel. + +`ChanCloseConfirm` is a response to a counterparty channel executing `ChanCloseInit`. The channel +on the executing chain will be closed if the channel exists, the channel is not already closed, +the connection the channel exists upon is OPEN, and the executing chain successfully verifies +that the counterparty channel has been closed. + +## Port and Channel Capabilities + +## Hostname Validation + +Hostname validation is implemented as defined in [ICS 24](https://github.com/cosmos/ics/tree/master/spec/ics-024-host-requirements). + +The 24-host sub-module parses and validates identifiers. It also builds +the key paths used to store IBC-related information. + +A valid identifier must contain only alphanumeric characters or the +following list of allowed characters: +".", "\_", "+", "-", "#", "[", "]", "<", ">" + +- Client identifiers must contain between 9 and 64 characters. +- Connection identifiers must contain between 10 and 64 characters. +- Channel identifiers must contain between 10 and 64 characters. +- Port identifiers must contain between 2 and 64 characters. + +## Proofs + +Proofs for counterparty state validation are provided as bytes. These bytes +can be unmarshaled into proto definitions as necessary by light clients. +For example, the Tendermint light client will use the bytes as a Merkle +proof, whereas the solo machine client will unmarshal the proof into +several layers of proto definitions used for signature verification. diff --git a/core/spec/02_state.md b/core/spec/02_state.md new file mode 100644 index 0000000000..2c85a525a9 --- /dev/null +++ b/core/spec/02_state.md @@ -0,0 +1,28 @@ + + +# State + +The paths for the values stored in state are defined [here](https://github.com/cosmos/ics/blob/master/spec/ics-024-host-requirements/README.md#path-space). +Additionally, the SDK adds a prefix to the path to be able to aggregate the values for querying purposes. +The client type is not stored since it can be obtained through the client state.
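For illustration, the path portion of each entry in the table below is built by the 24-host key helpers used elsewhere in this change (a sketch with made-up identifiers; the prefix column is applied separately by the SDK store layer, as noted above):

```go
package main

import (
	"fmt"

	host "github.com/cosmos/cosmos-sdk/x/ibc/core/24-host"
)

func main() {
	// clients/{identifier}/clientState
	fmt.Println(string(host.FullClientStateKey("07-tendermint-0")))
	// channelEnds/ports/{identifier}/channels/{identifier}
	fmt.Println(string(host.ChannelKey("transfer", "channel-0")))
	// receipts/ports/{identifier}/channels/{identifier}/sequences/{sequence}
	fmt.Println(string(host.PacketReceiptKey("transfer", "channel-0", 1)))
}
```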
+ +| Prefix | Path | Value type | +|--------|-----------------------------------------------------------------------------|----------------| +| "0/" | "clients/{identifier}/clientState" | ClientState | +| "0/" | "clients/{identifier}/consensusStates/{height}" | ConsensusState | +| "0/" | "clients/{identifier}/connections" | []string | +| "0/" | "nextClientSequence" | uint64 | +| "0/" | "connections/{identifier}" | ConnectionEnd | +| "0/" | "nextConnectionSequence" | uint64 | +| "0/" | "ports/{identifier}" | CapabilityKey | +| "0/" | "channelEnds/ports/{identifier}/channels/{identifier}" | ChannelEnd | +| "0/" | "nextChannelSequence" | uint64 | +| "0/" | "capabilities/ports/{identifier}/channels/{identifier}" | CapabilityKey | +| "0/" | "nextSequenceSend/ports/{identifier}/channels/{identifier}" | uint64 | +| "0/" | "nextSequenceRecv/ports/{identifier}/channels/{identifier}" | uint64 | +| "0/" | "nextSequenceAck/ports/{identifier}/channels/{identifier}" | uint64 | +| "0/" | "commitments/ports/{identifier}/channels/{identifier}/sequences/{sequence}" | bytes | +| "0/" | "receipts/ports/{identifier}/channels/{identifier}/sequences/{sequence}" | bytes | +| "0/" | "acks/ports/{identifier}/channels/{identifier}/sequences/{sequence}" | bytes | diff --git a/core/spec/03_state_transitions.md b/core/spec/03_state_transitions.md new file mode 100644 index 0000000000..518ff9247b --- /dev/null +++ b/core/spec/03_state_transitions.md @@ -0,0 +1,106 @@ + + +# State Transitions + +The described state transitions assume successful message execution. + +## Create Client + +`MsgCreateClient` will initialize and store a `ClientState` and `ConsensusState` in the sub-store +created using a generated client identifier. + +## Update Client + +`MsgUpdateClient` will update the `ClientState` and create a new `ConsensusState` for the +update height. + +## Misbehaviour + +`MsgSubmitMisbehaviour` will freeze a client. + +## Upgrade Client + +`MsgUpgradeClient` will upgrade the `ClientState` and `ConsensusState` to the updated chain level +parameters and, if applicable, will update to the new light client implementation. + +## Client Update Proposal + +An Update Client Proposal will unfreeze a client (if necessary) and set an updated `ClientState`. +The light client may make optional modifications to the client prefixed store of the subject client +including copying `ConsensusStates` from the substitute to the subject. + +## Connection Open Init + +`MsgConnectionOpenInit` will initialize a connection state in INIT. + +## Connection Open Try + +`MsgConnectionOpenTry` will initialize or update a connection state to be in TRYOPEN. + +## Connection Open Ack + +`MsgConnectionOpenAck` will update a connection state from INIT or TRYOPEN to be in OPEN. + +## Connection Open Confirm + +`MsgConnectionOpenConfirm` will update a connection state from TRYOPEN to OPEN. + +## Channel Open Init + +`MsgChannelOpenInit` will initialize a channel state in INIT. It will create a channel capability +and set all Send, Receive and Ack Sequences to 1 for the channel. + +## Channel Open Try + +`MsgChannelOpenTry` will initialize or update a channel state to be in TRYOPEN. If the channel +is being initialized, it will create a channel capability and set all Send, Receive and Ack +Sequences to 1 for the channel. + +## Channel Open Ack + +`MsgChannelOpenAck` will update the channel state to OPEN. It will set the version and channel +identifier for its counterparty. + +## Channel Open Confirm + +`MsgChannelOpenConfirm` will update the channel state to OPEN.
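For readers who prefer a compact view, the channel handshake transitions above can be summarized as follows (purely illustrative; this map is not part of the module's code, and NONE denotes a channel end that does not yet exist on the executing chain):

```go
// channelHandshakeTransitions summarizes the state progression described in
// the Channel Open sections above.
var channelHandshakeTransitions = map[string]string{
	"MsgChannelOpenInit":    "NONE -> INIT",
	"MsgChannelOpenTry":     "NONE or INIT -> TRYOPEN",
	"MsgChannelOpenAck":     "INIT or TRYOPEN -> OPEN",
	"MsgChannelOpenConfirm": "TRYOPEN -> OPEN",
}
```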
+ +## Channel Close Init + +`MsgChannelCloseInit` will update the channel state to CLOSED. + +## Channel Close Confirm + +`MsgChannelCloseConfirm` will update the channel state to CLOSED. + +## Send Packet + +An application calling `ChannelKeeper.SendPacket` will increment the next sequence send and set +a hash of the packet as the packet commitment. + +## Receive Packet + +`MsgRecvPacket` will increment the next sequence receive for ORDERED channels and set a packet +receipt for UNORDERED channels. + +## Write Acknowledgement + +`WriteAcknowledgement` may be executed synchronously during the execution of `MsgRecvPacket` or +asynchronously by an application module. It writes an acknowledgement to the store. + +## Acknowledge Packet + +`MsgAcknowledgePacket` deletes the packet commitment and, for ORDERED channels, increments the next +sequence ack. + +## Timeout Packet + +`MsgTimeoutPacket` deletes the packet commitment and, for ORDERED channels, sets the channel state +to CLOSED. + +## Timeout Packet on Channel Closure + +`MsgTimeoutOnClose` deletes the packet commitment and, for ORDERED channels, sets the channel state +to CLOSED. diff --git a/core/spec/04_messages.md b/core/spec/04_messages.md new file mode 100644 index 0000000000..3728e6d6f3 --- /dev/null +++ b/core/spec/04_messages.md @@ -0,0 +1,497 @@ + + +# Messages + +In this section we describe the processing of the IBC messages and the corresponding updates to the state. + +## ICS 02 - Client + +### MsgCreateClient + +A light client is created using the `MsgCreateClient`. + +```go +type MsgCreateClient struct { + ClientState *types.Any // proto-packed client state + ConsensusState *types.Any // proto-packed consensus state + Signer sdk.AccAddress +} +``` + +This message is expected to fail if: + +- `ClientState` is empty or invalid +- `ConsensusState` is empty or invalid +- `Signer` is empty + +The message creates and stores a light client with an initial consensus state using a generated client +identifier. + +### MsgUpdateClient + +A light client is updated with a new header using the `MsgUpdateClient`. + +```go +type MsgUpdateClient struct { + ClientId string + Header *types.Any // proto-packed header + Signer sdk.AccAddress +} +``` + +This message is expected to fail if: + +- `ClientId` is invalid (not alphanumeric or not within 10-20 characters) +- `Header` is empty or invalid +- `Signer` is empty +- A `ClientState` hasn't been created for the given ID +- The client is frozen due to misbehaviour and cannot be updated +- The header fails to provide a valid update for the client + +The message validates the header and updates the client state and consensus state for the +header height. + +### MsgUpgradeClient + +A light client is upgraded using the `MsgUpgradeClient`. + +```go +type MsgUpgradeClient struct { + ClientId string + ClientState *types.Any // proto-packed client state + UpgradeHeight *Height + ProofUpgrade []byte + Signer string +} +``` + +This message is expected to fail if: + +- `ClientId` is invalid (not alphanumeric or not within 10-20 characters) +- `ClientState` is empty or invalid +- `UpgradeHeight` is empty or zero +- `ProofUpgrade` is empty +- `Signer` is empty +- A `ClientState` hasn't been created for the given ID +- The client is frozen due to misbehaviour and cannot be upgraded +- The upgrade proof fails + +The message upgrades the client state and consensus state upon successful validation of a +chain upgrade. + +### MsgSubmitMisbehaviour + +Submit evidence of light client misbehaviour to freeze the client state and prevent additional packets from being relayed.
+
+```go
+type MsgSubmitMisbehaviour struct {
+	ClientId     string
+	Misbehaviour *types.Any // proto-packed misbehaviour
+	Signer       sdk.AccAddress
+}
+```
+
+This message is expected to fail if:
+
+- `ClientId` is invalid (not alphanumeric or not within 10-20 characters)
+- `Misbehaviour` is empty or invalid
+- `Signer` is empty
+- A `ClientState` hasn't been created for the given ID
+- The `Misbehaviour` check fails
+
+The message verifies the misbehaviour and freezes the client.
+
+## ICS 03 - Connection
+
+### MsgConnectionOpenInit
+
+A connection is initialized on a light client using the `MsgConnectionOpenInit`.
+
+```go
+type MsgConnectionOpenInit struct {
+	ClientId     string
+	Counterparty Counterparty
+	Version      string
+	Signer       sdk.AccAddress
+}
+```
+
+This message is expected to fail if:
+
+- `ClientId` is invalid (see naming requirements)
+- `Counterparty` is empty
+- `Version` is not empty and invalid
+- `Signer` is empty
+- A Client hasn't been created for the given ID
+- A Connection for the given ID already exists
+
+The message creates a connection for the given ID with an INIT state.
+
+### MsgConnectionOpenTry
+
+When a counterparty connection has been initialized, a connection is initialized on a light client
+using the `MsgConnectionOpenTry`.
+
+```go
+type MsgConnectionOpenTry struct {
+	ClientId             string
+	PreviousConnectionId string
+	ClientState          *types.Any // proto-packed counterparty client
+	Counterparty         Counterparty
+	CounterpartyVersions []string
+	ProofHeight          Height
+	ProofInit            []byte
+	ProofClient          []byte
+	ProofConsensus       []byte
+	ConsensusHeight      Height
+	Signer               sdk.AccAddress
+}
+```
+
+This message is expected to fail if:
+
+- `ClientId` is invalid (see naming requirements)
+- `PreviousConnectionId` is not empty and invalid (see naming requirements)
+- `ClientState` is not a valid client of the executing chain
+- `Counterparty` is empty
+- `CounterpartyVersions` is empty
+- `ProofHeight` is zero
+- `ProofInit` is empty
+- `ProofClient` is empty
+- `ProofConsensus` is empty
+- `ConsensusHeight` is zero
+- `Signer` is empty
+- A Client hasn't been created for the given ID
+- A previous connection exists but does not match the supplied parameters
+- `ProofInit` does not prove that the counterparty connection is in state INIT
+- `ProofClient` does not prove that the counterparty has stored the `ClientState` provided in message
+- `ProofConsensus` does not prove that the counterparty has the correct consensus state for this chain
+
+The message creates a connection for a generated connection ID with a TRYOPEN state. If a previous
+connection already exists, it updates the connection state from INIT to TRYOPEN.
+
+### MsgConnectionOpenAck
+
+After the counterparty connection has moved to TRYOPEN, the connection is opened on a light client
+using the `MsgConnectionOpenAck`.
+
+```go
+type MsgConnectionOpenAck struct {
+	ConnectionId             string
+	CounterpartyConnectionId string
+	Version                  string
+	ClientState              *types.Any // proto-packed counterparty client
+	ProofHeight              Height
+	ProofTry                 []byte
+	ProofClient              []byte
+	ProofConsensus           []byte
+	ConsensusHeight          Height
+	Signer                   sdk.AccAddress
+}
+```
+
+This message is expected to fail if:
+
+- `ConnectionId` is invalid (see naming requirements)
+- `CounterpartyConnectionId` is invalid (see naming requirements)
+- `Version` is empty
+- `ClientState` is not a valid client of the executing chain
+- `ProofHeight` is zero
+- `ProofTry` is empty
+- `ProofClient` is empty
+- `ProofConsensus` is empty
+- `ConsensusHeight` is zero
+- `Signer` is empty
+- `ProofTry` does not prove that the counterparty connection is in state TRYOPEN
+- `ProofClient` does not prove that the counterparty has stored the `ClientState` provided by message
+- `ProofConsensus` does not prove that the counterparty has the correct consensus state for this chain
+
+The message sets the connection state for the given ID to OPEN. `CounterpartyConnectionId`
+should be the `ConnectionId` used by the counterparty connection.
+
+### MsgConnectionOpenConfirm
+
+When a counterparty connection is opened, the connection is opened on a light client using
+the `MsgConnectionOpenConfirm`.
+
+```go
+type MsgConnectionOpenConfirm struct {
+	ConnectionId string
+	ProofAck     []byte
+	ProofHeight  Height
+	Signer       sdk.AccAddress
+}
+```
+
+This message is expected to fail if:
+
+- `ConnectionId` is invalid (see naming requirements)
+- `ProofAck` is empty
+- `ProofHeight` is zero
+- `Signer` is empty
+- A Connection with the given ID does not exist
+- `ProofAck` does not prove that the counterparty connection is in state OPEN
+
+The message sets the connection state for the given ID to OPEN.
+
+## ICS 04 - Channels
+
+### MsgChannelOpenInit
+
+A channel handshake is initiated by chain A using the `MsgChannelOpenInit`
+message.
+
+```go
+type MsgChannelOpenInit struct {
+	PortId  string
+	Channel Channel
+	Signer  sdk.AccAddress
+}
+```
+
+This message is expected to fail if:
+
+- `PortId` is invalid (see naming requirements)
+- `Channel` is empty
+- `Signer` is empty
+- A Channel End exists for the given Channel ID and Port ID
+
+The message creates a channel on chain A with an INIT state for a generated Channel ID
+and the given Port ID.
+
+### MsgChannelOpenTry
+
+A channel handshake initialization attempt is acknowledged by chain B using
+the `MsgChannelOpenTry` message.
+
+```go
+type MsgChannelOpenTry struct {
+	PortId              string
+	PreviousChannelId   string
+	Channel             Channel
+	CounterpartyVersion string
+	ProofInit           []byte
+	ProofHeight         Height
+	Signer              sdk.AccAddress
+}
+```
+
+This message is expected to fail if:
+
+- `PortId` is invalid (see naming requirements)
+- `PreviousChannelId` is not empty and invalid (see naming requirements)
+- `Channel` is empty
+- `CounterpartyVersion` is empty
+- `ProofInit` is empty
+- `ProofHeight` is zero
+- `Signer` is empty
+- A previous channel exists and does not match the provided parameters
+- `ProofInit` does not prove that the counterparty's Channel state is in INIT
+
+The message creates a channel on chain B with a TRYOPEN state, using a generated Channel ID
+and the given Port ID, if the previous channel does not already exist. Otherwise it updates the
+previous channel state from INIT to TRYOPEN.
+
+
+### MsgChannelOpenAck
+
+A channel handshake is opened by chain A using the `MsgChannelOpenAck` message.
+ +```go +type MsgChannelOpenAck struct { + PortId string + ChannelId string + CounterpartyChannelId string + CounterpartyVersion string + ProofTry []byte + ProofHeight Height + Signer sdk.AccAddress +} +``` + +This message is expected to fail if: + +- `PortId` is invalid (see naming requirements) +- `ChannelId` is invalid (see naming requirements) +- `CounterpartyChannelId` is invalid (see naming requirements) +- `CounterpartyVersion` is empty +- `ProofTry` is empty +- `ProofHeight` is zero +- `Signer` is empty +- `ProofTry` does not prove that the counterparty's Channel state is in TRYOPEN + +The message sets a channel on chain A to state OPEN for the given Channel ID and Port ID. +`CounterpartyChannelId` should be the `ChannelId` used by the counterparty channel. + +### MsgChannelOpenConfirm + +A channel handshake is confirmed and opened by a chain B using the `MsgChannelOpenConfirm` +message. + +```go +type MsgChannelOpenConfirm struct { + PortId string + ChannelId string + ProofAck []byte + ProofHeight Height + Signer sdk.AccAddress +} +``` + +This message is expected to fail if: + +- `PortId` is invalid (see naming requirements) +- `ChannelId` is invalid (see naming requirements) +- `ProofAck` is empty +- `ProofHeight` is zero +- `Signer` is empty +- `ProofAck` does not prove that the counterparty's Channel state is in OPEN + +The message sets a channel on chain B to state OPEN for the given Channel ID and Port ID. + +### MsgChannelCloseInit + +A channel is closed on chain A using the `MsgChannelCloseInit`. + +```go +type MsgChannelCloseInit struct { + PortId string + ChannelId string + Signer sdk.AccAddress +} +``` + +This message is expected to fail if: + +- `PortId` is invalid (see naming requirements) +- `ChannelId` is invalid (see naming requirements) +- `Signer` is empty +- A Channel for the given Port ID and Channel ID does not exist or is already closed + +The message closes a channel on chain A for the given Port ID and Channel ID. + +### MsgChannelCloseConfirm + +A channel is closed on chain B using the `MsgChannelCloseConfirm`. + +```go +type MsgChannelCloseConfirm struct { + PortId string + ChannelId string + ProofInit []byte + ProofHeight Height + Signer sdk.AccAddress +} +``` + +This message is expected to fail if: + +- `PortId` is invalid (see naming requirements) +- `ChannelId` is invalid (see naming requirements) +- `ProofInit` is empty +- `ProofHeight` is zero +- `Signer` is empty +- A Channel for the given Port ID and Channel ID does not exist or is already closed +- `ProofInit` does not prove that the counterparty set its channel to state CLOSED + +The message closes a channel on chain B for the given Port ID and Channel ID. + +### MsgRecvPacket + +A packet is received on chain B using the `MsgRecvPacket`. + +```go +type MsgRecvPacket struct { + Packet Packet + Proof []byte + ProofHeight Height + Signer sdk.AccAddress +} +``` + +This message is expected to fail if: + +- `Proof` is empty +- `ProofHeight` is zero +- `Signer` is empty +- `Packet` fails basic validation +- `Proof` does not prove that the counterparty sent the `Packet`. + +The message receives a packet on chain B. + +### MsgTimeout + +A packet is timed out on chain A using the `MsgTimeout`. 
+ +```go +type MsgTimeout struct { + Packet Packet + Proof []byte + ProofHeight Height + NextSequenceRecv uint64 + Signer sdk.AccAddress +} +``` + +This message is expected to fail if: + +- `Proof` is empty +- `ProofHeight` is zero +- `NextSequenceRecv` is zero +- `Signer` is empty +- `Packet` fails basic validation +- `Proof` does not prove that the packet has not been received on the counterparty chain. + +The message times out a packet that was sent on chain A and never received on chain B. + +### MsgTimeoutOnClose + +A packet is timed out on chain A due to the closure of the channel end on chain B using +the `MsgTimeoutOnClose`. + +```go +type MsgTimeoutOnClose struct { + Packet Packet + Proof []byte + ProofClose []byte + ProofHeight Height + NextSequenceRecv uint64 + Signer sdk.AccAddress +} +``` + +This message is expected to fail if: + +- `Proof` is empty +- `ProofClose` is empty +- `ProofHeight` is zero +- `NextSequenceRecv` is zero +- `Signer` is empty +- `Packet` fails basic validation +- `Proof` does not prove that the packet has not been received on the counterparty chain. +- `ProofClose` does not prove that the counterparty channel end has been closed. + +The message times out a packet that was sent on chain A and never received on chain B. + +### MsgAcknowledgement + +A packet is acknowledged on chain A using the `MsgAcknowledgement`. + +```go +type MsgAcknowledgement struct { + Packet Packet + Acknowledgement []byte + Proof []byte + ProofHeight Height + Signer sdk.AccAddress +} +``` + +This message is expected to fail if: + +- `Proof` is empty +- `ProofHeight` is zero +- `Signer` is empty +- `Packet` fails basic validation +- `Acknowledgement` is empty +- `Proof` does not prove that the counterparty received the `Packet`. + +The message acknowledges that the packet sent from chainA was received on chain B. diff --git a/core/spec/05_callbacks.md b/core/spec/05_callbacks.md new file mode 100644 index 0000000000..dd74738025 --- /dev/null +++ b/core/spec/05_callbacks.md @@ -0,0 +1,80 @@ + + +# Callbacks + +Application modules implementing the IBC module must implement the following callbacks as found in [05-port](../05-port/types/module.go). +More information on how to implement these callbacks can be found in the [implementation guide](../../../../docs/ibc/custom.md). + +```go +// IBCModule defines an interface that implements all the callbacks +// that modules must define as specified in ICS-26 +type IBCModule interface { + OnChanOpenInit( + ctx sdk.Context, + order channeltypes.Order, + connectionHops []string, + portId string, + channelId string, + channelCap *capability.Capability, + counterparty channeltypes.Counterparty, + version string, + ) error + + OnChanOpenTry( + ctx sdk.Context, + order channeltypes.Order, + connectionHops []string, + portId, + channelId string, + channelCap *capability.Capability, + counterparty channeltypes.Counterparty, + version, + counterpartyVersion string, + ) error + + OnChanOpenAck( + ctx sdk.Context, + portId, + channelId string, + counterpartyVersion string, + ) error + + OnChanOpenConfirm( + ctx sdk.Context, + portId, + channelId string, + ) error + + OnChanCloseInit( + ctx sdk.Context, + portId, + channelId string, + ) error + + OnChanCloseConfirm( + ctx sdk.Context, + portId, + channelId string, + ) error + + // OnRecvPacket must return the acknowledgement bytes + // In the case of an asynchronous acknowledgement, nil should be returned. 
+	OnRecvPacket(
+		ctx sdk.Context,
+		packet channeltypes.Packet,
+	) (*sdk.Result, []byte, error)
+
+	OnAcknowledgementPacket(
+		ctx sdk.Context,
+		packet channeltypes.Packet,
+		acknowledgement []byte,
+	) (*sdk.Result, error)
+
+	OnTimeoutPacket(
+		ctx sdk.Context,
+		packet channeltypes.Packet,
+	) (*sdk.Result, error)
+}
+```
diff --git a/core/spec/06_events.md b/core/spec/06_events.md
new file mode 100644
index 0000000000..528a30cffa
--- /dev/null
+++ b/core/spec/06_events.md
@@ -0,0 +1,241 @@
+
+# Events
+
+The IBC module emits the following events. It can be expected that the type `message`,
+with an attribute key of `action`, will represent the first event for each message
+being processed, as emitted by the SDK's baseapp. Each IBC TAO message will
+also emit its module name in the format 'ibc_sub-modulename'.
+
+All the events for the Channel handshakes, `SendPacket`, `RecvPacket`, `AcknowledgePacket`,
+`TimeoutPacket` and `TimeoutOnClose` will emit additional events not specified here due to
+callbacks to IBC applications.
+
+## ICS 02 - Client
+
+### MsgCreateClient
+
+| Type | Attribute Key | Attribute Value |
+|---------------|------------------|-------------------|
+| create_client | client_id | {clientId} |
+| create_client | client_type | {clientType} |
+| create_client | consensus_height | {consensusHeight} |
+| message | action | create_client |
+| message | module | ibc_client |
+
+### MsgUpdateClient
+
+| Type | Attribute Key | Attribute Value |
+|---------------|------------------|-------------------|
+| update_client | client_id | {clientId} |
+| update_client | client_type | {clientType} |
+| update_client | consensus_height | {consensusHeight} |
+| message | action | update_client |
+| message | module | ibc_client |
+
+### MsgSubmitMisbehaviour
+
+| Type | Attribute Key | Attribute Value |
+|---------------------|------------------|---------------------|
+| client_misbehaviour | client_id | {clientId} |
+| client_misbehaviour | client_type | {clientType} |
+| client_misbehaviour | consensus_height | {consensusHeight} |
+| message | action | client_misbehaviour |
+| message | module | evidence |
+| message | sender | {senderAddress} |
+| submit_evidence | evidence_hash | {evidenceHash} |
+
+### UpdateClientProposal
+
+| Type | Attribute Key | Attribute Value |
+|------------------------|------------------|-------------------|
+| update_client_proposal | client_id | {clientId} |
+| update_client_proposal | client_type | {clientType} |
+| update_client_proposal | consensus_height | {consensusHeight} |
+
+## ICS 03 - Connection
+
+### MsgConnectionOpenInit
+
+| Type | Attribute Key | Attribute Value |
+|----------------------|----------------------------|-----------------------------|
+| connection_open_init | connection_id | {connectionId} |
+| connection_open_init | client_id | {clientId} |
+| connection_open_init | counterparty_client_id | {counterparty.clientId} |
+| message | action | connection_open_init |
+| message | module | ibc_connection |
+
+### MsgConnectionOpenTry
+
+| Type | Attribute Key | Attribute Value |
+|---------------------|----------------------------|-----------------------------|
+| connection_open_try | connection_id | {connectionId} |
+| connection_open_try | client_id | {clientId} |
+| connection_open_try | counterparty_client_id | {counterparty.clientId} |
+| connection_open_try | counterparty_connection_id | {counterparty.connectionId} |
+| message | action | connection_open_try |
+| message | module | ibc_connection |
+
+### 
MsgConnectionOpenAck + +| Type | Attribute Key | Attribute Value | +|----------------------|----------------------------|-----------------------------| +| connection_open_ack | connection_id | {connectionId} | +| connection_open_ack | client_id | {clientId} | +| connection_open_ack | counterparty_client_id | {counterparty.clientId} | +| connection_open_ack | counterparty_connection_id | {counterparty.connectionId} | +| message | module | ibc_connection | +| message | action | connection_open_ack | + +### MsgConnectionOpenConfirm + +| Type | Attribute Key | Attribute Value | +|-------------------------|----------------------------|-----------------------------| +| connection_open_confirm | connection_id | {connectionId} | +| connection_open_confirm | client_id | {clientId} | +| connection_open_confirm | counterparty_client_id | {counterparty.clientId} | +| connection_open_confirm | counterparty_connection_id | {counterparty.connectionId} | +| message | action | connection_open_confirm | +| message | module | ibc_connection | + +## ICS 04 - Channel + +### MsgChannelOpenInit + +| Type | Attribute Key | Attribute Value | +|-------------------|-------------------------|----------------------------------| +| channel_open_init | port_id | {portId} | +| channel_open_init | channel_id | {channelId} | +| channel_open_init | counterparty_port_id | {channel.counterparty.portId} | +| channel_open_init | connection_id | {channel.connectionHops} | +| message | action | channel_open_init | +| message | module | ibc_channel | + +### MsgChannelOpenTry + +| Type | Attribute Key | Attribute Value | +|------------------|-------------------------|----------------------------------| +| channel_open_try | port_id | {portId} | +| channel_open_try | channel_id | {channelId} | +| channel_open_try | counterparty_port_id | {channel.counterparty.portId} | +| channel_open_try | counterparty_channel_id | {channel.counterparty.channelId} | +| channel_open_try | connection_id | {channel.connectionHops} | +| message | action | channel_open_try | +| message | module | ibc_channel | + +### MsgChannelOpenAck + +| Type | Attribute Key | Attribute Value | +|------------------|-------------------------|----------------------------------| +| channel_open_ack | port_id | {portId} | +| channel_open_ack | channel_id | {channelId} | +| channel_open_ack | counterparty_port_id | {channel.counterparty.portId} | +| channel_open_ack | counterparty_channel_id | {channel.counterparty.channelId} | +| channel_open_ack | connection_id | {channel.connectionHops} | +| message | action | channel_open_ack | +| message | module | ibc_channel | + +### MsgChannelOpenConfirm + +| Type | Attribute Key | Attribute Value | +|----------------------|-------------------------|----------------------------------| +| channel_open_confirm | port_id | {portId} | +| channel_open_confirm | channel_id | {channelId} | +| channel_open_confirm | counterparty_port_id | {channel.counterparty.portId} | +| channel_open_confirm | counterparty_channel_id | {channel.counterparty.channelId} | +| channel_open_confirm | connection_id | {channel.connectionHops} | +| message | module | ibc_channel | +| message | action | channel_open_confirm | + +### MsgChannelCloseInit + +| Type | Attribute Key | Attribute Value | +|--------------------|-------------------------|----------------------------------| +| channel_close_init | port_id | {portId} | +| channel_close_init | channel_id | {channelId} | +| channel_close_init | counterparty_port_id | {channel.counterparty.portId} | +| 
channel_close_init | counterparty_channel_id | {channel.counterparty.channelId} | +| channel_close_init | connection_id | {channel.connectionHops} | +| message | action | channel_close_init | +| message | module | ibc_channel | + +### MsgChannelCloseConfirm + +| Type | Attribute Key | Attribute Value | +|-----------------------|-------------------------|----------------------------------| +| channel_close_confirm | port_id | {portId} | +| channel_close_confirm | channel_id | {channelId} | +| channel_close_confirm | counterparty_port_id | {channel.counterparty.portId} | +| channel_close_confirm | counterparty_channel_id | {channel.counterparty.channelId} | +| channel_close_confirm | connection_id | {channel.connectionHops} | +| message | action | channel_close_confirm | +| message | module | ibc_channel | + +### SendPacket (application module call) + +| Type | Attribute Key | Attribute Value | +|-------------|--------------------------|----------------------------------| +| send_packet | packet_data | {data} | +| send_packet | packet_timeout_height | {timeoutHeight} | +| send_packet | packet_timeout_timestamp | {timeoutTimestamp} | +| send_packet | packet_sequence | {sequence} | +| send_packet | packet_src_port | {sourcePort} | +| send_packet | packet_src_channel | {sourceChannel} | +| send_packet | packet_dst_port | {destinationPort} | +| send_packet | packet_dst_channel | {destinationChannel} | +| send_packet | packet_channel_ordering | {channel.Ordering} | +| message | action | application-module-defined-field | +| message | module | ibc-channel | + +### MsgRecvPacket + +| Type | Attribute Key | Attribute Value | +|-------------|--------------------------|----------------------| +| recv_packet | packet_data | {data} | +| recv_packet | packet_ack | {acknowledgement} | +| recv_packet | packet_timeout_height | {timeoutHeight} | +| recv_packet | packet_timeout_timestamp | {timeoutTimestamp} | +| recv_packet | packet_sequence | {sequence} | +| recv_packet | packet_src_port | {sourcePort} | +| recv_packet | packet_src_channel | {sourceChannel} | +| recv_packet | packet_dst_port | {destinationPort} | +| recv_packet | packet_dst_channel | {destinationChannel} | +| recv_packet | packet_channel_ordering | {channel.Ordering} | +| message | action | recv_packet | +| message | module | ibc-channel | + +### MsgAcknowledgePacket + +| Type | Attribute Key | Attribute Value | +|--------------------|--------------------------|----------------------| +| acknowledge_packet | packet_timeout_height | {timeoutHeight} | +| acknowledge_packet | packet_timeout_timestamp | {timeoutTimestamp} | +| acknowledge_packet | packet_sequence | {sequence} | +| acknowledge_packet | packet_src_port | {sourcePort} | +| acknowledge_packet | packet_src_channel | {sourceChannel} | +| acknowledge_packet | packet_dst_port | {destinationPort} | +| acknowledge_packet | packet_dst_channel | {destinationChannel} | +| acknowledge_packet | packet_channel_ordering | {channel.Ordering} | +| message | action | acknowledge_packet | +| message | module | ibc-channel | + +### MsgTimeoutPacket & MsgTimeoutOnClose + +| Type | Attribute Key | Attribute Value | +|----------------|--------------------------|----------------------| +| timeout_packet | packet_timeout_height | {timeoutHeight} | +| timeout_packet | packet_timeout_timestamp | {timeoutTimestamp} | +| timeout_packet | packet_sequence | {sequence} | +| timeout_packet | packet_src_port | {sourcePort} | +| timeout_packet | packet_src_channel | {sourceChannel} | +| timeout_packet | 
packet_dst_port | {destinationPort} |
+| timeout_packet | packet_dst_channel | {destinationChannel} |
+| timeout_packet | packet_channel_ordering | {channel.Ordering} |
+| message | action | timeout_packet |
+| message | module | ibc-channel |
+
+
diff --git a/core/spec/07_params.md b/core/spec/07_params.md
new file mode 100644
index 0000000000..67e79ef81d
--- /dev/null
+++ b/core/spec/07_params.md
@@ -0,0 +1,21 @@
+
+# Parameters
+
+## Clients
+
+The ibc clients contain the following parameters:
+
+| Key | Type | Default Value |
+|------------------|----------|------------------------------------|
+| `AllowedClients` | []string | `"06-solomachine","07-tendermint"` |
+
+### AllowedClients
+
+The allowed clients parameter defines an allowlist of client types supported by the chain. A client
+that is not registered on this list will fail upon creation or on genesis validation. Note that,
+since the client type is an arbitrary string, chains must not register two light clients which
+return the same value for the `ClientType()` function; otherwise the allowlist check can be
+bypassed.
diff --git a/core/spec/README.md b/core/spec/README.md
new file mode 100644
index 0000000000..f6de9749b5
--- /dev/null
+++ b/core/spec/README.md
@@ -0,0 +1,26 @@
+
+# `ibc core`
+
+## Abstract
+
+This specification defines the implementation of the IBC protocol on the Cosmos SDK, the
+changes made to the specification and where to find each specific ICS spec within
+the module.
+
+For the general specification please refer to the [Interchain Standards](https://github.com/cosmos/ics).
+
+## Contents
+
+1. **[Concepts](01_concepts.md)**
+2. **[State](02_state.md)**
+3. **[State Transitions](03_state_transitions.md)**
+4. **[Messages](04_messages.md)**
+5. **[Callbacks](05_callbacks.md)**
+6. **[Events](06_events.md)**
+7. **[Params](07_params.md)**
diff --git a/core/types/codec.go b/core/types/codec.go
new file mode 100644
index 0000000000..db110ac9d5
--- /dev/null
+++ b/core/types/codec.go
@@ -0,0 +1,23 @@
+package types
+
+import (
+	codectypes "github.com/cosmos/cosmos-sdk/codec/types"
+	clienttypes "github.com/cosmos/cosmos-sdk/x/ibc/core/02-client/types"
+	connectiontypes "github.com/cosmos/cosmos-sdk/x/ibc/core/03-connection/types"
+	channeltypes "github.com/cosmos/cosmos-sdk/x/ibc/core/04-channel/types"
+	commitmenttypes "github.com/cosmos/cosmos-sdk/x/ibc/core/23-commitment/types"
+	solomachinetypes "github.com/cosmos/cosmos-sdk/x/ibc/light-clients/06-solomachine/types"
+	ibctmtypes "github.com/cosmos/cosmos-sdk/x/ibc/light-clients/07-tendermint/types"
+	localhosttypes "github.com/cosmos/cosmos-sdk/x/ibc/light-clients/09-localhost/types"
+)
+
+// RegisterInterfaces registers x/ibc interfaces into protobuf Any.
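+// It is typically called once during application wiring so that interface
+// implementations packed into Any values can be resolved when decoding.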
+func RegisterInterfaces(registry codectypes.InterfaceRegistry) { + clienttypes.RegisterInterfaces(registry) + connectiontypes.RegisterInterfaces(registry) + channeltypes.RegisterInterfaces(registry) + solomachinetypes.RegisterInterfaces(registry) + ibctmtypes.RegisterInterfaces(registry) + localhosttypes.RegisterInterfaces(registry) + commitmenttypes.RegisterInterfaces(registry) +} diff --git a/core/types/genesis.go b/core/types/genesis.go new file mode 100644 index 0000000000..f7d78e5c11 --- /dev/null +++ b/core/types/genesis.go @@ -0,0 +1,38 @@ +package types + +import ( + codectypes "github.com/cosmos/cosmos-sdk/codec/types" + clienttypes "github.com/cosmos/cosmos-sdk/x/ibc/core/02-client/types" + connectiontypes "github.com/cosmos/cosmos-sdk/x/ibc/core/03-connection/types" + channeltypes "github.com/cosmos/cosmos-sdk/x/ibc/core/04-channel/types" +) + +var _ codectypes.UnpackInterfacesMessage = GenesisState{} + +// DefaultGenesisState returns the ibc module's default genesis state. +func DefaultGenesisState() *GenesisState { + return &GenesisState{ + ClientGenesis: clienttypes.DefaultGenesisState(), + ConnectionGenesis: connectiontypes.DefaultGenesisState(), + ChannelGenesis: channeltypes.DefaultGenesisState(), + } +} + +// UnpackInterfaces implements UnpackInterfacesMessage.UnpackInterfaces +func (gs GenesisState) UnpackInterfaces(unpacker codectypes.AnyUnpacker) error { + return gs.ClientGenesis.UnpackInterfaces(unpacker) +} + +// Validate performs basic genesis state validation returning an error upon any +// failure. +func (gs *GenesisState) Validate() error { + if err := gs.ClientGenesis.Validate(); err != nil { + return err + } + + if err := gs.ConnectionGenesis.Validate(); err != nil { + return err + } + + return gs.ChannelGenesis.Validate() +} diff --git a/core/types/genesis.pb.go b/core/types/genesis.pb.go new file mode 100644 index 0000000000..fc52b6f1dc --- /dev/null +++ b/core/types/genesis.pb.go @@ -0,0 +1,440 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: ibcgo/core/types/v1/genesis.proto + +package types + +import ( + fmt "fmt" + types "github.com/cosmos/ibc-go/core/02-client/types" + types1 "github.com/cosmos/ibc-go/core/03-connection/types" + types2 "github.com/cosmos/ibc-go/core/04-channel/types" + _ "github.com/gogo/protobuf/gogoproto" + proto "github.com/gogo/protobuf/proto" + io "io" + math "math" + math_bits "math/bits" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +// GenesisState defines the ibc module's genesis state. 
+type GenesisState struct { + // ICS002 - Clients genesis state + ClientGenesis types.GenesisState `protobuf:"bytes,1,opt,name=client_genesis,json=clientGenesis,proto3" json:"client_genesis" yaml:"client_genesis"` + // ICS003 - Connections genesis state + ConnectionGenesis types1.GenesisState `protobuf:"bytes,2,opt,name=connection_genesis,json=connectionGenesis,proto3" json:"connection_genesis" yaml:"connection_genesis"` + // ICS004 - Channel genesis state + ChannelGenesis types2.GenesisState `protobuf:"bytes,3,opt,name=channel_genesis,json=channelGenesis,proto3" json:"channel_genesis" yaml:"channel_genesis"` +} + +func (m *GenesisState) Reset() { *m = GenesisState{} } +func (m *GenesisState) String() string { return proto.CompactTextString(m) } +func (*GenesisState) ProtoMessage() {} +func (*GenesisState) Descriptor() ([]byte, []int) { + return fileDescriptor_f0cf35a95987cc01, []int{0} +} +func (m *GenesisState) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *GenesisState) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_GenesisState.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *GenesisState) XXX_Merge(src proto.Message) { + xxx_messageInfo_GenesisState.Merge(m, src) +} +func (m *GenesisState) XXX_Size() int { + return m.Size() +} +func (m *GenesisState) XXX_DiscardUnknown() { + xxx_messageInfo_GenesisState.DiscardUnknown(m) +} + +var xxx_messageInfo_GenesisState proto.InternalMessageInfo + +func (m *GenesisState) GetClientGenesis() types.GenesisState { + if m != nil { + return m.ClientGenesis + } + return types.GenesisState{} +} + +func (m *GenesisState) GetConnectionGenesis() types1.GenesisState { + if m != nil { + return m.ConnectionGenesis + } + return types1.GenesisState{} +} + +func (m *GenesisState) GetChannelGenesis() types2.GenesisState { + if m != nil { + return m.ChannelGenesis + } + return types2.GenesisState{} +} + +func init() { + proto.RegisterType((*GenesisState)(nil), "ibcgo.core.types.v1.GenesisState") +} + +func init() { proto.RegisterFile("ibcgo/core/types/v1/genesis.proto", fileDescriptor_f0cf35a95987cc01) } + +var fileDescriptor_f0cf35a95987cc01 = []byte{ + // 313 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x64, 0x92, 0xcf, 0x4a, 0xc3, 0x30, + 0x1c, 0xc7, 0xdb, 0x09, 0x1e, 0xaa, 0x4e, 0xac, 0x7f, 0xd0, 0x81, 0x99, 0x4b, 0x61, 0x78, 0x31, + 0xa1, 0x7a, 0x13, 0xbc, 0xec, 0xe2, 0xbd, 0xde, 0xbc, 0x48, 0x1b, 0x42, 0x1b, 0x68, 0x93, 0xb1, + 0xc6, 0xe2, 0xde, 0xc2, 0xc7, 0xda, 0x71, 0x47, 0x4f, 0x63, 0xb4, 0x6f, 0xe0, 0x13, 0xc8, 0x92, + 0xd8, 0xb5, 0xe4, 0x56, 0xbe, 0xfd, 0xfc, 0xbe, 0x9f, 0xfc, 0xf3, 0x26, 0x2c, 0x21, 0xa9, 0xc0, + 0x44, 0x2c, 0x28, 0x96, 0xcb, 0x39, 0x2d, 0x71, 0x15, 0xe2, 0x94, 0x72, 0x5a, 0xb2, 0x12, 0xcd, + 0x17, 0x42, 0x0a, 0xff, 0x5c, 0x21, 0x68, 0x87, 0x20, 0x85, 0xa0, 0x2a, 0x1c, 0x5d, 0xa4, 0x22, + 0x15, 0xea, 0x3f, 0xde, 0x7d, 0x69, 0x74, 0x04, 0x3b, 0x6d, 0x24, 0x67, 0x94, 0x4b, 0xab, 0x6e, + 0x34, 0xed, 0x32, 0x82, 0x73, 0x4a, 0x24, 0x13, 0xdc, 0xe6, 0x82, 0x2e, 0x97, 0xc5, 0x9c, 0xd3, + 0xdc, 0x82, 0xe0, 0x76, 0xe0, 0x1d, 0xbf, 0xea, 0xe4, 0x4d, 0xc6, 0x92, 0xfa, 0x99, 0x37, 0xd4, + 0xe2, 0x0f, 0x03, 0x5e, 0xbb, 0x77, 0xee, 0xfd, 0xd1, 0x23, 0x44, 0x9d, 0x5d, 0x68, 0x02, 0x55, + 0x21, 0xea, 0xce, 0xce, 0x6e, 0x57, 0x9b, 0xb1, 0xf3, 0xbb, 0x19, 0x5f, 0x2e, 0xe3, 0x22, 0x7f, + 0x86, 
0xfd, 0x1e, 0x18, 0x9d, 0xe8, 0xc0, 0x8c, 0xf8, 0x5f, 0x9e, 0xbf, 0x5f, 0x7e, 0x6b, 0x1b, + 0x28, 0xdb, 0xb4, 0x67, 0x6b, 0x29, 0xcb, 0x38, 0x31, 0xc6, 0x1b, 0x63, 0xb4, 0xfa, 0x60, 0x74, + 0xb6, 0x0f, 0xff, 0xcd, 0xb9, 0x77, 0x6a, 0x0e, 0xa4, 0xd5, 0x1e, 0x28, 0x6d, 0xd0, 0xd3, 0x6a, + 0xc4, 0x72, 0x02, 0xe3, 0xbc, 0x32, 0xce, 0x7e, 0x13, 0x8c, 0x86, 0x26, 0x31, 0x43, 0xb3, 0x97, + 0x55, 0x0d, 0xdc, 0x75, 0x0d, 0xdc, 0x6d, 0x0d, 0xdc, 0xef, 0x06, 0x38, 0xeb, 0x06, 0x38, 0x3f, + 0x0d, 0x70, 0xde, 0x83, 0x94, 0xc9, 0xec, 0x33, 0x41, 0x44, 0x14, 0x98, 0x88, 0xb2, 0x10, 0x25, + 0x66, 0x09, 0x79, 0xe8, 0x3d, 0xa7, 0xe4, 0x50, 0x5d, 0xd4, 0xd3, 0x5f, 0x00, 0x00, 0x00, 0xff, + 0xff, 0x44, 0x1f, 0x35, 0xd8, 0x69, 0x02, 0x00, 0x00, +} + +func (m *GenesisState) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *GenesisState) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *GenesisState) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.ChannelGenesis.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenesis(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + { + size, err := m.ConnectionGenesis.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenesis(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + { + size, err := m.ClientGenesis.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenesis(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func encodeVarintGenesis(dAtA []byte, offset int, v uint64) int { + offset -= sovGenesis(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *GenesisState) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ClientGenesis.Size() + n += 1 + l + sovGenesis(uint64(l)) + l = m.ConnectionGenesis.Size() + n += 1 + l + sovGenesis(uint64(l)) + l = m.ChannelGenesis.Size() + n += 1 + l + sovGenesis(uint64(l)) + return n +} + +func sovGenesis(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozGenesis(x uint64) (n int) { + return sovGenesis(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *GenesisState) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenesis + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: GenesisState: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: GenesisState: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ClientGenesis", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenesis + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= 
int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenesis + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenesis + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ClientGenesis.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ConnectionGenesis", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenesis + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenesis + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenesis + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ConnectionGenesis.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ChannelGenesis", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenesis + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenesis + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenesis + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ChannelGenesis.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenesis(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenesis + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipGenesis(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenesis + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenesis + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenesis + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthGenesis + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupGenesis + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthGenesis + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthGenesis = fmt.Errorf("proto: negative length found during unmarshaling") + 
ErrIntOverflowGenesis = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupGenesis = fmt.Errorf("proto: unexpected end of group") +) diff --git a/core/types/query.go b/core/types/query.go new file mode 100644 index 0000000000..fba69b3a19 --- /dev/null +++ b/core/types/query.go @@ -0,0 +1,26 @@ +package types + +import ( + "github.com/gogo/protobuf/grpc" + + client "github.com/cosmos/cosmos-sdk/x/ibc/core/02-client" + clienttypes "github.com/cosmos/cosmos-sdk/x/ibc/core/02-client/types" + connection "github.com/cosmos/cosmos-sdk/x/ibc/core/03-connection" + connectiontypes "github.com/cosmos/cosmos-sdk/x/ibc/core/03-connection/types" + channel "github.com/cosmos/cosmos-sdk/x/ibc/core/04-channel" + channeltypes "github.com/cosmos/cosmos-sdk/x/ibc/core/04-channel/types" +) + +// QueryServer defines the IBC interfaces that the gRPC query server must implement +type QueryServer interface { + clienttypes.QueryServer + connectiontypes.QueryServer + channeltypes.QueryServer +} + +// RegisterQueryService registers each individual IBC submodule query service +func RegisterQueryService(server grpc.Server, queryService QueryServer) { + client.RegisterQueryService(server, queryService) + connection.RegisterQueryService(server, queryService) + channel.RegisterQueryService(server, queryService) +} diff --git a/docs/README.md b/docs/README.md new file mode 100644 index 0000000000..a699c10abd --- /dev/null +++ b/docs/README.md @@ -0,0 +1,114 @@ + + +# `ibc` + +## Abstract + +This specification defines the implementation of the IBC protocol on the Cosmos SDK, the +changes made to the specification and where to find each specific ICS spec within +the module. + +For the general specification please refer to the [Interchain Standards](https://github.com/cosmos/ics). + +## Contents + +1. **Applications** + + 1.1. [Transfer](./../applications/transfer/spec/README.md) +2. **[Core](./../core/spec/README.md)** +3. **Light Clients** + + 3.1 [Solo Machine Client](./../light-clients/06-solomachine/spec/README.md) + + 3.2 [Tendermint Client](./../light-clients/07-tendermint/spec/README.md) + + 3.3 [Localhost Client](./../light-clients/09-localhost/spec/README.md) + +## Implementation Details + +As stated above, the IBC implementation on the Cosmos SDK introduces some changes +to the general specification, in order to avoid code duplication and to take +advantage of the SDK architectural components such as the transaction routing +through `Handlers`. 
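+
+As an illustration of that routing, an SDK-style handler switches on the concrete message type and
+dispatches to a keeper method. The sketch below is only a hypothetical outline (the `Keeper`
+interface and its method names are assumptions made for the example), not the actual
+`x/ibc/handler.go`:
+
+```go
+package ibc
+
+import (
+	sdk "github.com/cosmos/cosmos-sdk/types"
+	sdkerrors "github.com/cosmos/cosmos-sdk/types/errors"
+
+	clienttypes "github.com/cosmos/cosmos-sdk/x/ibc/core/02-client/types"
+	channeltypes "github.com/cosmos/cosmos-sdk/x/ibc/core/04-channel/types"
+)
+
+// Keeper is a stand-in for the IBC keeper; the method names below are
+// illustrative, not the real keeper API.
+type Keeper interface {
+	CreateClient(ctx sdk.Context, msg *clienttypes.MsgCreateClient) (*sdk.Result, error)
+	RecvPacket(ctx sdk.Context, msg *channeltypes.MsgRecvPacket) (*sdk.Result, error)
+}
+
+// NewHandler returns an sdk.Handler that routes IBC messages to the keeper.
+func NewHandler(k Keeper) sdk.Handler {
+	return func(ctx sdk.Context, msg sdk.Msg) (*sdk.Result, error) {
+		ctx = ctx.WithEventManager(sdk.NewEventManager())
+
+		switch msg := msg.(type) {
+		case *clienttypes.MsgCreateClient:
+			return k.CreateClient(ctx, msg)
+		case *channeltypes.MsgRecvPacket:
+			return k.RecvPacket(ctx, msg)
+		default:
+			return nil, sdkerrors.Wrapf(sdkerrors.ErrUnknownRequest, "unrecognized IBC message type: %T", msg)
+		}
+	}
+}
+```
+
+In the real module the concrete keeper and the full set of client, connection and channel messages
+are wired up in `x/ibc/handler.go`, as noted under ICS 025 below.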
+
+### Interchain Standards reference
+
+The following list is a mapping from each Interchain Standard to its implementation
+in the SDK's `x/ibc` module:
+
+* [ICS 002 - Client Semantics](https://github.com/cosmos/ics/tree/master/spec/ics-002-client-semantics): Implemented in [`x/ibc/core/02-client`](https://github.com/cosmos/tree/master/ibc/core/02-client)
+* [ICS 003 - Connection Semantics](https://github.com/cosmos/ics/blob/master/spec/ics-003-connection-semantics): Implemented in [`x/ibc/core/03-connection`](https://github.com/cosmos/tree/master/ibc/core/03-connection)
+* [ICS 004 - Channel and Packet Semantics](https://github.com/cosmos/ics/blob/master/spec/ics-004-channel-and-packet-semantics): Implemented in [`x/ibc/core/04-channel`](https://github.com/cosmos/tree/master/ibc/core/04-channel)
+* [ICS 005 - Port Allocation](https://github.com/cosmos/ics/blob/master/spec/ics-005-port-allocation): Implemented in [`x/ibc/core/05-port`](https://github.com/cosmos/tree/master/ibc/core/05-port)
+* [ICS 006 - Solo Machine Client](https://github.com/cosmos/ics/blob/master/spec/ics-006-solo-machine-client): Implemented in [`x/ibc/light-clients/06-solomachine`](https://github.com/cosmos/tree/master/ibc/solomachine)
+* [ICS 007 - Tendermint Client](https://github.com/cosmos/ics/blob/master/spec/ics-007-tendermint-client): Implemented in [`x/ibc/light-clients/07-tendermint`](https://github.com/cosmos/tree/master/ibc/light-clients/07-tendermint)
+* [ICS 009 - Loopback Client](https://github.com/cosmos/ics/blob/master/spec/ics-009-loopback-client): Implemented in [`x/ibc/light-clients/09-localhost`](https://github.com/cosmos/tree/master/ibc/light-clients/09-localhost)
+* [ICS 018 - Relayer Algorithms](https://github.com/cosmos/ics/tree/master/spec/ics-018-relayer-algorithms): Implemented in its own [relayer repository](https://github.com/cosmos/relayer)
+* [ICS 020 - Fungible Token Transfer](https://github.com/cosmos/ics/tree/master/spec/ics-020-fungible-token-transfer): Implemented in [`x/ibc/applications/transfer`](https://github.com/cosmos/tree/master/ibc/applications/transfer)
+* [ICS 023 - Vector Commitments](https://github.com/cosmos/ics/tree/master/spec/ics-023-vector-commitments): Implemented in [`x/ibc/core/23-commitment`](https://github.com/cosmos/tree/master/ibc/core/23-commitment)
+* [ICS 024 - Host Requirements](https://github.com/cosmos/ics/tree/master/spec/ics-024-host-requirements): Implemented in [`x/ibc/core/24-host`](https://github.com/cosmos/tree/master/ibc/core/24-host)
+* [ICS 025 - Handler Interface](https://github.com/cosmos/ics/tree/master/spec/ics-025-handler-interface): `Handler` interfaces are implemented at the top level in `x/ibc/handler.go`,
+which calls each ICS submodule's handlers (i.e. `x/ibc/*/{XX-ICS}/handler.go`).
+* [ICS 026 - Routing Module](https://github.com/cosmos/ics/blob/master/spec/ics-026-routing-module): Replaced by [ADR 15 - IBC Packet Receiver](../../../docs/architecture/adr-015-ibc-packet-receiver.md).
+
+### Architecture Decision Records (ADR)
+
+The following ADRs provide the design and architecture decisions of IBC-related components.
+
+* [ADR 001 - Coin Source Tracing](../../../docs/architecture/adr-001-coin-source-tracing.md): standard to hash the ICS20's fungible token
+denomination trace path in order to support special characters and limit the maximum denomination length.
+* [ADR 17 - Historical Header Module](../../../docs/architecture/adr-017-historical-header-module.md): Introduces the ability to introspect past +consensus states in order to verify their membership in the counterparty clients. +* [ADR 19 - Protobuf State Encoding](../../../docs/architecture/adr-019-protobuf-state-encoding.md): Migration from Amino to Protobuf for state encoding. +* [ADR 020 - Protocol Buffer Transaction Encoding](./../../docs/architecture/adr-020-protobuf-transaction-encoding.md): Client side migration to Protobuf. +* [ADR 021 - Protocol Buffer Query Encoding](../../../docs/architecture/adr-020-protobuf-query-encoding.md): Queries migration to Protobuf. +* [ADR 026 - IBC Client Recovery Mechanisms](../../../docs/architecture/adr-026-ibc-client-recovery-mechanisms.md): Allows IBC Clients to be recovered after freezing or expiry. + +### SDK Modules + +* [`x/capability`](https://github.com/cosmos/tree/master/x/capability): The capability module provides object-capability keys support through scoped keepers in order to authenticate usage of ports or channels. Check [ADR 3 - Dynamic Capability Store](../../../docs/architecture/adr-003-dynamic-capability-store.md) for more details. + +## IBC module architecture + +> **NOTE for auditors**: If you're not familiar with the overall module structure from +the SDK modules, please check this [document](../../../docs/building-modules/structure.md) as +prerequisite reading. + +For ease of auditing, every Interchain Standard has been developed in its own +package. The development team separated the IBC TAO (Transport, Authentication, Ordering) ICS specifications from the IBC application level +specification. The following tree describes the architecture of the directories that +the `ibc` (TAO) and `ibc-transfer` ([ICS20](https://github.com/cosmos/ics/tree/master/spec/ics-020-fungible-token-transfer)) modules: + +```shell +x/ibc +├── applications/ +│ └──transfer/ +├── core/ +│   ├── 02-client/ +│   ├── 03-connection/ +│   ├── 04-channel/ +│   ├── 05-port/ +│   ├── 23-commitment/ +│   ├── 24-host/ +│  ├── client +│  │   └── cli +│ │       └── cli.go +│  ├── keeper +│  │ ├── keeper.go +│   │ └── querier.go +│ ├── types +│ │ ├── errors.go +│ │ └── keys.go +│ ├── handler.go +│ └── module.go +├── light-clients/ +│   ├── 06-solomachine/ +│   ├── 07-tendermint/ +│   └── 09-localhost/ +└── testing/ +``` diff --git a/docs/ibc/proto-docs.md b/docs/ibc/proto-docs.md new file mode 100644 index 0000000000..1c9f5c2957 --- /dev/null +++ b/docs/ibc/proto-docs.md @@ -0,0 +1,7521 @@ + + + + + Protocol Documentation + + + + + + + + + + +

Protocol Documentation

+ +

Table of Contents

+ +
+ +
+ + + +
+

ibcgo/apps/transfer/v1/transfer.proto

Top +
+

+ + +

DenomTrace

+

DenomTrace contains the base denomination for ICS20 fungible tokens and the

source tracing information path.

+ + + + + + + + + + + + + + + + + + + + + + + +
FieldTypeLabelDescription
pathstring

path defines the chain of port/channel identifiers used for tracing the +source of the fungible token.

base_denomstring

base denomination of the relayed fungible token.

+ + + + + +

FungibleTokenPacketData

+

FungibleTokenPacketData defines a struct for the packet payload

See FungibleTokenPacketData spec:

https://github.com/cosmos/ics/tree/master/spec/ics-020-fungible-token-transfer#data-structures

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldTypeLabelDescription
denomstring

the token denomination to be transferred

amountuint64

the token amount to be transferred

senderstring

the sender address

receiverstring

the recipient address on the destination chain

+ + + + + +

Params

+

Params defines the set of IBC transfer parameters.

NOTE: To prevent a single token from being transferred, set the

TransfersEnabled parameter to true and then set the bank module's SendEnabled

parameter for the denomination to false.

+ + + + + + + + + + + + + + + + + + + + + + + +
FieldTypeLabelDescription
send_enabledbool

send_enabled enables or disables all cross-chain token transfers from this +chain.

receive_enabledbool

receive_enabled enables or disables all cross-chain token transfers to this +chain.

+ + + + + + + + + + + + + +
+

ibcgo/apps/transfer/v1/genesis.proto

Top +
+

+ + +

GenesisState

+

GenesisState defines the ibc-transfer genesis state

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldTypeLabelDescription
port_idstring

denom_tracesDenomTracerepeated

paramsParams

+ + + + + + + + + + + + + +
+

ibcgo/apps/transfer/v1/query.proto

Top +
+

+ + +

QueryDenomTraceRequest

+

QueryDenomTraceRequest is the request type for the Query/DenomTrace RPC

method

+ + + + + + + + + + + + + + + + +
FieldTypeLabelDescription
hashstring

hash (in hex format) of the denomination trace information.

+ + + + + +

QueryDenomTraceResponse

+

QueryDenomTraceResponse is the response type for the Query/DenomTrace RPC

method.

+ + + + + + + + + + + + + + + + +
FieldTypeLabelDescription
denom_traceDenomTrace

denom_trace returns the requested denomination trace information.

+ + + + + +

QueryDenomTracesRequest

+

QueryConnectionsRequest is the request type for the Query/DenomTraces RPC

method

+ + + + + + + + + + + + + + + + +
FieldTypeLabelDescription
paginationcosmos.base.query.v1beta1.PageRequest

pagination defines an optional pagination for the request.

+ + + + + +

QueryDenomTracesResponse

+

QueryConnectionsResponse is the response type for the Query/DenomTraces RPC

method.

+ + + + + + + + + + + + + + + + + + + + + + + +
FieldTypeLabelDescription
denom_tracesDenomTracerepeated

denom_traces returns all denominations trace information.

paginationcosmos.base.query.v1beta1.PageResponse

pagination defines the pagination in the response.

+ + + + + +

QueryParamsRequest

+

QueryParamsRequest is the request type for the Query/Params RPC method.

+ + + + + +

QueryParamsResponse

+

QueryParamsResponse is the response type for the Query/Params RPC method.

+ + + + + + + + + + + + + + + + +
FieldTypeLabelDescription
paramsParams

params defines the parameters of the module.

+ + + + + + + + + + + +

Query

+

Query provides defines the gRPC querier service.

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Method NameRequest TypeResponse TypeDescription
DenomTraceQueryDenomTraceRequestQueryDenomTraceResponse

DenomTrace queries a denomination trace information.

DenomTracesQueryDenomTracesRequestQueryDenomTracesResponse

DenomTraces queries all denomination traces.

ParamsQueryParamsRequestQueryParamsResponse

Params queries all parameters of the ibc-transfer module.

+ + + + +

Methods with HTTP bindings

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Method NameMethodPatternBody
DenomTraceGET/ibc/apps/transfer/v1/denom_traces/{hash}
DenomTracesGET/ibc/apps/transfer/v1/denom_traces
ParamsGET/ibc/apps/transfer/v1/params
+ + + + +
+

ibcgo/core/client/v1/client.proto

Top +
+

+ + +

ClientConsensusStates

+

ClientConsensusStates defines all the stored consensus states for a given

client.

+ + + + + + + + + + + + + + + + + + + + + + + +
FieldTypeLabelDescription
client_idstring

client identifier

consensus_statesConsensusStateWithHeightrepeated

consensus states and their heights associated with the client

+ + + + + +

ClientUpdateProposal

+

ClientUpdateProposal is a governance proposal. If it passes, the substitute

client's consensus states starting from the 'initial height' are copied over

to the subjects client state. The proposal handler may fail if the subject

and the substitute do not match in client and chain parameters (with

exception to latest height, frozen height, and chain-id). The updated client

must also be valid (cannot be expired).

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldTypeLabelDescription
titlestring

the title of the update proposal

descriptionstring

the description of the proposal

subject_client_idstring

the client identifier for the client to be updated if the proposal passes

substitute_client_idstring

the substitute client identifier for the client standing in for the subject +client

initial_heightHeight

the intital height to copy consensus states from the substitute to the +subject

+ + + + + +

ConsensusStateWithHeight

+

ConsensusStateWithHeight defines a consensus state with an additional height

field.

+ + + + + + + + + + + + + + + + + + + + + + + +
FieldTypeLabelDescription
heightHeight

consensus state height

consensus_stategoogle.protobuf.Any

consensus state

+ + + + + +

Height

+

Height is a monotonically increasing data type

that can be compared against another Height for the purposes of updating and

freezing clients

Normally the RevisionHeight is incremented at each height while keeping

RevisionNumber the same. However some consensus algorithms may choose to

reset the height in certain conditions e.g. hard forks, state-machine

breaking changes In these cases, the RevisionNumber is incremented so that

height continues to be monitonically increasing even as the RevisionHeight

gets reset

+ + + + + + + + + + + + + + + + + + + + + + + +
FieldTypeLabelDescription
revision_numberuint64

the revision that the client is currently on

revision_heightuint64

the height within the given revision

+ + + + + +

IdentifiedClientState

+

IdentifiedClientState defines a client state with an additional client

identifier field.

+ + + + + + + + + + + + + + + + + + + + + + + +
FieldTypeLabelDescription
client_idstring

client identifier

client_stategoogle.protobuf.Any

client state

+ + + + + +

Params

+

Params defines the set of IBC light client parameters.

+ + + + + + + + + + + + + + + + +
FieldTypeLabelDescription
allowed_clientsstringrepeated

allowed_clients defines the list of allowed client state types.

+ + + + + + + + + + + + + +
+

ibcgo/apps/transfer/v1/tx.proto

Top +
+

+ + +

MsgTransfer

+

MsgTransfer defines a msg to transfer fungible tokens (i.e Coins) between

ICS20 enabled chains. See ICS Spec here:

https://github.com/cosmos/ics/tree/master/spec/ics-020-fungible-token-transfer#data-structures

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldTypeLabelDescription
source_portstring

the port on which the packet will be sent

source_channelstring

the channel by which the packet will be sent

tokencosmos.base.v1beta1.Coin

the tokens to be transferred

senderstring

the sender address

receiverstring

the recipient address on the destination chain

timeout_heightibcgo.core.client.v1.Height

Timeout height relative to the current block height. +The timeout is disabled when set to 0.

timeout_timestampuint64

Timeout timestamp (in nanoseconds) relative to the current block timestamp. +The timeout is disabled when set to 0.

+ + + + + +

MsgTransferResponse

+

MsgTransferResponse defines the Msg/Transfer response type.

+ + + + + + + + + + + +

Msg

Msg defines the ibc/transfer Msg service.

| Method Name | Request Type | Response Type | Description |
| ----------- | ------------ | ------------- | ----------- |
| Transfer | MsgTransfer | MsgTransferResponse | Transfer defines a rpc handler method for MsgTransfer. |

ibcgo/core/channel/v1/channel.proto


Acknowledgement

Acknowledgement is the recommended acknowledgement format to be used by
app-specific protocols.

NOTE: The field numbers 21 and 22 were explicitly chosen to avoid accidental
conflicts with other protobuf message formats used for acknowledgements. The
first byte of any message with this format will be the non-ASCII values
`0xaa` (result) or `0xb2` (error). Implemented as defined by ICS:
https://github.com/cosmos/ics/tree/master/spec/ics-004-channel-and-packet-semantics#acknowledgement-envelope

| Field | Type | Label | Description |
| ----- | ---- | ----- | ----------- |
| result | bytes |  |  |
| error | string |  |  |
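Given the note about field numbers 21 and 22, the definition is plausibly shaped like the sketch below. The `oneof` wrapper and package name are assumptions of this sketch; only the field names, types, and numbers follow from the text above.

```protobuf
// Sketch only: the oneof wrapper and package are assumed; field numbers 21/22
// come from the note above and make the first encoded byte 0xaa or 0xb2.
syntax = "proto3";
package ibcgo.core.channel.v1;

message Acknowledgement {
  oneof response {
    bytes result = 21; // successful result payload
    string error = 22; // error string for failed acknowledgements
  }
}
```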

Channel

Channel defines a pipeline for exactly-once packet delivery between specific
modules on separate blockchains, which has at least one end capable of
sending packets and one end capable of receiving packets.

| Field | Type | Label | Description |
| ----- | ---- | ----- | ----------- |
| state | State |  | current state of the channel end |
| ordering | Order |  | whether the channel is ordered or unordered |
| counterparty | Counterparty |  | counterparty channel end |
| connection_hops | string | repeated | list of connection identifiers, in order, along which packets sent on this channel will travel |
| version | string |  | opaque channel version, which is agreed upon during the handshake |
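As a fragment sketch in proto form (field numbers assumed; `State`, `Order`, and `Counterparty` refer to the enum and message definitions documented elsewhere in this same channel.proto section):

```protobuf
// Fragment sketch: field numbers are assumed; State, Order and Counterparty
// refer to the sibling definitions documented in this channel.proto section.
message Channel {
  State state = 1;                     // current state of the channel end
  Order ordering = 2;                  // ordered or unordered delivery
  Counterparty counterparty = 3;       // counterparty channel end
  repeated string connection_hops = 4; // connection ids packets travel along, in order
  string version = 5;                  // opaque version agreed during the handshake
}
```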

Counterparty

Counterparty defines a channel end counterparty.

| Field | Type | Label | Description |
| ----- | ---- | ----- | ----------- |
| port_id | string |  | port on the counterparty chain which owns the other end of the channel. |
| channel_id | string |  | channel end on the counterparty chain |

IdentifiedChannel

IdentifiedChannel defines a channel with additional port and channel
identifier fields.

| Field | Type | Label | Description |
| ----- | ---- | ----- | ----------- |
| state | State |  | current state of the channel end |
| ordering | Order |  | whether the channel is ordered or unordered |
| counterparty | Counterparty |  | counterparty channel end |
| connection_hops | string | repeated | list of connection identifiers, in order, along which packets sent on this channel will travel |
| version | string |  | opaque channel version, which is agreed upon during the handshake |
| port_id | string |  | port identifier |
| channel_id | string |  | channel identifier |

Packet

Packet defines a type that carries data across different chains through IBC.

| Field | Type | Label | Description |
| ----- | ---- | ----- | ----------- |
| sequence | uint64 |  | number corresponds to the order of sends and receives, where a Packet with an earlier sequence number must be sent and received before a Packet with a later sequence number. |
| source_port | string |  | identifies the port on the sending chain. |
| source_channel | string |  | identifies the channel end on the sending chain. |
| destination_port | string |  | identifies the port on the receiving chain. |
| destination_channel | string |  | identifies the channel end on the receiving chain. |
| data | bytes |  | actual opaque bytes transferred directly to the application module |
| timeout_height | ibcgo.core.client.v1.Height |  | block height after which the packet times out |
| timeout_timestamp | uint64 |  | block timestamp (in nanoseconds) after which the packet times out |
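A proto-level sketch of Packet follows, again with an assumed package, import path, and field numbers; only the field names and types come from the table above.

```protobuf
// Sketch only: package, import path and field numbers are assumed.
syntax = "proto3";
package ibcgo.core.channel.v1;

import "ibcgo/core/client/v1/client.proto"; // assumed path for ibcgo.core.client.v1.Height

message Packet {
  uint64 sequence = 1;            // ordering of sends and receives
  string source_port = 2;         // port on the sending chain
  string source_channel = 3;      // channel end on the sending chain
  string destination_port = 4;    // port on the receiving chain
  string destination_channel = 5; // channel end on the receiving chain
  bytes data = 6;                 // opaque application payload
  ibcgo.core.client.v1.Height timeout_height = 7; // height after which the packet times out
  uint64 timeout_timestamp = 8;   // timestamp (ns) after which the packet times out
}
```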

PacketState

PacketState defines the generic type necessary to retrieve and store
packet commitments, acknowledgements, and receipts.
Caller is responsible for knowing the context necessary to interpret this
state as a commitment, acknowledgement, or a receipt.

| Field | Type | Label | Description |
| ----- | ---- | ----- | ----------- |
| port_id | string |  | channel port identifier. |
| channel_id | string |  | channel unique identifier. |
| sequence | uint64 |  | packet sequence. |
| data | bytes |  | embedded data that represents packet state. |

Order

Order defines if a channel is ORDERED or UNORDERED.

| Name | Number | Description |
| ---- | ------ | ----------- |
| ORDER_NONE_UNSPECIFIED | 0 | zero-value for channel ordering |
| ORDER_UNORDERED | 1 | packets can be delivered in any order, which may differ from the order in which they were sent. |
| ORDER_ORDERED | 2 | packets are delivered exactly in the order which they were sent |
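In proto terms the enum above would be declared roughly as follows; the names and numbers are taken from the table, while the syntax and package lines are assumptions of this sketch.

```protobuf
// Sketch only: syntax/package assumed; names and numbers follow the table above.
syntax = "proto3";
package ibcgo.core.channel.v1;

enum Order {
  ORDER_NONE_UNSPECIFIED = 0; // zero-value for channel ordering
  ORDER_UNORDERED = 1;        // packets may be delivered in any order
  ORDER_ORDERED = 2;          // packets are delivered exactly in the order sent
}
```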

State

State defines if a channel is in one of the following states:
CLOSED, INIT, TRYOPEN, OPEN or UNINITIALIZED.

| Name | Number | Description |
| ---- | ------ | ----------- |
| STATE_UNINITIALIZED_UNSPECIFIED | 0 | Default State |
| STATE_INIT | 1 | A channel has just started the opening handshake. |
| STATE_TRYOPEN | 2 | A channel has acknowledged the handshake step on the counterparty chain. |
| STATE_OPEN | 3 | A channel has completed the handshake. Open channels are ready to send and receive packets. |
| STATE_CLOSED | 4 | A channel has been closed and can no longer be used to send or receive packets. |

ibcgo/core/channel/v1/genesis.proto


GenesisState

GenesisState defines the ibc channel submodule's genesis state.

| Field | Type | Label | Description |
| ----- | ---- | ----- | ----------- |
| channels | IdentifiedChannel | repeated |  |
| acknowledgements | PacketState | repeated |  |
| commitments | PacketState | repeated |  |
| receipts | PacketState | repeated |  |
| send_sequences | PacketSequence | repeated |  |
| recv_sequences | PacketSequence | repeated |  |
| ack_sequences | PacketSequence | repeated |  |
| next_channel_sequence | uint64 |  | the sequence for the next generated channel identifier |

PacketSequence

PacketSequence defines the genesis type necessary to retrieve and store
next send and receive sequences.

| Field | Type | Label | Description |
| ----- | ---- | ----- | ----------- |
| port_id | string |  |  |
| channel_id | string |  |  |
| sequence | uint64 |  |  |

ibcgo/core/channel/v1/query.proto


QueryChannelClientStateRequest

QueryChannelClientStateRequest is the request type for the
Query/ChannelClientState RPC method.

| Field | Type | Label | Description |
| ----- | ---- | ----- | ----------- |
| port_id | string |  | port unique identifier |
| channel_id | string |  | channel unique identifier |

QueryChannelClientStateResponse

QueryChannelClientStateResponse is the response type for the
Query/ChannelClientState RPC method.

| Field | Type | Label | Description |
| ----- | ---- | ----- | ----------- |
| identified_client_state | ibcgo.core.client.v1.IdentifiedClientState |  | client state associated with the channel |
| proof | bytes |  | merkle proof of existence |
| proof_height | ibcgo.core.client.v1.Height |  | height at which the proof was retrieved |

QueryChannelConsensusStateRequest

QueryChannelConsensusStateRequest is the request type for the
Query/ChannelConsensusState RPC method.

| Field | Type | Label | Description |
| ----- | ---- | ----- | ----------- |
| port_id | string |  | port unique identifier |
| channel_id | string |  | channel unique identifier |
| revision_number | uint64 |  | revision number of the consensus state |
| revision_height | uint64 |  | revision height of the consensus state |

QueryChannelConsensusStateResponse

QueryChannelConsensusStateResponse is the response type for the
Query/ChannelConsensusState RPC method.

| Field | Type | Label | Description |
| ----- | ---- | ----- | ----------- |
| consensus_state | google.protobuf.Any |  | consensus state associated with the channel |
| client_id | string |  | client ID associated with the consensus state |
| proof | bytes |  | merkle proof of existence |
| proof_height | ibcgo.core.client.v1.Height |  | height at which the proof was retrieved |

QueryChannelRequest

QueryChannelRequest is the request type for the Query/Channel RPC method.

| Field | Type | Label | Description |
| ----- | ---- | ----- | ----------- |
| port_id | string |  | port unique identifier |
| channel_id | string |  | channel unique identifier |

QueryChannelResponse

QueryChannelResponse is the response type for the Query/Channel RPC method.
Besides the Channel end, it includes a proof and the height from which the
proof was retrieved.

| Field | Type | Label | Description |
| ----- | ---- | ----- | ----------- |
| channel | Channel |  | channel associated with the request identifiers |
| proof | bytes |  | merkle proof of existence |
| proof_height | ibcgo.core.client.v1.Height |  | height at which the proof was retrieved |

QueryChannelsRequest

QueryChannelsRequest is the request type for the Query/Channels RPC method.

| Field | Type | Label | Description |
| ----- | ---- | ----- | ----------- |
| pagination | cosmos.base.query.v1beta1.PageRequest |  | pagination request |

QueryChannelsResponse

QueryChannelsResponse is the response type for the Query/Channels RPC method.

| Field | Type | Label | Description |
| ----- | ---- | ----- | ----------- |
| channels | IdentifiedChannel | repeated | list of stored channels of the chain. |
| pagination | cosmos.base.query.v1beta1.PageResponse |  | pagination response |
| height | ibcgo.core.client.v1.Height |  | query block height |

QueryConnectionChannelsRequest

QueryConnectionChannelsRequest is the request type for the
Query/ConnectionChannels RPC method.

| Field | Type | Label | Description |
| ----- | ---- | ----- | ----------- |
| connection | string |  | connection unique identifier |
| pagination | cosmos.base.query.v1beta1.PageRequest |  | pagination request |

QueryConnectionChannelsResponse

QueryConnectionChannelsResponse is the response type for the
Query/ConnectionChannels RPC method.

| Field | Type | Label | Description |
| ----- | ---- | ----- | ----------- |
| channels | IdentifiedChannel | repeated | list of channels associated with a connection. |
| pagination | cosmos.base.query.v1beta1.PageResponse |  | pagination response |
| height | ibcgo.core.client.v1.Height |  | query block height |

QueryNextSequenceReceiveRequest

QueryNextSequenceReceiveRequest is the request type for the
Query/NextSequenceReceive RPC method.

| Field | Type | Label | Description |
| ----- | ---- | ----- | ----------- |
| port_id | string |  | port unique identifier |
| channel_id | string |  | channel unique identifier |

QueryNextSequenceReceiveResponse

QueryNextSequenceReceiveResponse is the response type for the
Query/NextSequenceReceive RPC method.

| Field | Type | Label | Description |
| ----- | ---- | ----- | ----------- |
| next_sequence_receive | uint64 |  | next sequence receive number |
| proof | bytes |  | merkle proof of existence |
| proof_height | ibcgo.core.client.v1.Height |  | height at which the proof was retrieved |

QueryPacketAcknowledgementRequest

QueryPacketAcknowledgementRequest is the request type for the
Query/PacketAcknowledgement RPC method.

| Field | Type | Label | Description |
| ----- | ---- | ----- | ----------- |
| port_id | string |  | port unique identifier |
| channel_id | string |  | channel unique identifier |
| sequence | uint64 |  | packet sequence |

QueryPacketAcknowledgementResponse

QueryPacketAcknowledgementResponse defines the client query response for a
packet, which also includes a proof and the height from which the proof was
retrieved.

| Field | Type | Label | Description |
| ----- | ---- | ----- | ----------- |
| acknowledgement | bytes |  | packet associated with the request fields |
| proof | bytes |  | merkle proof of existence |
| proof_height | ibcgo.core.client.v1.Height |  | height at which the proof was retrieved |

QueryPacketAcknowledgementsRequest

QueryPacketAcknowledgementsRequest is the request type for the
Query/PacketAcknowledgements RPC method.

| Field | Type | Label | Description |
| ----- | ---- | ----- | ----------- |
| port_id | string |  | port unique identifier |
| channel_id | string |  | channel unique identifier |
| pagination | cosmos.base.query.v1beta1.PageRequest |  | pagination request |

QueryPacketAcknowledgementsResponse

QueryPacketAcknowledgementsResponse is the response type for the
Query/PacketAcknowledgements RPC method.

| Field | Type | Label | Description |
| ----- | ---- | ----- | ----------- |
| acknowledgements | PacketState | repeated |  |
| pagination | cosmos.base.query.v1beta1.PageResponse |  | pagination response |
| height | ibcgo.core.client.v1.Height |  | query block height |

QueryPacketCommitmentRequest

QueryPacketCommitmentRequest is the request type for the
Query/PacketCommitment RPC method.

| Field | Type | Label | Description |
| ----- | ---- | ----- | ----------- |
| port_id | string |  | port unique identifier |
| channel_id | string |  | channel unique identifier |
| sequence | uint64 |  | packet sequence |

QueryPacketCommitmentResponse

QueryPacketCommitmentResponse defines the client query response for a packet,
which also includes a proof and the height from which the proof was
retrieved.

| Field | Type | Label | Description |
| ----- | ---- | ----- | ----------- |
| commitment | bytes |  | packet associated with the request fields |
| proof | bytes |  | merkle proof of existence |
| proof_height | ibcgo.core.client.v1.Height |  | height at which the proof was retrieved |

QueryPacketCommitmentsRequest

QueryPacketCommitmentsRequest is the request type for the
Query/PacketCommitments RPC method.

| Field | Type | Label | Description |
| ----- | ---- | ----- | ----------- |
| port_id | string |  | port unique identifier |
| channel_id | string |  | channel unique identifier |
| pagination | cosmos.base.query.v1beta1.PageRequest |  | pagination request |

QueryPacketCommitmentsResponse

QueryPacketCommitmentsResponse is the response type for the
Query/PacketCommitments RPC method.

| Field | Type | Label | Description |
| ----- | ---- | ----- | ----------- |
| commitments | PacketState | repeated |  |
| pagination | cosmos.base.query.v1beta1.PageResponse |  | pagination response |
| height | ibcgo.core.client.v1.Height |  | query block height |

QueryPacketReceiptRequest

QueryPacketReceiptRequest is the request type for the
Query/PacketReceipt RPC method.

| Field | Type | Label | Description |
| ----- | ---- | ----- | ----------- |
| port_id | string |  | port unique identifier |
| channel_id | string |  | channel unique identifier |
| sequence | uint64 |  | packet sequence |

QueryPacketReceiptResponse

QueryPacketReceiptResponse defines the client query response for a packet
receipt, which also includes a proof and the height from which the proof was
retrieved.

| Field | Type | Label | Description |
| ----- | ---- | ----- | ----------- |
| received | bool |  | success flag for if receipt exists |
| proof | bytes |  | merkle proof of existence |
| proof_height | ibcgo.core.client.v1.Height |  | height at which the proof was retrieved |

QueryUnreceivedAcksRequest

QueryUnreceivedAcksRequest is the request type for the
Query/UnreceivedAcks RPC method.

| Field | Type | Label | Description |
| ----- | ---- | ----- | ----------- |
| port_id | string |  | port unique identifier |
| channel_id | string |  | channel unique identifier |
| packet_ack_sequences | uint64 | repeated | list of acknowledgement sequences |

QueryUnreceivedAcksResponse

QueryUnreceivedAcksResponse is the response type for the
Query/UnreceivedAcks RPC method.

| Field | Type | Label | Description |
| ----- | ---- | ----- | ----------- |
| sequences | uint64 | repeated | list of unreceived acknowledgement sequences |
| height | ibcgo.core.client.v1.Height |  | query block height |

QueryUnreceivedPacketsRequest

QueryUnreceivedPacketsRequest is the request type for the
Query/UnreceivedPackets RPC method.

| Field | Type | Label | Description |
| ----- | ---- | ----- | ----------- |
| port_id | string |  | port unique identifier |
| channel_id | string |  | channel unique identifier |
| packet_commitment_sequences | uint64 | repeated | list of packet sequences |

QueryUnreceivedPacketsResponse

QueryUnreceivedPacketsResponse is the response type for the
Query/UnreceivedPackets RPC method.

| Field | Type | Label | Description |
| ----- | ---- | ----- | ----------- |
| sequences | uint64 | repeated | list of unreceived packet sequences |
| height | ibcgo.core.client.v1.Height |  | query block height |

Query

Query defines the gRPC querier service.

| Method Name | Request Type | Response Type | Description |
| ----------- | ------------ | ------------- | ----------- |
| Channel | QueryChannelRequest | QueryChannelResponse | Channel queries an IBC Channel. |
| Channels | QueryChannelsRequest | QueryChannelsResponse | Channels queries all the IBC channels of a chain. |
| ConnectionChannels | QueryConnectionChannelsRequest | QueryConnectionChannelsResponse | ConnectionChannels queries all the channels associated with a connection end. |
| ChannelClientState | QueryChannelClientStateRequest | QueryChannelClientStateResponse | ChannelClientState queries for the client state for the channel associated with the provided channel identifiers. |
| ChannelConsensusState | QueryChannelConsensusStateRequest | QueryChannelConsensusStateResponse | ChannelConsensusState queries for the consensus state for the channel associated with the provided channel identifiers. |
| PacketCommitment | QueryPacketCommitmentRequest | QueryPacketCommitmentResponse | PacketCommitment queries a stored packet commitment hash. |
| PacketCommitments | QueryPacketCommitmentsRequest | QueryPacketCommitmentsResponse | PacketCommitments returns all the packet commitment hashes associated with a channel. |
| PacketReceipt | QueryPacketReceiptRequest | QueryPacketReceiptResponse | PacketReceipt queries if a given packet sequence has been received on the queried chain. |
| PacketAcknowledgement | QueryPacketAcknowledgementRequest | QueryPacketAcknowledgementResponse | PacketAcknowledgement queries a stored packet acknowledgement hash. |
| PacketAcknowledgements | QueryPacketAcknowledgementsRequest | QueryPacketAcknowledgementsResponse | PacketAcknowledgements returns all the packet acknowledgements associated with a channel. |
| UnreceivedPackets | QueryUnreceivedPacketsRequest | QueryUnreceivedPacketsResponse | UnreceivedPackets returns all the unreceived IBC packets associated with a channel and sequences. |
| UnreceivedAcks | QueryUnreceivedAcksRequest | QueryUnreceivedAcksResponse | UnreceivedAcks returns all the unreceived IBC acknowledgements associated with a channel and sequences. |
| NextSequenceReceive | QueryNextSequenceReceiveRequest | QueryNextSequenceReceiveResponse | NextSequenceReceive returns the next receive sequence for a given channel. |

Methods with HTTP bindings

| Method Name | Method | Pattern | Body |
| ----------- | ------ | ------- | ---- |
| Channel | GET | /ibc/core/channel/v1/channels/{channel_id}/ports/{port_id} |  |
| Channels | GET | /ibc/core/channel/v1/channels |  |
| ConnectionChannels | GET | /ibc/core/channel/v1/connections/{connection}/channels |  |
| ChannelClientState | GET | /ibc/core/channel/v1/channels/{channel_id}/ports/{port_id}/client_state |  |
| ChannelConsensusState | GET | /ibc/core/channel/v1/channels/{channel_id}/ports/{port_id}/consensus_state/revision/{revision_number}/height/{revision_height} |  |
| PacketCommitment | GET | /ibc/core/channel/v1/channels/{channel_id}/ports/{port_id}/packet_commitments/{sequence} |  |
| PacketCommitments | GET | /ibc/core/channel/v1/channels/{channel_id}/ports/{port_id}/packet_commitments |  |
| PacketReceipt | GET | /ibc/core/channel/v1/channels/{channel_id}/ports/{port_id}/packet_receipts/{sequence} |  |
| PacketAcknowledgement | GET | /ibc/core/channel/v1/channels/{channel_id}/ports/{port_id}/packet_acks/{sequence} |  |
| PacketAcknowledgements | GET | /ibc/core/channel/v1/channels/{channel_id}/ports/{port_id}/packet_acknowledgements |  |
| UnreceivedPackets | GET | /ibc/core/channel/v1/channels/{channel_id}/ports/{port_id}/packet_commitments/{packet_commitment_sequences}/unreceived_packets |  |
| UnreceivedAcks | GET | /ibc/core/channel/v1/channels/{channel_id}/ports/{port_id}/packet_commitments/{packet_ack_sequences}/unreceived_acks |  |
| NextSequenceReceive | GET | /ibc/core/channel/v1/channels/{channel_id}/ports/{port_id}/next_sequence |  |

ibcgo/core/channel/v1/tx.proto


MsgAcknowledgement

MsgAcknowledgement receives an incoming IBC acknowledgement.

| Field | Type | Label | Description |
| ----- | ---- | ----- | ----------- |
| packet | Packet |  |  |
| acknowledgement | bytes |  |  |
| proof_acked | bytes |  |  |
| proof_height | ibcgo.core.client.v1.Height |  |  |
| signer | string |  |  |

MsgAcknowledgementResponse

MsgAcknowledgementResponse defines the Msg/Acknowledgement response type.

MsgChannelCloseConfirm

MsgChannelCloseConfirm defines a msg sent by a Relayer to Chain B
to acknowledge the change of channel state to CLOSED on Chain A.

| Field | Type | Label | Description |
| ----- | ---- | ----- | ----------- |
| port_id | string |  |  |
| channel_id | string |  |  |
| proof_init | bytes |  |  |
| proof_height | ibcgo.core.client.v1.Height |  |  |
| signer | string |  |  |

MsgChannelCloseConfirmResponse

MsgChannelCloseConfirmResponse defines the Msg/ChannelCloseConfirm response
type.

MsgChannelCloseInit

MsgChannelCloseInit defines a msg sent by a Relayer to Chain A
to close a channel with Chain B.

| Field | Type | Label | Description |
| ----- | ---- | ----- | ----------- |
| port_id | string |  |  |
| channel_id | string |  |  |
| signer | string |  |  |

MsgChannelCloseInitResponse

MsgChannelCloseInitResponse defines the Msg/ChannelCloseInit response type.

MsgChannelOpenAck

MsgChannelOpenAck defines a msg sent by a Relayer to Chain A to acknowledge
the change of channel state to TRYOPEN on Chain B.

| Field | Type | Label | Description |
| ----- | ---- | ----- | ----------- |
| port_id | string |  |  |
| channel_id | string |  |  |
| counterparty_channel_id | string |  |  |
| counterparty_version | string |  |  |
| proof_try | bytes |  |  |
| proof_height | ibcgo.core.client.v1.Height |  |  |
| signer | string |  |  |

MsgChannelOpenAckResponse

MsgChannelOpenAckResponse defines the Msg/ChannelOpenAck response type.

MsgChannelOpenConfirm

MsgChannelOpenConfirm defines a msg sent by a Relayer to Chain B to
acknowledge the change of channel state to OPEN on Chain A.

| Field | Type | Label | Description |
| ----- | ---- | ----- | ----------- |
| port_id | string |  |  |
| channel_id | string |  |  |
| proof_ack | bytes |  |  |
| proof_height | ibcgo.core.client.v1.Height |  |  |
| signer | string |  |  |

MsgChannelOpenConfirmResponse

MsgChannelOpenConfirmResponse defines the Msg/ChannelOpenConfirm response
type.

MsgChannelOpenInit

MsgChannelOpenInit defines an sdk.Msg to initialize a channel handshake. It
is called by a relayer on Chain A.

| Field | Type | Label | Description |
| ----- | ---- | ----- | ----------- |
| port_id | string |  |  |
| channel | Channel |  |  |
| signer | string |  |  |

MsgChannelOpenInitResponse

MsgChannelOpenInitResponse defines the Msg/ChannelOpenInit response type.

MsgChannelOpenTry

MsgChannelOpenTry defines a msg sent by a Relayer to try to open a channel
on Chain B.

| Field | Type | Label | Description |
| ----- | ---- | ----- | ----------- |
| port_id | string |  |  |
| previous_channel_id | string |  | in the case of crossing hellos, when both chains call OpenInit, we need the channel identifier of the previous channel in state INIT |
| channel | Channel |  |  |
| counterparty_version | string |  |  |
| proof_init | bytes |  |  |
| proof_height | ibcgo.core.client.v1.Height |  |  |
| signer | string |  |  |

MsgChannelOpenTryResponse

MsgChannelOpenTryResponse defines the Msg/ChannelOpenTry response type.

MsgRecvPacket

MsgRecvPacket receives an incoming IBC packet.

| Field | Type | Label | Description |
| ----- | ---- | ----- | ----------- |
| packet | Packet |  |  |
| proof_commitment | bytes |  |  |
| proof_height | ibcgo.core.client.v1.Height |  |  |
| signer | string |  |  |

MsgRecvPacketResponse

MsgRecvPacketResponse defines the Msg/RecvPacket response type.

MsgTimeout

MsgTimeout receives a timed-out packet.

| Field | Type | Label | Description |
| ----- | ---- | ----- | ----------- |
| packet | Packet |  |  |
| proof_unreceived | bytes |  |  |
| proof_height | ibcgo.core.client.v1.Height |  |  |
| next_sequence_recv | uint64 |  |  |
| signer | string |  |  |

MsgTimeoutOnClose

MsgTimeoutOnClose times out a packet upon counterparty channel closure.

| Field | Type | Label | Description |
| ----- | ---- | ----- | ----------- |
| packet | Packet |  |  |
| proof_unreceived | bytes |  |  |
| proof_close | bytes |  |  |
| proof_height | ibcgo.core.client.v1.Height |  |  |
| next_sequence_recv | uint64 |  |  |
| signer | string |  |  |

MsgTimeoutOnCloseResponse

MsgTimeoutOnCloseResponse defines the Msg/TimeoutOnClose response type.

MsgTimeoutResponse

MsgTimeoutResponse defines the Msg/Timeout response type.

Msg

Msg defines the ibc/channel Msg service.

| Method Name | Request Type | Response Type | Description |
| ----------- | ------------ | ------------- | ----------- |
| ChannelOpenInit | MsgChannelOpenInit | MsgChannelOpenInitResponse | ChannelOpenInit defines a rpc handler method for MsgChannelOpenInit. |
| ChannelOpenTry | MsgChannelOpenTry | MsgChannelOpenTryResponse | ChannelOpenTry defines a rpc handler method for MsgChannelOpenTry. |
| ChannelOpenAck | MsgChannelOpenAck | MsgChannelOpenAckResponse | ChannelOpenAck defines a rpc handler method for MsgChannelOpenAck. |
| ChannelOpenConfirm | MsgChannelOpenConfirm | MsgChannelOpenConfirmResponse | ChannelOpenConfirm defines a rpc handler method for MsgChannelOpenConfirm. |
| ChannelCloseInit | MsgChannelCloseInit | MsgChannelCloseInitResponse | ChannelCloseInit defines a rpc handler method for MsgChannelCloseInit. |
| ChannelCloseConfirm | MsgChannelCloseConfirm | MsgChannelCloseConfirmResponse | ChannelCloseConfirm defines a rpc handler method for MsgChannelCloseConfirm. |
| RecvPacket | MsgRecvPacket | MsgRecvPacketResponse | RecvPacket defines a rpc handler method for MsgRecvPacket. |
| Timeout | MsgTimeout | MsgTimeoutResponse | Timeout defines a rpc handler method for MsgTimeout. |
| TimeoutOnClose | MsgTimeoutOnClose | MsgTimeoutOnCloseResponse | TimeoutOnClose defines a rpc handler method for MsgTimeoutOnClose. |
| Acknowledgement | MsgAcknowledgement | MsgAcknowledgementResponse | Acknowledgement defines a rpc handler method for MsgAcknowledgement. |

ibcgo/core/client/v1/genesis.proto


GenesisMetadata

GenesisMetadata defines the genesis type for metadata that clients may return
with ExportMetadata.

| Field | Type | Label | Description |
| ----- | ---- | ----- | ----------- |
| key | bytes |  | store key of metadata without clientID-prefix |
| value | bytes |  | metadata value |

GenesisState

GenesisState defines the ibc client submodule's genesis state.

| Field | Type | Label | Description |
| ----- | ---- | ----- | ----------- |
| clients | IdentifiedClientState | repeated | client states with their corresponding identifiers |
| clients_consensus | ClientConsensusStates | repeated | consensus states from each client |
| clients_metadata | IdentifiedGenesisMetadata | repeated | metadata from each client |
| params | Params |  |  |
| create_localhost | bool |  | create localhost on initialization |
| next_client_sequence | uint64 |  | the sequence for the next generated client identifier |

IdentifiedGenesisMetadata

IdentifiedGenesisMetadata has the client metadata with the corresponding
client id.

| Field | Type | Label | Description |
| ----- | ---- | ----- | ----------- |
| client_id | string |  |  |
| client_metadata | GenesisMetadata | repeated |  |

ibcgo/core/client/v1/query.proto


QueryClientParamsRequest

QueryClientParamsRequest is the request type for the Query/ClientParams RPC
method.

QueryClientParamsResponse

QueryClientParamsResponse is the response type for the Query/ClientParams RPC
method.

| Field | Type | Label | Description |
| ----- | ---- | ----- | ----------- |
| params | Params |  | params defines the parameters of the module. |

QueryClientStateRequest

QueryClientStateRequest is the request type for the Query/ClientState RPC
method.

| Field | Type | Label | Description |
| ----- | ---- | ----- | ----------- |
| client_id | string |  | client state unique identifier |

QueryClientStateResponse

QueryClientStateResponse is the response type for the Query/ClientState RPC
method. Besides the client state, it includes a proof and the height from
which the proof was retrieved.

| Field | Type | Label | Description |
| ----- | ---- | ----- | ----------- |
| client_state | google.protobuf.Any |  | client state associated with the request identifier |
| proof | bytes |  | merkle proof of existence |
| proof_height | Height |  | height at which the proof was retrieved |

QueryClientStatesRequest

QueryClientStatesRequest is the request type for the Query/ClientStates RPC
method.

| Field | Type | Label | Description |
| ----- | ---- | ----- | ----------- |
| pagination | cosmos.base.query.v1beta1.PageRequest |  | pagination request |

QueryClientStatesResponse

QueryClientStatesResponse is the response type for the Query/ClientStates RPC
method.

| Field | Type | Label | Description |
| ----- | ---- | ----- | ----------- |
| client_states | IdentifiedClientState | repeated | list of stored ClientStates of the chain. |
| pagination | cosmos.base.query.v1beta1.PageResponse |  | pagination response |

QueryConsensusStateRequest

QueryConsensusStateRequest is the request type for the Query/ConsensusState
RPC method. Besides the consensus state, it includes a proof and the height
from which the proof was retrieved.

| Field | Type | Label | Description |
| ----- | ---- | ----- | ----------- |
| client_id | string |  | client identifier |
| revision_number | uint64 |  | consensus state revision number |
| revision_height | uint64 |  | consensus state revision height |
| latest_height | bool |  | latest_height overrides the height field and queries the latest stored ConsensusState |

QueryConsensusStateResponse

QueryConsensusStateResponse is the response type for the Query/ConsensusState
RPC method.

| Field | Type | Label | Description |
| ----- | ---- | ----- | ----------- |
| consensus_state | google.protobuf.Any |  | consensus state associated with the client identifier at the given height |
| proof | bytes |  | merkle proof of existence |
| proof_height | Height |  | height at which the proof was retrieved |

QueryConsensusStatesRequest

QueryConsensusStatesRequest is the request type for the Query/ConsensusStates
RPC method.

| Field | Type | Label | Description |
| ----- | ---- | ----- | ----------- |
| client_id | string |  | client identifier |
| pagination | cosmos.base.query.v1beta1.PageRequest |  | pagination request |

QueryConsensusStatesResponse

QueryConsensusStatesResponse is the response type for the
Query/ConsensusStates RPC method.

| Field | Type | Label | Description |
| ----- | ---- | ----- | ----------- |
| consensus_states | ConsensusStateWithHeight | repeated | consensus states associated with the identifier |
| pagination | cosmos.base.query.v1beta1.PageResponse |  | pagination response |

Query

Query defines the gRPC querier service.

| Method Name | Request Type | Response Type | Description |
| ----------- | ------------ | ------------- | ----------- |
| ClientState | QueryClientStateRequest | QueryClientStateResponse | ClientState queries an IBC light client. |
| ClientStates | QueryClientStatesRequest | QueryClientStatesResponse | ClientStates queries all the IBC light clients of a chain. |
| ConsensusState | QueryConsensusStateRequest | QueryConsensusStateResponse | ConsensusState queries a consensus state associated with a client state at a given height. |
| ConsensusStates | QueryConsensusStatesRequest | QueryConsensusStatesResponse | ConsensusStates queries all the consensus states associated with a given client. |
| ClientParams | QueryClientParamsRequest | QueryClientParamsResponse | ClientParams queries all parameters of the ibc client. |

Methods with HTTP bindings

| Method Name | Method | Pattern | Body |
| ----------- | ------ | ------- | ---- |
| ClientState | GET | /ibc/core/client/v1/client_states/{client_id} |  |
| ClientStates | GET | /ibc/core/client/v1/client_states |  |
| ConsensusState | GET | /ibc/core/client/v1/consensus_states/{client_id}/revision/{revision_number}/height/{revision_height} |  |
| ConsensusStates | GET | /ibc/core/client/v1/consensus_states/{client_id} |  |
| ClientParams | GET | /ibc/client/v1/params |  |

ibcgo/core/client/v1/tx.proto


MsgCreateClient

MsgCreateClient defines a message to create an IBC client.

| Field | Type | Label | Description |
| ----- | ---- | ----- | ----------- |
| client_state | google.protobuf.Any |  | light client state |
| consensus_state | google.protobuf.Any |  | consensus state associated with the client that corresponds to a given height. |
| signer | string |  | signer address |

MsgCreateClientResponse

MsgCreateClientResponse defines the Msg/CreateClient response type.

MsgSubmitMisbehaviour

MsgSubmitMisbehaviour defines an sdk.Msg type that submits Evidence for
light client misbehaviour.

| Field | Type | Label | Description |
| ----- | ---- | ----- | ----------- |
| client_id | string |  | client unique identifier |
| misbehaviour | google.protobuf.Any |  | misbehaviour used for freezing the light client |
| signer | string |  | signer address |

MsgSubmitMisbehaviourResponse

MsgSubmitMisbehaviourResponse defines the Msg/SubmitMisbehaviour response
type.

MsgUpdateClient

MsgUpdateClient defines an sdk.Msg to update an IBC client state using
the given header.

| Field | Type | Label | Description |
| ----- | ---- | ----- | ----------- |
| client_id | string |  | client unique identifier |
| header | google.protobuf.Any |  | header to update the light client |
| signer | string |  | signer address |

MsgUpdateClientResponse

MsgUpdateClientResponse defines the Msg/UpdateClient response type.

MsgUpgradeClient

MsgUpgradeClient defines an sdk.Msg to upgrade an IBC client to a new client
state.

| Field | Type | Label | Description |
| ----- | ---- | ----- | ----------- |
| client_id | string |  | client unique identifier |
| client_state | google.protobuf.Any |  | upgraded client state |
| consensus_state | google.protobuf.Any |  | upgraded consensus state, only contains enough information to serve as a basis of trust in update logic |
| proof_upgrade_client | bytes |  | proof that old chain committed to new client |
| proof_upgrade_consensus_state | bytes |  | proof that old chain committed to new consensus state |
| signer | string |  | signer address |

MsgUpgradeClientResponse

MsgUpgradeClientResponse defines the Msg/UpgradeClient response type.

Msg

Msg defines the ibc/client Msg service.

| Method Name | Request Type | Response Type | Description |
| ----------- | ------------ | ------------- | ----------- |
| CreateClient | MsgCreateClient | MsgCreateClientResponse | CreateClient defines a rpc handler method for MsgCreateClient. |
| UpdateClient | MsgUpdateClient | MsgUpdateClientResponse | UpdateClient defines a rpc handler method for MsgUpdateClient. |
| UpgradeClient | MsgUpgradeClient | MsgUpgradeClientResponse | UpgradeClient defines a rpc handler method for MsgUpgradeClient. |
| SubmitMisbehaviour | MsgSubmitMisbehaviour | MsgSubmitMisbehaviourResponse | SubmitMisbehaviour defines a rpc handler method for MsgSubmitMisbehaviour. |

ibcgo/core/commitment/v1/commitment.proto


MerklePath

MerklePath is the path used to verify commitment proofs, which can be an
arbitrary structured object (defined by a commitment type).
MerklePath is represented from root-to-leaf.

| Field | Type | Label | Description |
| ----- | ---- | ----- | ----------- |
| key_path | string | repeated |  |

MerklePrefix

MerklePrefix is a merkle path prefixed to the key.
The constructed key from the Path and the key will be
append(Path.KeyPath, append(Path.KeyPrefix, key...)).

| Field | Type | Label | Description |
| ----- | ---- | ----- | ----------- |
| key_prefix | bytes |  |  |

MerkleProof

MerkleProof is a wrapper type over a chain of CommitmentProofs.
It demonstrates membership or non-membership for an element or set of
elements, verifiable in conjunction with a known commitment root. Proofs
should be succinct.
MerkleProofs are ordered from leaf-to-root.

| Field | Type | Label | Description |
| ----- | ---- | ----- | ----------- |
| proofs | ics23.CommitmentProof | repeated |  |

MerkleRoot

MerkleRoot defines a merkle root hash.
In the Cosmos SDK, the AppHash of a block header becomes the root.

| Field | Type | Label | Description |
| ----- | ---- | ----- | ----------- |
| hash | bytes |  |  |

ibcgo/core/connection/v1/connection.proto


ClientPaths

ClientPaths define all the connection paths for a client state.

| Field | Type | Label | Description |
| ----- | ---- | ----- | ----------- |
| paths | string | repeated | list of connection paths |

ConnectionEnd

ConnectionEnd defines a stateful object on a chain connected to another
separate one.
NOTE: there must only be 2 defined ConnectionEnds to establish
a connection between two chains.

| Field | Type | Label | Description |
| ----- | ---- | ----- | ----------- |
| client_id | string |  | client associated with this connection. |
| versions | Version | repeated | IBC version which can be utilised to determine encodings or protocols for channels or packets utilising this connection. |
| state | State |  | current state of the connection end. |
| counterparty | Counterparty |  | counterparty chain associated with this connection. |
| delay_period | uint64 |  | delay period that must pass before a consensus state can be used for packet-verification. NOTE: delay period logic is only implemented by some clients. |
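As a fragment sketch in proto form (field numbers assumed; `Version`, `State`, and `Counterparty` refer to the sibling definitions documented in this connection.proto section):

```protobuf
// Fragment sketch: field numbers are assumed; Version, State and Counterparty
// refer to the sibling definitions documented in this connection.proto section.
message ConnectionEnd {
  string client_id = 1;          // client associated with this connection
  repeated Version versions = 2; // IBC versions usable over this connection
  State state = 3;               // current state of the connection end
  Counterparty counterparty = 4; // counterparty chain for this connection
  uint64 delay_period = 5;       // delay before a consensus state may be used for packet verification
}
```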

ConnectionPaths

ConnectionPaths define all the connection paths for a given client state.

| Field | Type | Label | Description |
| ----- | ---- | ----- | ----------- |
| client_id | string |  | client state unique identifier |
| paths | string | repeated | list of connection paths |

Counterparty

Counterparty defines the counterparty chain associated with a connection end.

| Field | Type | Label | Description |
| ----- | ---- | ----- | ----------- |
| client_id | string |  | identifies the client on the counterparty chain associated with a given connection. |
| connection_id | string |  | identifies the connection end on the counterparty chain associated with a given connection. |
| prefix | ibcgo.core.commitment.v1.MerklePrefix |  | commitment merkle prefix of the counterparty chain. |

IdentifiedConnection

IdentifiedConnection defines a connection with an additional connection
identifier field.

| Field | Type | Label | Description |
| ----- | ---- | ----- | ----------- |
| id | string |  | connection identifier. |
| client_id | string |  | client associated with this connection. |
| versions | Version | repeated | IBC version which can be utilised to determine encodings or protocols for channels or packets utilising this connection |
| state | State |  | current state of the connection end. |
| counterparty | Counterparty |  | counterparty chain associated with this connection. |
| delay_period | uint64 |  | delay period associated with this connection. |

Version

Version defines the versioning scheme used to negotiate the IBC version in
the connection handshake.

| Field | Type | Label | Description |
| ----- | ---- | ----- | ----------- |
| identifier | string |  | unique version identifier |
| features | string | repeated | list of features compatible with the specified identifier |

State

State defines if a connection is in one of the following states:
INIT, TRYOPEN, OPEN or UNINITIALIZED.

| Name | Number | Description |
| ---- | ------ | ----------- |
| STATE_UNINITIALIZED_UNSPECIFIED | 0 | Default State |
| STATE_INIT | 1 | A connection end has just started the opening handshake. |
| STATE_TRYOPEN | 2 | A connection end has acknowledged the handshake step on the counterparty chain. |
| STATE_OPEN | 3 | A connection end has completed the handshake. |

ibcgo/core/connection/v1/genesis.proto


GenesisState

GenesisState defines the ibc connection submodule's genesis state.

| Field | Type | Label | Description |
| ----- | ---- | ----- | ----------- |
| connections | IdentifiedConnection | repeated |  |
| client_connection_paths | ConnectionPaths | repeated |  |
| next_connection_sequence | uint64 |  | the sequence for the next generated connection identifier |

ibcgo/core/connection/v1/query.proto


QueryClientConnectionsRequest

QueryClientConnectionsRequest is the request type for the
Query/ClientConnections RPC method.

| Field | Type | Label | Description |
| ----- | ---- | ----- | ----------- |
| client_id | string |  | client identifier associated with a connection |

QueryClientConnectionsResponse

QueryClientConnectionsResponse is the response type for the
Query/ClientConnections RPC method.

| Field | Type | Label | Description |
| ----- | ---- | ----- | ----------- |
| connection_paths | string | repeated | slice of all the connection paths associated with a client. |
| proof | bytes |  | merkle proof of existence |
| proof_height | ibcgo.core.client.v1.Height |  | height at which the proof was generated |

QueryConnectionClientStateRequest

QueryConnectionClientStateRequest is the request type for the
Query/ConnectionClientState RPC method.

| Field | Type | Label | Description |
| ----- | ---- | ----- | ----------- |
| connection_id | string |  | connection identifier |

QueryConnectionClientStateResponse

QueryConnectionClientStateResponse is the response type for the
Query/ConnectionClientState RPC method.

| Field | Type | Label | Description |
| ----- | ---- | ----- | ----------- |
| identified_client_state | ibcgo.core.client.v1.IdentifiedClientState |  | client state associated with the connection |
| proof | bytes |  | merkle proof of existence |
| proof_height | ibcgo.core.client.v1.Height |  | height at which the proof was retrieved |

QueryConnectionConsensusStateRequest

QueryConnectionConsensusStateRequest is the request type for the
Query/ConnectionConsensusState RPC method.

| Field | Type | Label | Description |
| ----- | ---- | ----- | ----------- |
| connection_id | string |  | connection identifier |
| revision_number | uint64 |  |  |
| revision_height | uint64 |  |  |

QueryConnectionConsensusStateResponse

QueryConnectionConsensusStateResponse is the response type for the
Query/ConnectionConsensusState RPC method.

| Field | Type | Label | Description |
| ----- | ---- | ----- | ----------- |
| consensus_state | google.protobuf.Any |  | consensus state associated with the connection |
| client_id | string |  | client ID associated with the consensus state |
| proof | bytes |  | merkle proof of existence |
| proof_height | ibcgo.core.client.v1.Height |  | height at which the proof was retrieved |

QueryConnectionRequest

QueryConnectionRequest is the request type for the Query/Connection RPC
method.

| Field | Type | Label | Description |
| ----- | ---- | ----- | ----------- |
| connection_id | string |  | connection unique identifier |

QueryConnectionResponse

QueryConnectionResponse is the response type for the Query/Connection RPC
method. Besides the connection end, it includes a proof and the height from
which the proof was retrieved.

| Field | Type | Label | Description |
| ----- | ---- | ----- | ----------- |
| connection | ConnectionEnd |  | connection associated with the request identifier |
| proof | bytes |  | merkle proof of existence |
| proof_height | ibcgo.core.client.v1.Height |  | height at which the proof was retrieved |

QueryConnectionsRequest

QueryConnectionsRequest is the request type for the Query/Connections RPC
method.

| Field | Type | Label | Description |
| ----- | ---- | ----- | ----------- |
| pagination | cosmos.base.query.v1beta1.PageRequest |  |  |

QueryConnectionsResponse

QueryConnectionsResponse is the response type for the Query/Connections RPC
method.

| Field | Type | Label | Description |
| ----- | ---- | ----- | ----------- |
| connections | IdentifiedConnection | repeated | list of stored connections of the chain. |
| pagination | cosmos.base.query.v1beta1.PageResponse |  | pagination response |
| height | ibcgo.core.client.v1.Height |  | query block height |

Query

Query defines the gRPC querier service.

| Method Name | Request Type | Response Type | Description |
| ----------- | ------------ | ------------- | ----------- |
| Connection | QueryConnectionRequest | QueryConnectionResponse | Connection queries an IBC connection end. |
| Connections | QueryConnectionsRequest | QueryConnectionsResponse | Connections queries all the IBC connections of a chain. |
| ClientConnections | QueryClientConnectionsRequest | QueryClientConnectionsResponse | ClientConnections queries the connection paths associated with a client state. |
| ConnectionClientState | QueryConnectionClientStateRequest | QueryConnectionClientStateResponse | ConnectionClientState queries the client state associated with the connection. |
| ConnectionConsensusState | QueryConnectionConsensusStateRequest | QueryConnectionConsensusStateResponse | ConnectionConsensusState queries the consensus state associated with the connection. |

Methods with HTTP bindings

| Method Name | Method | Pattern | Body |
| ----------- | ------ | ------- | ---- |
| Connection | GET | /ibc/core/connection/v1/connections/{connection_id} |  |
| Connections | GET | /ibc/core/connection/v1/connections |  |
| ClientConnections | GET | /ibc/core/connection/v1/client_connections/{client_id} |  |
| ConnectionClientState | GET | /ibc/core/connection/v1/connections/{connection_id}/client_state |  |
| ConnectionConsensusState | GET | /ibc/core/connection/v1/connections/{connection_id}/consensus_state/revision/{revision_number}/height/{revision_height} |  |

ibcgo/core/connection/v1/tx.proto


MsgConnectionOpenAck

MsgConnectionOpenAck defines a msg sent by a Relayer to Chain A to
acknowledge the change of connection state to TRYOPEN on Chain B.

| Field | Type | Label | Description |
| ----- | ---- | ----- | ----------- |
| connection_id | string |  |  |
| counterparty_connection_id | string |  |  |
| version | Version |  |  |
| client_state | google.protobuf.Any |  |  |
| proof_height | ibcgo.core.client.v1.Height |  |  |
| proof_try | bytes |  | proof of the initialization of the connection on Chain B: `UNINITIALIZED -> TRYOPEN` |
| proof_client | bytes |  | proof of client state included in message |
| proof_consensus | bytes |  | proof of client consensus state |
| consensus_height | ibcgo.core.client.v1.Height |  |  |
| signer | string |  |  |

MsgConnectionOpenAckResponse

MsgConnectionOpenAckResponse defines the Msg/ConnectionOpenAck response type.

MsgConnectionOpenConfirm

MsgConnectionOpenConfirm defines a msg sent by a Relayer to Chain B to
acknowledge the change of connection state to OPEN on Chain A.

| Field | Type | Label | Description |
| ----- | ---- | ----- | ----------- |
| connection_id | string |  |  |
| proof_ack | bytes |  | proof for the change of the connection state on Chain A: `INIT -> OPEN` |
| proof_height | ibcgo.core.client.v1.Height |  |  |
| signer | string |  |  |

MsgConnectionOpenConfirmResponse

MsgConnectionOpenConfirmResponse defines the Msg/ConnectionOpenConfirm
response type.

MsgConnectionOpenInit

MsgConnectionOpenInit defines the msg sent by an account on Chain A to
initialize a connection with Chain B.

| Field | Type | Label | Description |
| ----- | ---- | ----- | ----------- |
| client_id | string |  |  |
| counterparty | Counterparty |  |  |
| version | Version |  |  |
| delay_period | uint64 |  |  |
| signer | string |  |  |

MsgConnectionOpenInitResponse

MsgConnectionOpenInitResponse defines the Msg/ConnectionOpenInit response
type.

MsgConnectionOpenTry

MsgConnectionOpenTry defines a msg sent by a Relayer to try to open a
connection on Chain B.

| Field | Type | Label | Description |
| ----- | ---- | ----- | ----------- |
| client_id | string |  |  |
| previous_connection_id | string |  | in the case of crossing hellos, when both chains call OpenInit, we need the connection identifier of the previous connection in state INIT |
| client_state | google.protobuf.Any |  |  |
| counterparty | Counterparty |  |  |
| delay_period | uint64 |  |  |
| counterparty_versions | Version | repeated |  |
| proof_height | ibcgo.core.client.v1.Height |  |  |
| proof_init | bytes |  | proof of the initialization of the connection on Chain A: `UNINITIALIZED -> INIT` |
| proof_client | bytes |  | proof of client state included in message |
| proof_consensus | bytes |  | proof of client consensus state |
| consensus_height | ibcgo.core.client.v1.Height |  |  |
| signer | string |  |  |

MsgConnectionOpenTryResponse

MsgConnectionOpenTryResponse defines the Msg/ConnectionOpenTry response type.

Msg

Msg defines the ibc/connection Msg service.

| Method Name | Request Type | Response Type | Description |
| ----------- | ------------ | ------------- | ----------- |
| ConnectionOpenInit | MsgConnectionOpenInit | MsgConnectionOpenInitResponse | ConnectionOpenInit defines a rpc handler method for MsgConnectionOpenInit. |
| ConnectionOpenTry | MsgConnectionOpenTry | MsgConnectionOpenTryResponse | ConnectionOpenTry defines a rpc handler method for MsgConnectionOpenTry. |
| ConnectionOpenAck | MsgConnectionOpenAck | MsgConnectionOpenAckResponse | ConnectionOpenAck defines a rpc handler method for MsgConnectionOpenAck. |
| ConnectionOpenConfirm | MsgConnectionOpenConfirm | MsgConnectionOpenConfirmResponse | ConnectionOpenConfirm defines a rpc handler method for MsgConnectionOpenConfirm. |

ibcgo/core/types/v1/genesis.proto


GenesisState

GenesisState defines the ibc module's genesis state.

| Field | Type | Label | Description |
| ----- | ---- | ----- | ----------- |
| client_genesis | ibcgo.core.client.v1.GenesisState |  | ICS002 - Clients genesis state |
| connection_genesis | ibcgo.core.connection.v1.GenesisState |  | ICS003 - Connections genesis state |
| channel_genesis | ibcgo.core.channel.v1.GenesisState |  | ICS004 - Channel genesis state |

ibcgo/lightclients/localhost/v1/localhost.proto


ClientState

ClientState defines a loopback (localhost) client. It requires (read-only)
access to keys outside the client prefix.

| Field | Type | Label | Description |
| ----- | ---- | ----- | ----------- |
| chain_id | string |  | self chain ID |
| height | ibcgo.core.client.v1.Height |  | self latest block height |

ibcgo/lightclients/solomachine/v1/solomachine.proto


ChannelStateData

ChannelStateData returns the SignBytes data for channel state
verification.

| Field | Type | Label | Description |
| ----- | ---- | ----- | ----------- |
| path | bytes |  |  |
| channel | ibcgo.core.channel.v1.Channel |  |  |

ClientState

ClientState defines a solo machine client that tracks the current consensus
state and if the client is frozen.

| Field | Type | Label | Description |
| ----- | ---- | ----- | ----------- |
| sequence | uint64 |  | latest sequence of the client state |
| frozen_sequence | uint64 |  | frozen sequence of the solo machine |
| consensus_state | ConsensusState |  |  |
| allow_update_after_proposal | bool |  | when set to true, will allow governance to update a solo machine client. The client will be unfrozen if it is frozen. |

ClientStateData

ClientStateData returns the SignBytes data for client state verification.

| Field | Type | Label | Description |
| ----- | ---- | ----- | ----------- |
| path | bytes |  |  |
| client_state | google.protobuf.Any |  |  |

ConnectionStateData

ConnectionStateData returns the SignBytes data for connection state
verification.

| Field | Type | Label | Description |
| ----- | ---- | ----- | ----------- |
| path | bytes |  |  |
| connection | ibcgo.core.connection.v1.ConnectionEnd |  |  |

ConsensusState

ConsensusState defines a solo machine consensus state. The sequence of a
consensus state is contained in the "height" key used in storing the
consensus state.

| Field | Type | Label | Description |
| ----- | ---- | ----- | ----------- |
| public_key | google.protobuf.Any |  | public key of the solo machine |
| diversifier | string |  | diversifier allows the same public key to be re-used across different solo machine clients (potentially on different chains) without being considered misbehaviour. |
| timestamp | uint64 |  |  |

ConsensusStateData

ConsensusStateData returns the SignBytes data for consensus state
verification.

| Field | Type | Label | Description |
| ----- | ---- | ----- | ----------- |
| path | bytes |  |  |
| consensus_state | google.protobuf.Any |  |  |

Header

Header defines a solo machine consensus header.

| Field | Type | Label | Description |
| ----- | ---- | ----- | ----------- |
| sequence | uint64 |  | sequence to update solo machine public key at |
| timestamp | uint64 |  |  |
| signature | bytes |  |  |
| new_public_key | google.protobuf.Any |  |  |
| new_diversifier | string |  |  |

HeaderData

HeaderData returns the SignBytes data for update verification.

| Field | Type | Label | Description |
| ----- | ---- | ----- | ----------- |
| new_pub_key | google.protobuf.Any |  | header public key |
| new_diversifier | string |  | header diversifier |

Misbehaviour

Misbehaviour defines misbehaviour for a solo machine which consists
of a sequence and two signatures over different messages at that sequence.

| Field | Type | Label | Description |
| ----- | ---- | ----- | ----------- |
| client_id | string |  |  |
| sequence | uint64 |  |  |
| signature_one | SignatureAndData |  |  |
| signature_two | SignatureAndData |  |  |

NextSequenceRecvData

NextSequenceRecvData returns the SignBytes data for verification of the next
sequence to be received.

| Field | Type | Label | Description |
| ----- | ---- | ----- | ----------- |
| path | bytes |  |  |
| next_seq_recv | uint64 |  |  |

PacketAcknowledgementData

PacketAcknowledgementData returns the SignBytes data for acknowledgement
verification.

| Field | Type | Label | Description |
| ----- | ---- | ----- | ----------- |
| path | bytes |  |  |
| acknowledgement | bytes |  |  |

PacketCommitmentData

PacketCommitmentData returns the SignBytes data for packet commitment
verification.

| Field | Type | Label | Description |
| ----- | ---- | ----- | ----------- |
| path | bytes |  |  |
| commitment | bytes |  |  |

PacketReceiptAbsenceData

PacketReceiptAbsenceData returns the SignBytes data for
packet receipt absence verification.

| Field | Type | Label | Description |
| ----- | ---- | ----- | ----------- |
| path | bytes |  |  |

SignBytes

SignBytes defines the signed bytes used for signature verification.

| Field | Type | Label | Description |
| ----- | ---- | ----- | ----------- |
| sequence | uint64 |  |  |
| timestamp | uint64 |  |  |
| diversifier | string |  |  |
| data_type | DataType |  | type of the data used |
| data | bytes |  | marshaled data |

SignatureAndData

SignatureAndData contains a signature and the data signed over to create that
signature.

| Field | Type | Label | Description |
| ----- | ---- | ----- | ----------- |
| signature | bytes |  |  |
| data_type | DataType |  |  |
| data | bytes |  |  |
| timestamp | uint64 |  |  |

TimestampedSignatureData

TimestampedSignatureData contains the signature data and the timestamp of the
signature.

| Field | Type | Label | Description |
| ----- | ---- | ----- | ----------- |
| signature_data | bytes |  |  |
| timestamp | uint64 |  |  |

DataType

DataType defines the type of solo machine proof being created. This is done
to preserve uniqueness of different data sign byte encodings.

| Name | Number | Description |
| ---- | ------ | ----------- |
| DATA_TYPE_UNINITIALIZED_UNSPECIFIED | 0 | Default State |
| DATA_TYPE_CLIENT_STATE | 1 | Data type for client state verification |
| DATA_TYPE_CONSENSUS_STATE | 2 | Data type for consensus state verification |
| DATA_TYPE_CONNECTION_STATE | 3 | Data type for connection state verification |
| DATA_TYPE_CHANNEL_STATE | 4 | Data type for channel state verification |
| DATA_TYPE_PACKET_COMMITMENT | 5 | Data type for packet commitment verification |
| DATA_TYPE_PACKET_ACKNOWLEDGEMENT | 6 | Data type for packet acknowledgement verification |
| DATA_TYPE_PACKET_RECEIPT_ABSENCE | 7 | Data type for packet receipt absence verification |
| DATA_TYPE_NEXT_SEQUENCE_RECV | 8 | Data type for next sequence recv verification |
| DATA_TYPE_HEADER | 9 | Data type for header verification |
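
Because the values are contiguous and start at zero, they map naturally onto a Go iota block. The constant and package names below are illustrative mirrors of the proto names, not the identifiers emitted by the Go code generator.

```go
package solomachine // illustrative package name, not the generated package

// DataType mirrors the enum table above; names are illustrative only.
type DataType int32

const (
	DataTypeUninitializedUnspecified DataType = iota // 0: Default State
	DataTypeClientState                              // 1: client state verification
	DataTypeConsensusState                           // 2: consensus state verification
	DataTypeConnectionState                          // 3: connection state verification
	DataTypeChannelState                             // 4: channel state verification
	DataTypePacketCommitment                         // 5: packet commitment verification
	DataTypePacketAcknowledgement                    // 6: packet acknowledgement verification
	DataTypePacketReceiptAbsence                     // 7: packet receipt absence verification
	DataTypeNextSequenceRecv                         // 8: next sequence recv verification
	DataTypeHeader                                   // 9: header verification
)
```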

ibcgo/lightclients/tendermint/v1/tendermint.proto

ClientState

ClientState from Tendermint tracks the current validator set, latest height,
and a possible frozen height.

| Field | Type | Label | Description |
| ----- | ---- | ----- | ----------- |
| `chain_id` | `string` |  |  |
| `trust_level` | `Fraction` |  |  |
| `trusting_period` | `google.protobuf.Duration` |  | duration of the period since the LatestTimestamp during which the submitted headers are valid for upgrade |
| `unbonding_period` | `google.protobuf.Duration` |  | duration of the staking unbonding period |
| `max_clock_drift` | `google.protobuf.Duration` |  | defines how much a new (untrusted) header's Time can drift into the future |
| `frozen_height` | `ibcgo.core.client.v1.Height` |  | Block height when the client was frozen due to a misbehaviour |
| `latest_height` | `ibcgo.core.client.v1.Height` |  | Latest height the client was updated to |
| `proof_specs` | `ics23.ProofSpec` | repeated | Proof specifications used in verifying counterparty state |
| `upgrade_path` | `string` | repeated | Path at which the next upgraded client will be committed. Each element corresponds to the key for a single CommitmentProof in the chained proof. NOTE: ClientState must be stored under `{upgradePath}/{upgradeHeight}/clientState` and ConsensusState must be stored under `{upgradePath}/{upgradeHeight}/consensusState`. For SDK chains using the default upgrade module, upgrade_path should be `[]string{"upgrade", "upgradedIBCState"}` |
| `allow_update_after_expiry` | `bool` |  | This flag, when set to true, will allow governance to recover a client which has expired |
| `allow_update_after_misbehaviour` | `bool` |  | This flag, when set to true, will allow governance to unfreeze a client whose chain has experienced a misbehaviour event |
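
As a rough guide to how these parameters relate: the trust level is commonly set to 1/3, the trusting period must be shorter than the counterparty's staking unbonding period, and the clock drift bound is small. The struct and values below are an illustrative sketch, not the generated ClientState type.

```go
// Illustrative parameter set for a Tendermint client state; field names are
// hypothetical Go mirrors of the proto fields above, with commonly used values.
package main

import (
	"fmt"
	"time"
)

type fraction struct{ Numerator, Denominator uint64 }

type clientParams struct {
	ChainID         string
	TrustLevel      fraction
	TrustingPeriod  time.Duration
	UnbondingPeriod time.Duration
	MaxClockDrift   time.Duration
}

func main() {
	p := clientParams{
		ChainID:         "testchain-1",       // example chain id
		TrustLevel:      fraction{1, 3},      // commonly used light client trust level
		TrustingPeriod:  14 * 24 * time.Hour, // must be shorter than UnbondingPeriod
		UnbondingPeriod: 21 * 24 * time.Hour, // staking unbonding period of the counterparty
		MaxClockDrift:   10 * time.Second,    // tolerated future drift of header time
	}
	fmt.Println("valid periods:", p.TrustingPeriod < p.UnbondingPeriod)
}
```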

ConsensusState

ConsensusState defines the consensus state from Tendermint.

| Field | Type | Label | Description |
| ----- | ---- | ----- | ----------- |
| `timestamp` | `google.protobuf.Timestamp` |  | timestamp that corresponds to the block height in which the ConsensusState was stored |
| `root` | `ibcgo.core.commitment.v1.MerkleRoot` |  | commitment root (i.e. app hash) |
| `next_validators_hash` | `bytes` |  |  |

Fraction

Fraction defines the protobuf message type for tmmath.Fraction that only
supports positive values.

| Field | Type | Label | Description |
| ----- | ---- | ----- | ----------- |
| `numerator` | `uint64` |  |  |
| `denominator` | `uint64` |  |  |

Header

Header defines the Tendermint client consensus Header. It encapsulates all the
information necessary to update from a trusted Tendermint ConsensusState. The
inclusion of TrustedHeight and TrustedValidators allows this update to process
correctly, so long as the ConsensusState for the TrustedHeight exists; this
removes race conditions among relayers. The SignedHeader and ValidatorSet are
the new untrusted update fields for the client. The TrustedHeight is the height
of a stored ConsensusState on the client that will be used to verify the new
untrusted header. The trusted ConsensusState must be within the unbonding
period of current time in order to correctly verify, and the TrustedValidators
must hash to TrustedConsensusState.NextValidatorsHash since that is the last
trusted validator set at the TrustedHeight.

| Field | Type | Label | Description |
| ----- | ---- | ----- | ----------- |
| `signed_header` | `tendermint.types.SignedHeader` |  |  |
| `validator_set` | `tendermint.types.ValidatorSet` |  |  |
| `trusted_height` | `ibcgo.core.client.v1.Height` |  |  |
| `trusted_validators` | `tendermint.types.ValidatorSet` |  |  |
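
The two preconditions described above can be sketched as simple checks: the trusted ConsensusState must still be within the unbonding period, and the supplied trusted validator set must hash to the trusted ConsensusState's NextValidatorsHash. The sketch below uses a plain SHA-256 of the encoded validator set as a stand-in for Tendermint's validator-set hash; it is an illustration of the reasoning, not the ibc-go implementation.

```go
// Illustrative checks for accepting a new Tendermint header against a
// trusted consensus state. Hash and encoding are stand-ins only.
package main

import (
	"bytes"
	"crypto/sha256"
	"fmt"
	"time"
)

// trustedStateUsable: the trusted consensus state must not be older than the
// unbonding period, otherwise its validator set can no longer be trusted.
func trustedStateUsable(trustedTime time.Time, unbondingPeriod time.Duration, now time.Time) bool {
	return now.Sub(trustedTime) < unbondingPeriod
}

// trustedValidatorsMatch: the provided trusted validators must hash to the
// NextValidatorsHash stored in the trusted consensus state.
func trustedValidatorsMatch(trustedValidatorSetBytes, nextValidatorsHash []byte) bool {
	h := sha256.Sum256(trustedValidatorSetBytes) // stand-in for the Tendermint validator-set hash
	return bytes.Equal(h[:], nextValidatorsHash)
}

func main() {
	now := time.Now()
	fmt.Println(trustedStateUsable(now.Add(-10*24*time.Hour), 21*24*time.Hour, now)) // true
}
```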

Misbehaviour

Misbehaviour is a wrapper over two conflicting Headers that implements the
Misbehaviour interface expected by ICS-02.

| Field | Type | Label | Description |
| ----- | ---- | ----- | ----------- |
| `client_id` | `string` |  |  |
| `header_1` | `Header` |  |  |
| `header_2` | `Header` |  |  |

Scalar Value Types

| .proto Type | Notes | C++ | Java | Python | Go | C# | PHP | Ruby |
| ----------- | ----- | --- | ---- | ------ | -- | -- | --- | ---- |
| double |  | double | double | float | float64 | double | float | Float |
| float |  | float | float | float | float32 | float | float | Float |
| int32 | Uses variable-length encoding. Inefficient for encoding negative numbers – if your field is likely to have negative values, use sint32 instead. | int32 | int | int | int32 | int | integer | Bignum or Fixnum (as required) |
| int64 | Uses variable-length encoding. Inefficient for encoding negative numbers – if your field is likely to have negative values, use sint64 instead. | int64 | long | int/long | int64 | long | integer/string | Bignum |
| uint32 | Uses variable-length encoding. | uint32 | int | int/long | uint32 | uint | integer | Bignum or Fixnum (as required) |
| uint64 | Uses variable-length encoding. | uint64 | long | int/long | uint64 | ulong | integer/string | Bignum or Fixnum (as required) |
| sint32 | Uses variable-length encoding. Signed int value. These more efficiently encode negative numbers than regular int32s. | int32 | int | int | int32 | int | integer | Bignum or Fixnum (as required) |
| sint64 | Uses variable-length encoding. Signed int value. These more efficiently encode negative numbers than regular int64s. | int64 | long | int/long | int64 | long | integer/string | Bignum |
| fixed32 | Always four bytes. More efficient than uint32 if values are often greater than 2^28. | uint32 | int | int | uint32 | uint | integer | Bignum or Fixnum (as required) |
| fixed64 | Always eight bytes. More efficient than uint64 if values are often greater than 2^56. | uint64 | long | int/long | uint64 | ulong | integer/string | Bignum |
| sfixed32 | Always four bytes. | int32 | int | int | int32 | int | integer | Bignum or Fixnum (as required) |
| sfixed64 | Always eight bytes. | int64 | long | int/long | int64 | long | integer/string | Bignum |
| bool |  | bool | boolean | boolean | bool | bool | boolean | TrueClass/FalseClass |
| string | A string must always contain UTF-8 encoded or 7-bit ASCII text. | string | String | str/unicode | string | string | string | String (UTF-8) |
| bytes | May contain any arbitrary sequence of bytes. | string | ByteString | str | []byte | ByteString | string | String (ASCII-8BIT) |
+ + + diff --git a/docs/protodoc-markdown.tmpl b/docs/protodoc-markdown.tmpl new file mode 100644 index 0000000000..e69de29bb2 diff --git a/go.mod b/go.mod new file mode 100644 index 0000000000..34d93acce7 --- /dev/null +++ b/go.mod @@ -0,0 +1,23 @@ +go 1.15 + +module github.com/cosmos/ibc-go + +replace github.com/gogo/protobuf => github.com/regen-network/protobuf v1.3.3-alpha.regen.1 + +require ( + github.com/armon/go-metrics v0.3.6 + github.com/confio/ics23/go v0.6.3 + github.com/cosmos/cosmos-sdk v0.40.0-rc7.0.20210222142146-c2be53a44733 + github.com/gogo/protobuf v1.3.3 + github.com/golang/protobuf v1.4.3 + github.com/gorilla/mux v1.8.0 + github.com/grpc-ecosystem/grpc-gateway v1.16.0 + github.com/pkg/errors v0.9.1 + github.com/spf13/cobra v1.1.3 + github.com/spf13/pflag v1.0.5 + github.com/stretchr/testify v1.7.0 + github.com/tendermint/tendermint v0.34.7 + github.com/tendermint/tm-db v0.6.4 + google.golang.org/genproto v0.0.0-20210114201628-6edceaf6022f + google.golang.org/grpc v1.35.0 +) diff --git a/go.sum b/go.sum new file mode 100644 index 0000000000..667a3c4087 --- /dev/null +++ b/go.sum @@ -0,0 +1,1035 @@ +cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU= +cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU= +cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY= +cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc= +cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0= +cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= +cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= +cloud.google.com/go/firestore v1.1.0/go.mod h1:ulACoGHTpvq5r8rxGJ4ddJZBZqakUQqClKRT5SZwBmk= +cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= +cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw= +dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= +github.com/99designs/keyring v1.1.6 h1:kVDC2uCgVwecxCk+9zoCt2uEL6dt+dfVzMvGgnVcIuM= +github.com/99designs/keyring v1.1.6/go.mod h1:16e0ds7LGQQcT59QqkTg72Hh5ShM51Byv5PEmW6uoRU= +github.com/Azure/azure-pipeline-go v0.2.1/go.mod h1:UGSo8XybXnIGZ3epmeBw7Jdz+HiUVpqIlpz/HKHylF4= +github.com/Azure/azure-pipeline-go v0.2.2/go.mod h1:4rQ/NZncSvGqNkkOsNpOU1tgoNuIlp9AfUH5G1tvCHc= +github.com/Azure/azure-storage-blob-go v0.7.0/go.mod h1:f9YQKtsG1nMisotuTPpO0tjNuEjKRYAcJU8/ydDI++4= +github.com/Azure/go-autorest/autorest v0.9.0/go.mod h1:xyHB1BMZT0cuDHU7I0+g046+BFDTQ8rEZB0s4Yfa6bI= +github.com/Azure/go-autorest/autorest/adal v0.5.0/go.mod h1:8Z9fGy2MpX0PvDjB1pEgQTmVqjGhiHBW7RJJEciWzS0= +github.com/Azure/go-autorest/autorest/adal v0.8.0/go.mod h1:Z6vX6WXXuyieHAXwMj0S6HY6e6wcHn37qQMBQlvY3lc= +github.com/Azure/go-autorest/autorest/date v0.1.0/go.mod h1:plvfp3oPSKwf2DNjlBjWF/7vwR+cUD/ELuzDCXwHUVA= +github.com/Azure/go-autorest/autorest/date v0.2.0/go.mod h1:vcORJHLJEh643/Ioh9+vPmf1Ij9AEBM5FuBIXLmIy0g= +github.com/Azure/go-autorest/autorest/mocks v0.1.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0= +github.com/Azure/go-autorest/autorest/mocks v0.2.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0= 
+github.com/Azure/go-autorest/autorest/mocks v0.3.0/go.mod h1:a8FDP3DYzQ4RYfVAxAN3SVSiiO77gL2j2ronKKP0syM= +github.com/Azure/go-autorest/logger v0.1.0/go.mod h1:oExouG+K6PryycPJfVSxi/koC6LSNgds39diKLz7Vrc= +github.com/Azure/go-autorest/tracing v0.5.0/go.mod h1:r/s2XiOKccPW3HrqB+W0TQzfbtp2fGCgRFtBroKn4Dk= +github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ= +github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= +github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= +github.com/ChainSafe/go-schnorrkel v0.0.0-20200405005733-88cbf1b4c40d h1:nalkkPQcITbvhmL4+C4cKA87NW0tfm3Kl9VXRoPywFg= +github.com/ChainSafe/go-schnorrkel v0.0.0-20200405005733-88cbf1b4c40d/go.mod h1:URdX5+vg25ts3aCh8H5IFZybJYKWhJHYMTnf+ULtoC4= +github.com/DataDog/datadog-go v3.2.0+incompatible/go.mod h1:LButxg5PwREeZtORoXG3tL4fMGNddJ+vMq1mwgfaqoQ= +github.com/DataDog/zstd v1.4.1/go.mod h1:1jcaCB/ufaK+sKp1NBhlGmpz41jOoPQ35bpF36t7BBo= +github.com/DataDog/zstd v1.4.5 h1:EndNeuB0l9syBZhut0wns3gV1hL8zX8LIu6ZiVHWLIQ= +github.com/DataDog/zstd v1.4.5/go.mod h1:1jcaCB/ufaK+sKp1NBhlGmpz41jOoPQ35bpF36t7BBo= +github.com/Knetic/govaluate v3.0.1-0.20171022003610-9aa49832a739+incompatible/go.mod h1:r7JcOSlj0wfOMncg0iLm8Leh48TZaKVeNIfJntJ2wa0= +github.com/OneOfOne/xxhash v1.2.2 h1:KMrpdQIwFcEqXDklaen+P1axHaj9BSKzvpUUfnHldSE= +github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= +github.com/Shopify/sarama v1.19.0/go.mod h1:FVkBWblsNy7DGZRfXLU0O9RCGt5g3g3yEuWXgklEdEo= +github.com/Shopify/toxiproxy v2.1.4+incompatible/go.mod h1:OXgGpZ6Cli1/URJOF1DMxUHB2q5Ap20/P/eIdh4G0pI= +github.com/StackExchange/wmi v0.0.0-20180116203802-5d049714c4a6/go.mod h1:3eOhrUMpNV+6aFIbp5/iudMxNCF27Vw2OZgy4xEx0Fg= +github.com/VictoriaMetrics/fastcache v1.5.7/go.mod h1:ptDBkNMQI4RtmVo8VS/XwRY6RoTu1dAWCbrk+6WsEM8= +github.com/VividCortex/gohistogram v1.0.0 h1:6+hBz+qvs0JOrrNhhmR7lFxo5sINxBCGXrdtl/UvroE= +github.com/VividCortex/gohistogram v1.0.0/go.mod h1:Pf5mBqqDxYaXu3hDrrU+w6nw50o/4+TcAqDqk/vUH7g= +github.com/Workiva/go-datastructures v1.0.52 h1:PLSK6pwn8mYdaoaCZEMsXBpBotr4HHn9abU0yMQt0NI= +github.com/Workiva/go-datastructures v1.0.52/go.mod h1:Z+F2Rca0qCsVYDS8z7bAGm8f3UkzuWYS/oBZz5a7VVA= +github.com/aead/siphash v1.0.1/go.mod h1:Nywa3cDsYNNK3gaciGTWPwHt0wlpNV15vwmswBAUSII= +github.com/afex/hystrix-go v0.0.0-20180502004556-fa1af6a1f4f5/go.mod h1:SkGFH1ia65gfNATL8TAiHDNxPzPdmEL5uirI2Uyuz6c= +github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= +github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= +github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= +github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= +github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho= +github.com/allegro/bigcache v1.2.1-0.20190218064605-e24eb225f156/go.mod h1:Cb/ax3seSYIx7SuZdm2G2xzfwmv3TPSk2ucNfQESPXM= +github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= +github.com/apache/thrift v0.12.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ= +github.com/apache/thrift v0.13.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ= +github.com/aristanetworks/goarista 
v0.0.0-20170210015632-ea17b1a17847/go.mod h1:D/tb0zPVXnP7fmsLZjtdUhSsumbK/ij54UXjjVgMGxQ= +github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o= +github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8= +github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY= +github.com/armon/go-metrics v0.3.6 h1:x/tmtOF9cDBoXH7XoAGOz2qqm1DknFD1590XmD/DUJ8= +github.com/armon/go-metrics v0.3.6/go.mod h1:4O98XIr/9W0sxpJ8UaYkvjk10Iff7SnFrb4QAOwNTFc= +github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= +github.com/aryann/difflib v0.0.0-20170710044230-e206f873d14a/go.mod h1:DAHtR1m6lCRdSC2Tm3DSWRPvIPr6xNKyeHdqDQSQT+A= +github.com/aws/aws-lambda-go v1.13.3/go.mod h1:4UKl9IzQMoD+QF79YdCuzCwp8VbmG4VAQwij/eHl5CU= +github.com/aws/aws-sdk-go v1.25.48/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= +github.com/aws/aws-sdk-go v1.27.0/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= +github.com/aws/aws-sdk-go-v2 v0.18.0/go.mod h1:JWVYvqSMppoMJC0x5wdwiImzgXTI9FuZwxzkQq9wy+g= +github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= +github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= +github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= +github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= +github.com/bgentry/speakeasy v0.1.0 h1:ByYyxL9InA1OWqxJqqp2A5pYHUrCiAL6K3J+LKSsQkY= +github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= +github.com/bketelsen/crypt v0.0.3-0.20200106085610-5cbc8cc4026c/go.mod h1:MKsuJmJgSg28kpZDP6UIiPt0e0Oz0kqKNGyRaWEPv84= +github.com/btcsuite/btcd v0.0.0-20171128150713-2e60448ffcc6/go.mod h1:Dmm/EzmjnCiweXmzRIAiUWCInVmPgjkzgv5k4tVyXiQ= +github.com/btcsuite/btcd v0.0.0-20190115013929-ed77733ec07d/go.mod h1:d3C0AkH6BRcvO8T0UEPu53cnw4IbV63x1bEjildYhO0= +github.com/btcsuite/btcd v0.20.1-beta/go.mod h1:wVuoA8VJLEcwgqHBwHmzLRazpKxTv13Px/pDuV7OomQ= +github.com/btcsuite/btcd v0.21.0-beta h1:At9hIZdJW0s9E/fAz28nrz6AmcNlSVucCH796ZteX1M= +github.com/btcsuite/btcd v0.21.0-beta/go.mod h1:ZSWyehm27aAuS9bvkATT+Xte3hjHZ+MRgMY/8NJ7K94= +github.com/btcsuite/btclog v0.0.0-20170628155309-84c8d2346e9f/go.mod h1:TdznJufoqS23FtqVCzL0ZqgP5MqXbb4fg/WgDys70nA= +github.com/btcsuite/btcutil v0.0.0-20180706230648-ab6388e0c60a/go.mod h1:+5NJ2+qvTyV9exUAL/rxXi3DcLg2Ts+ymUAY5y4NvMg= +github.com/btcsuite/btcutil v0.0.0-20190425235716-9e5f4b9a998d/go.mod h1:+5NJ2+qvTyV9exUAL/rxXi3DcLg2Ts+ymUAY5y4NvMg= +github.com/btcsuite/btcutil v1.0.2 h1:9iZ1Terx9fMIOtq1VrwdqfsATL9MC2l8ZrUY6YZ2uts= +github.com/btcsuite/btcutil v1.0.2/go.mod h1:j9HUFwoQRsZL3V4n+qG+CUnEGHOarIxfC3Le2Yhbcts= +github.com/btcsuite/go-socks v0.0.0-20170105172521-4720035b7bfd/go.mod h1:HHNXQzUsZCxOoE+CPiyCTO6x34Zs86zZUiwtpXoGdtg= +github.com/btcsuite/goleveldb v0.0.0-20160330041536-7834afc9e8cd/go.mod h1:F+uVaaLLH7j4eDXPRvw78tMflu7Ie2bzYOH4Y8rRKBY= +github.com/btcsuite/goleveldb v1.0.0/go.mod h1:QiK9vBlgftBg6rWQIj6wFzbPfRjiykIEhBH4obrXJ/I= +github.com/btcsuite/snappy-go v0.0.0-20151229074030-0bdef8d06723/go.mod h1:8woku9dyThutzjeg+3xrA5iCpBRH8XEEg3lh6TiUghc= +github.com/btcsuite/snappy-go v1.0.0/go.mod h1:8woku9dyThutzjeg+3xrA5iCpBRH8XEEg3lh6TiUghc= +github.com/btcsuite/websocket v0.0.0-20150119174127-31079b680792/go.mod 
h1:ghJtEyQwv5/p4Mg4C0fgbePVuGr935/5ddU9Z3TmDRY= +github.com/btcsuite/winsvc v1.0.0/go.mod h1:jsenWakMcC0zFBFurPLEAyrnc/teJEM1O46fmI40EZs= +github.com/casbin/casbin/v2 v2.1.2/go.mod h1:YcPU1XXisHhLzuxH9coDNf2FbKpjGlbCg3n9yuLkIJQ= +github.com/cenkalti/backoff v2.2.1+incompatible/go.mod h1:90ReRw6GdpyfrHakVjL/QHaoyV4aDUVVkXQJJJ3NXXM= +github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= +github.com/cespare/cp v0.1.0/go.mod h1:SOGHArjBr4JWaSDEVpWpo/hNg6RoKrls6Oh40hiwW+s= +github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko= +github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= +github.com/cespare/xxhash/v2 v2.1.1 h1:6MnRN8NT7+YBpUIWxHtefFZOKTAPgGjpQSxqLNn0+qY= +github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/circonus-labs/circonus-gometrics v2.3.1+incompatible/go.mod h1:nmEj6Dob7S7YxXgwXpfOuvO54S+tGdZdw9fuRZt25Ag= +github.com/circonus-labs/circonusllhist v0.1.3/go.mod h1:kMXHVDlOchFAehlya5ePtbp5jckzBHf4XRpQvBOLI+I= +github.com/clbanning/x2j v0.0.0-20191024224557-825249438eec/go.mod h1:jMjuTZXRI4dUb/I5gc9Hdhagfvm9+RyrPryS/auMzxE= +github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= +github.com/cloudflare/cloudflare-go v0.10.2-0.20190916151808-a80f83b9add9/go.mod h1:1MxXX1Ux4x6mqPmjkUgTP1CdXIBXKX7T+Jk9Gxrmx+U= +github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= +github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= +github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa/go.mod h1:zn76sxSg3SzpJ0PPJaLDCu+Bu0Lg3sKTORVIj19EIF8= +github.com/codahale/hdrhistogram v0.0.0-20161010025455-3a0bb77429bd/go.mod h1:sE/e/2PUdi/liOCUjSTXgM1o87ZssimdTWN964YiIeI= +github.com/coinbase/rosetta-sdk-go v0.5.8/go.mod h1:xd4wYUhV3LkY78SPH8BUhc88rXfn2jYgN9BfiSjbcvM= +github.com/coinbase/rosetta-sdk-go v0.5.9 h1:CuGQE3HFmYwdEACJnuOtVI9cofqPsGvq6FdFIzaOPKI= +github.com/coinbase/rosetta-sdk-go v0.5.9/go.mod h1:xd4wYUhV3LkY78SPH8BUhc88rXfn2jYgN9BfiSjbcvM= +github.com/confio/ics23/go v0.0.0-20200817220745-f173e6211efb/go.mod h1:E45NqnlpxGnpfTWL/xauN7MRwEE28T4Dd4uraToOaKg= +github.com/confio/ics23/go v0.6.3 h1:PuGK2V1NJWZ8sSkNDq91jgT/cahFEW9RGp4Y5jxulf0= +github.com/confio/ics23/go v0.6.3/go.mod h1:E45NqnlpxGnpfTWL/xauN7MRwEE28T4Dd4uraToOaKg= +github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk= +github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= +github.com/coreos/etcd v3.3.13+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= +github.com/coreos/go-etcd v2.0.0+incompatible/go.mod h1:Jez6KQU2B/sWsbdaef3ED8NzMklzPG4d5KIOhIy30Tk= +github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= +github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= +github.com/coreos/go-systemd v0.0.0-20180511133405-39ca1b05acc7/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= +github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= +github.com/coreos/pkg v0.0.0-20160727233714-3ac0863d7acf/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= +github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= 
+github.com/cosmos/cosmos-sdk v0.40.0-rc7.0.20210222142146-c2be53a44733 h1:C6B8uY33CgpL3yJt1vxOUFRPDtAjEIjbaFLL0av/8Y0= +github.com/cosmos/cosmos-sdk v0.40.0-rc7.0.20210222142146-c2be53a44733/go.mod h1:J7BQ+xrmuiF5xG+F/Ep+d30XUQmlpIjcPX4Lp0u4oks= +github.com/cosmos/go-bip39 v0.0.0-20180819234021-555e2067c45d/go.mod h1:tSxLoYXyBmiFeKpvmq4dzayMdCjCnu8uqmCysIGBT2Y= +github.com/cosmos/go-bip39 v1.0.0 h1:pcomnQdrdH22njcAatO0yWojsUnCO3y2tNoV1cb6hHY= +github.com/cosmos/go-bip39 v1.0.0/go.mod h1:RNJv0H/pOIVgxw6KS7QeX2a0Uo0aKUlfhZ4xuwvCdJw= +github.com/cosmos/iavl v0.15.0-rc3.0.20201009144442-230e9bdf52cd/go.mod h1:3xOIaNNX19p0QrX0VqWa6voPRoJRGGYtny+DH8NEPvE= +github.com/cosmos/iavl v0.15.0-rc5/go.mod h1:WqoPL9yPTQ85QBMT45OOUzPxG/U/JcJoN7uMjgxke/I= +github.com/cosmos/iavl v0.15.3 h1:xE9r6HW8GeKeoYJN4zefpljZ1oukVScP/7M8oj6SUts= +github.com/cosmos/iavl v0.15.3/go.mod h1:OLjQiAQ4fGD2KDZooyJG9yz+p2ao2IAYSbke8mVvSA4= +github.com/cosmos/ledger-cosmos-go v0.11.1 h1:9JIYsGnXP613pb2vPjFeMMjBI5lEDsEaF6oYorTy6J4= +github.com/cosmos/ledger-cosmos-go v0.11.1/go.mod h1:J8//BsAGTo3OC/vDLjMRFLW6q0WAaXvHnVc7ZmE8iUY= +github.com/cosmos/ledger-go v0.9.2 h1:Nnao/dLwaVTk1Q5U9THldpUMMXU94BOTWPddSmVB6pI= +github.com/cosmos/ledger-go v0.9.2/go.mod h1:oZJ2hHAZROdlHiwTg4t7kP+GKIIkBT+o6c9QWFanOyI= +github.com/cpuguy83/go-md2man v1.0.10/go.mod h1:SmD6nW6nTyfqj6ABTjUi3V3JVMnlJmwcJI5acqYI6dE= +github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= +github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= +github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY= +github.com/danieljoos/wincred v1.0.2 h1:zf4bhty2iLuwgjgpraD2E9UbvO+fe54XXGJbOwe23fU= +github.com/danieljoos/wincred v1.0.2/go.mod h1:SnuYRW9lp1oJrZX/dXJqr0cPK5gYXqx3EJbmjhLdK9U= +github.com/davecgh/go-spew v0.0.0-20171005155431-ecdeabc65495/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/deckarep/golang-set v0.0.0-20180603214616-504e848d77ea/go.mod h1:93vsz/8Wt4joVM7c2AVqh+YRMiUSc14yDtF28KmMOgQ= +github.com/decred/dcrd/lru v1.0.0/go.mod h1:mxKOwFd7lFjN2GZYsiz/ecgqR6kkYAl+0pz0tEMk218= +github.com/desertbit/timer v0.0.0-20180107155436-c41aec40b27f h1:U5y3Y5UE0w7amNe7Z5G/twsBW0KEalRQXZzf8ufSh9I= +github.com/desertbit/timer v0.0.0-20180107155436-c41aec40b27f/go.mod h1:xH/i4TFMt8koVQZ6WFms69WAsDWr2XsYL3Hkl7jkoLE= +github.com/dgraph-io/badger/v2 v2.2007.1/go.mod h1:26P/7fbL4kUZVEVKLAKXkBXKOydDmM2p1e+NhhnBCAE= +github.com/dgraph-io/badger/v2 v2.2007.2 h1:EjjK0KqwaFMlPin1ajhP943VPENHJdEz1KLIegjaI3k= +github.com/dgraph-io/badger/v2 v2.2007.2/go.mod h1:26P/7fbL4kUZVEVKLAKXkBXKOydDmM2p1e+NhhnBCAE= +github.com/dgraph-io/ristretto v0.0.3-0.20200630154024-f66de99634de/go.mod h1:KPxhHT9ZxKefz+PCeOGsrHpl1qZ7i70dGTu2u+Ahh6E= +github.com/dgraph-io/ristretto v0.0.3 h1:jh22xisGBjrEVnRZ1DVTpBVQm0Xndu8sMl0CWDzSIBI= +github.com/dgraph-io/ristretto v0.0.3/go.mod h1:KPxhHT9ZxKefz+PCeOGsrHpl1qZ7i70dGTu2u+Ahh6E= +github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= +github.com/dgryski/go-farm v0.0.0-20190423205320-6a90982ecee2/go.mod h1:SqUrOPUnsFjfmXRMNPybcSiG0BgUW2AuFH8PAnS2iTw= +github.com/dgryski/go-farm 
v0.0.0-20200201041132-a6ae2369ad13 h1:fAjc9m62+UWV/WAFKLNi6ZS0675eEUC9y3AlwSbQu1Y= +github.com/dgryski/go-farm v0.0.0-20200201041132-a6ae2369ad13/go.mod h1:SqUrOPUnsFjfmXRMNPybcSiG0BgUW2AuFH8PAnS2iTw= +github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no= +github.com/dlclark/regexp2 v1.2.0/go.mod h1:2pZnwuY/m+8K6iRw6wQdMtk+rH5tNGR1i55kozfMjCc= +github.com/docker/docker v1.4.2-0.20180625184442-8e610b2b55bf/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= +github.com/dop251/goja v0.0.0-20200721192441-a695b0cdd498/go.mod h1:Mw6PkjjMXWbTj+nnj4s3QPXq1jaT0s5pC0iFD4+BOAA= +github.com/dustin/go-humanize v0.0.0-20171111073723-bb3d318650d4/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= +github.com/dustin/go-humanize v1.0.0 h1:VSnTsYCnlFHaM2/igO1h6X3HA71jcobQuxemgkq4zYo= +github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= +github.com/dvsekhvalnov/jose2go v0.0.0-20200901110807-248326c1351b h1:HBah4D48ypg3J7Np4N+HY/ZR76fx3HEUGxDU6Uk39oQ= +github.com/dvsekhvalnov/jose2go v0.0.0-20200901110807-248326c1351b/go.mod h1:7BvyPhdbLxMXIYTFPLsyJRFMsKmOZnQmzh6Gb+uquuM= +github.com/dvyukov/go-fuzz v0.0.0-20200318091601-be3528f3a813/go.mod h1:11Gm+ccJnvAhCNLlf5+cS9KjtbaD5I5zaZpFMsTHWTw= +github.com/eapache/go-resiliency v1.1.0/go.mod h1:kFI+JgMyC7bLPUVY133qvEBtVayf5mFgVsvEsIPBvNs= +github.com/eapache/go-xerial-snappy v0.0.0-20180814174437-776d5712da21/go.mod h1:+020luEh2TKB4/GOp8oxxtq0Daoen/Cii55CzbTV6DU= +github.com/eapache/queue v1.1.0/go.mod h1:6eCeP0CKFpHLu8blIFXhExK/dRa7WDZfr6jVFPTqq+I= +github.com/edsrzf/mmap-go v0.0.0-20160512033002-935e0e8a636c/go.mod h1:YO35OhQPt3KJa3ryjFM5Bs14WD66h8eGKpfaBNrHW5M= +github.com/edsrzf/mmap-go v1.0.0/go.mod h1:YO35OhQPt3KJa3ryjFM5Bs14WD66h8eGKpfaBNrHW5M= +github.com/enigmampc/btcutil v1.0.3-0.20200723161021-e2fb6adb2a25 h1:2vLKys4RBU4pn2T/hjXMbvwTr1Cvy5THHrQkbeY9HRk= +github.com/enigmampc/btcutil v1.0.3-0.20200723161021-e2fb6adb2a25/go.mod h1:hTr8+TLQmkUkgcuh3mcr5fjrT9c64ZzsBCdCEC6UppY= +github.com/envoyproxy/go-control-plane v0.6.9/go.mod h1:SBwIajubJHhxtWwsL9s8ss4safvEdbitLhGGK48rN6g= +github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= +github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= +github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= +github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= +github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= +github.com/ethereum/go-ethereum v1.9.23/go.mod h1:JIfVb6esrqALTExdz9hRYvrP0xBDf6wCncIu1hNwHpM= +github.com/facebookgo/ensure v0.0.0-20160127193407-b4ab57deab51 h1:0JZ+dUmQeA8IIVUMzysrX4/AKuQwWhV2dYQuPZdvdSQ= +github.com/facebookgo/ensure v0.0.0-20160127193407-b4ab57deab51/go.mod h1:Yg+htXGokKKdzcwhuNDwVvN+uBxDGXJ7G/VN1d8fa64= +github.com/facebookgo/stack v0.0.0-20160209184415-751773369052 h1:JWuenKqqX8nojtoVVWjGfOF9635RETekkoH6Cc9SX0A= +github.com/facebookgo/stack v0.0.0-20160209184415-751773369052/go.mod h1:UbMTZqLaRiH3MsBH8va0n7s1pQYcu3uTb8G4tygF4Zg= +github.com/facebookgo/subset v0.0.0-20150612182917-8dac2c3c4870 h1:E2s37DuLxFhQDg5gKsWoLBOB0n+ZW8s599zru8FJ2/Y= +github.com/facebookgo/subset v0.0.0-20150612182917-8dac2c3c4870/go.mod h1:5tD+neXqOorC30/tWg0LCSkrqj/AR6gu8yY8/fpw1q0= +github.com/fatih/color 
v1.3.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= +github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= +github.com/fatih/color v1.9.0/go.mod h1:eQcE1qtQxscV5RaZvpXrrb8Drkc3/DdQ+uUYCNjL+zU= +github.com/felixge/httpsnoop v1.0.1 h1:lvB5Jl89CsZtGIWuTcDM1E/vkVs49/Ml7JJe07l8SPQ= +github.com/felixge/httpsnoop v1.0.1/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= +github.com/fjl/memsize v0.0.0-20180418122429-ca190fb6ffbc/go.mod h1:VvhXpOYNQvB+uIk2RvXzuaQtkQJzzIx6lSBe1xv7hi0= +github.com/fortytw2/leaktest v1.3.0 h1:u8491cBMTQ8ft8aeV+adlcytMZylmA5nnwwkRZjI8vw= +github.com/fortytw2/leaktest v1.3.0/go.mod h1:jDsjWgpAGjm2CA7WthBh/CdZYEPF31XHquHwclZch5g= +github.com/franela/goblin v0.0.0-20200105215937-c9ffbefa60db/go.mod h1:7dvUGVsVBjqR7JHJk0brhHOZYGmfBYOrK0ZhYMEtBr4= +github.com/franela/goreq v0.0.0-20171204163338-bcd34c9993f8/go.mod h1:ZhphrRTfi2rbfLwlschooIH4+wKKDR4Pdxhh+TRoA20= +github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= +github.com/fsnotify/fsnotify v1.4.9 h1:hsms1Qyu0jgnwNXIxa+/V/PDsU6CfLf6CNO8H7IWoS4= +github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= +github.com/gballet/go-libpcsclite v0.0.0-20190607065134-2772fd86a8ff/go.mod h1:x7DCsMOv1taUwEWCzT4cmDeAkigA5/QCwUodaVOe8Ww= +github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= +github.com/gin-contrib/sse v0.1.0 h1:Y/yl/+YNO8GZSjAhjMsSuLt29uWRFHdHYUb5lYOV9qE= +github.com/gin-contrib/sse v0.1.0/go.mod h1:RHrZQHXnP2xjPF+u1gW/2HnVO7nvIa9PG3Gm+fLHvGI= +github.com/gin-gonic/gin v1.6.3 h1:ahKqKTFpO5KTPHxWZjEdPScmYaGtLo8Y4DMHoEsnp14= +github.com/gin-gonic/gin v1.6.3/go.mod h1:75u5sXoLsGZoRN5Sgbi1eraJ4GU3++wFwWzhwvtwp4M= +github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= +github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= +github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= +github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= +github.com/go-kit/kit v0.10.0 h1:dXFJfIHVvUcpSgDOV+Ne6t7jXri8Tfv2uOLHUZ2XNuo= +github.com/go-kit/kit v0.10.0/go.mod h1:xUsJbQ/Fp4kEt7AFgCuvyX4a71u8h9jB8tj/ORgOZ7o= +github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= +github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= +github.com/go-logfmt/logfmt v0.5.0 h1:TrB8swr/68K7m9CcGut2g3UOihhbcbiMAYiuTXdEih4= +github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A= +github.com/go-ole/go-ole v1.2.1/go.mod h1:7FAglXiTm7HKlQRDeOQ6ZNUHidzCWXuZWq/1dTyBNF8= +github.com/go-playground/assert/v2 v2.0.1 h1:MsBgLAaY856+nPRTKrp3/OZK38U/wa0CcBYNjji3q3A= +github.com/go-playground/assert/v2 v2.0.1/go.mod h1:VDjEfimB/XKnb+ZQfWdccd7VUvScMdVu0Titje2rxJ4= +github.com/go-playground/locales v0.13.0 h1:HyWk6mgj5qFqCT5fjGBuRArbVDfE4hi8+e8ceBS/t7Q= +github.com/go-playground/locales v0.13.0/go.mod h1:taPMhCMXrRLJO55olJkUXHZBHCxTMfnGwq/HNwmWNS8= +github.com/go-playground/universal-translator v0.17.0 h1:icxd5fm+REJzpZx7ZfpaD876Lmtgy7VtROAbHHXk8no= +github.com/go-playground/universal-translator v0.17.0/go.mod h1:UkSxE5sNxxRwHyU+Scu5vgOQjsIJAF8j9muTVoKLVtA= +github.com/go-playground/validator/v10 v10.2.0 h1:KgJ0snyC2R9VXYN2rneOtQcw5aHQB1Vv0sFl1UcHBOY= +github.com/go-playground/validator/v10 v10.2.0/go.mod 
h1:uOYAAleCW8F/7oMFd6aG0GOhaH6EGOAJShg8Id5JGkI= +github.com/go-sourcemap/sourcemap v2.1.2+incompatible/go.mod h1:F8jJfvm2KbVjc5NqelyYJmf/v5J0dwNLS2mL4sNA1Jg= +github.com/go-sql-driver/mysql v1.4.0/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w= +github.com/go-stack/stack v1.8.0 h1:5SgMzNM5HxrEjV0ww2lTmX6E2Izsfxas4+YHWRs3Lsk= +github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= +github.com/gobwas/httphead v0.0.0-20180130184737-2c6c146eadee h1:s+21KNqlpePfkah2I+gwHF8xmJWRjooY+5248k6m4A0= +github.com/gobwas/httphead v0.0.0-20180130184737-2c6c146eadee/go.mod h1:L0fX3K22YWvt/FAX9NnzrNzcI4wNYi9Yku4O0LKYflo= +github.com/gobwas/pool v0.2.0 h1:QEmUOlnSjWtnpRGHF3SauEiOsy82Cup83Vf2LcMlnc8= +github.com/gobwas/pool v0.2.0/go.mod h1:q8bcK0KcYlCgd9e7WYLm9LpyS+YeLd8JVDW6WezmKEw= +github.com/gobwas/ws v1.0.2 h1:CoAavW/wd/kulfZmSIBt6p24n4j7tHgNVCjsfHVNUbo= +github.com/gobwas/ws v1.0.2/go.mod h1:szmBTxLgaFppYjEmNtny/v3w89xOydFnnZMcgRRu/EM= +github.com/godbus/dbus v0.0.0-20190726142602-4481cbc300e2 h1:ZpnhV/YsD2/4cESfV5+Hoeu/iUR3ruzNvZ+yQfO03a0= +github.com/godbus/dbus v0.0.0-20190726142602-4481cbc300e2/go.mod h1:bBOAhwG1umN6/6ZUMtDFBMQR8jRg9O75tm9K00oMsK4= +github.com/gogo/gateway v1.1.0 h1:u0SuhL9+Il+UbjM9VIE3ntfRujKbvVpFvNB4HbjeVQ0= +github.com/gogo/gateway v1.1.0/go.mod h1:S7rR8FRQyG3QFESeSv4l2WnsyzlCLG0CzBbUUo/mbic= +github.com/gogo/googleapis v1.1.0/go.mod h1:gf4bu3Q80BeJ6H1S1vYPm8/ELATdvryBaNFGgqEef3s= +github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= +github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y= +github.com/golang/mock v1.4.4 h1:l75CXGRSwbaYNpl/Z2X1XIIAMSCquvXgpVZDhwEIJsc= +github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4= +github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.0/go.mod h1:Qd/q+1AKNOZr9uGQzbzCmRO6sUih6GTPZv6a1/R87v0= +github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= +github.com/golang/protobuf v1.3.5 h1:F768QJ1E9tib+q5Sc8MkdJi1RxLTbRcTf8LJV56aRls= +github.com/golang/protobuf v1.3.5/go.mod h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk= +github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= +github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= +github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= +github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= +github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= +github.com/golang/protobuf v1.4.1/go.mod 
h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= +github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= +github.com/golang/protobuf v1.4.3 h1:JjCZWpVbqXDqFVmTfYWEVTMIYrL/NPdPSCHPJ0T/raM= +github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= +github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= +github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= +github.com/golang/snappy v0.0.2-0.20200707131729-196ae77b8a26/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= +github.com/golang/snappy v0.0.2 h1:aeE13tS0IiQgFjYdoL8qN3K1N2bXXtI6Vi51/y7BpMw= +github.com/golang/snappy v0.0.2/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= +github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= +github.com/google/btree v1.0.0 h1:0udJVsspx3VBr5FwtLhQQtuAsVc79tTq0ocGIPAU6qo= +github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= +github.com/google/go-cmp v0.2.0 h1:+dTQ8DZQJz0Mb/HjFlkptS1FeQ4cWSnN941F8aEG4SQ= +github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= +github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.0 h1:/QaMHBdZ26BB3SSst0Iwl10Epc+xhTquomWX0oZEB6w= +github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.2 h1:X2ev0eStA3AbceY54o37/0PQ/UWqKEiiO2dKL5OPaFM= +github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/gofuzz v0.0.0-20170612174753-24818f796faf/go.mod h1:HP5RmnzzSNb993RKQDq4+1A4ia9nllfqcQFTQJedwGI= +github.com/google/gofuzz v1.0.0 h1:A8PeW59pxE9IoFRqBp37U+mSNaQoZ46F1f0f863XSXw= +github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/google/gofuzz v1.1.1-0.20200604201612-c04b05f3adfa h1:Q75Upo5UN4JbPFURXZ8nLKYUvF85dyFRop/vQ0Rv+64= +github.com/google/gofuzz v1.1.1-0.20200604201612-c04b05f3adfa/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= +github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= +github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= +github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= +github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= +github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= +github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1 h1:EGx4pi6eqNxGaHF6qqu48+N2wcFQ5qg5FXgOdqsJ5d8= +github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= +github.com/gorilla/context v1.1.1/go.mod h1:kBGZzfjB9CEq2AlWe17Uuf7NDRt0dE0s8S51q0aT7Yg= +github.com/gorilla/handlers v1.5.1 
h1:9lRY6j8DEeeBT10CvO9hGW0gmky0BprnvDI5vfhUHH4= +github.com/gorilla/handlers v1.5.1/go.mod h1:t8XrUpc4KVXb7HGyJ4/cEnwQiaxrX/hz1Zv/4g96P1Q= +github.com/gorilla/mux v1.6.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= +github.com/gorilla/mux v1.7.3/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= +github.com/gorilla/mux v1.8.0 h1:i40aqfkR1h2SlN9hojwV5ZA91wcXFOvkdNIeFDP5koI= +github.com/gorilla/mux v1.8.0/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So= +github.com/gorilla/websocket v0.0.0-20170926233335-4201258b820c/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= +github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= +github.com/gorilla/websocket v1.4.1-0.20190629185528-ae1634f6a989/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= +github.com/gorilla/websocket v1.4.1/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= +github.com/gorilla/websocket v1.4.2 h1:+/TMaTYc4QFitKJxsQ7Yye35DkWvkdLcvGKqM+x0Ufc= +github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= +github.com/graph-gophers/graphql-go v0.0.0-20191115155744-f33e81362277/go.mod h1:9CQHMSxwO4MprSdzoIEobiHpoLtHm77vfxsvsIN5Vuc= +github.com/grpc-ecosystem/go-grpc-middleware v1.0.0/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= +github.com/grpc-ecosystem/go-grpc-middleware v1.0.1-0.20190118093823-f849b5445de4/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= +github.com/grpc-ecosystem/go-grpc-middleware v1.2.1/go.mod h1:EaizFBKfUKtMIF5iaDEhniwNedqGo9FuLFzppDr3uwI= +github.com/grpc-ecosystem/go-grpc-middleware v1.2.2 h1:FlFbCRLd5Jr4iYXZufAvgWN6Ao0JrI5chLINnUXDDr0= +github.com/grpc-ecosystem/go-grpc-middleware v1.2.2/go.mod h1:EaizFBKfUKtMIF5iaDEhniwNedqGo9FuLFzppDr3uwI= +github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk= +github.com/grpc-ecosystem/grpc-gateway v1.8.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= +github.com/grpc-ecosystem/grpc-gateway v1.9.0/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= +github.com/grpc-ecosystem/grpc-gateway v1.9.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= +github.com/grpc-ecosystem/grpc-gateway v1.14.7/go.mod h1:oYZKL012gGh6LMyg/xA7Q2yq6j8bu0wa+9w14EEthWU= +github.com/grpc-ecosystem/grpc-gateway v1.16.0 h1:gmcG1KaJ57LophUzW0Hy8NmPhnMZb4M0+kPpLofRdBo= +github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= +github.com/gsterjov/go-libsecret v0.0.0-20161001094733-a6f4afe4910c h1:6rhixN/i8ZofjG1Y75iExal34USq5p+wiN1tpie8IrU= +github.com/gsterjov/go-libsecret v0.0.0-20161001094733-a6f4afe4910c/go.mod h1:NMPJylDgVpX0MLRlPy15sqSwOFv/U1GZ2m21JhFfek0= +github.com/gtank/merlin v0.1.1-0.20191105220539-8318aed1a79f/go.mod h1:T86dnYJhcGOh5BjZFCJWTDeTK7XW8uE+E21Cy/bIQ+s= +github.com/gtank/merlin v0.1.1 h1:eQ90iG7K9pOhtereWsmyRJ6RAwcP4tHTDBHXNg+u5is= +github.com/gtank/merlin v0.1.1/go.mod h1:T86dnYJhcGOh5BjZFCJWTDeTK7XW8uE+E21Cy/bIQ+s= +github.com/gtank/ristretto255 v0.1.2 h1:JEqUCPA1NvLq5DwYtuzigd7ss8fwbYay9fi4/5uMzcc= +github.com/gtank/ristretto255 v0.1.2/go.mod h1:Ph5OpO6c7xKUGROZfWVLiJf9icMDwUeIvY4OmlYW69o= +github.com/hashicorp/consul/api v1.1.0/go.mod h1:VmuI/Lkw1nC05EYQWNKwWGbkg+FbDBtguAZLlVdkD9Q= +github.com/hashicorp/consul/api v1.3.0/go.mod h1:MmDNSzIMUjNpY/mQ398R4bk2FnqQLoPndWW5VkKPlCE= +github.com/hashicorp/consul/sdk v0.1.1/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8= +github.com/hashicorp/consul/sdk v0.3.0/go.mod 
h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8= +github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= +github.com/hashicorp/go-cleanhttp v0.5.0/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= +github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= +github.com/hashicorp/go-immutable-radix v1.0.0 h1:AKDB1HM5PWEA7i4nhcpwOrO2byshxBjXVn/J/3+z5/0= +github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= +github.com/hashicorp/go-msgpack v0.5.3/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM= +github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk= +github.com/hashicorp/go-retryablehttp v0.5.3/go.mod h1:9B5zBasrRhHXnJnui7y6sL7es7NDiJgTc6Er0maI1Xs= +github.com/hashicorp/go-rootcerts v1.0.0/go.mod h1:K6zTfqpRlCUIjkwsN4Z+hiSfzSTQa6eBIzfwKfwNnHU= +github.com/hashicorp/go-sockaddr v1.0.0/go.mod h1:7Xibr9yA9JjQq1JpNB2Vw7kxv8xerXegt+ozgdvDeDU= +github.com/hashicorp/go-syslog v1.0.0/go.mod h1:qPfqrKkXGihmCqbJM2mZgkZGvKG1dFdvsLplgctolz4= +github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= +github.com/hashicorp/go-uuid v1.0.1 h1:fv1ep09latC32wFoVwnqcnKJGnMSdBanPczbHAYm1BE= +github.com/hashicorp/go-uuid v1.0.1/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= +github.com/hashicorp/go-version v1.2.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= +github.com/hashicorp/go.net v0.0.1/go.mod h1:hjKkEWcCURg++eb33jQU7oqQcI9XDCnUzHA0oac0k90= +github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/hashicorp/golang-lru v0.5.4 h1:YDjusn29QI/Das2iO9M0BHnIbxPeyuCHsjMW+lJfyTc= +github.com/hashicorp/golang-lru v0.5.4/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4= +github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4= +github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= +github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO+LraFDTW64= +github.com/hashicorp/mdns v1.0.0/go.mod h1:tL+uN++7HEJ6SQLQ2/p+z2pH24WQKWjBPkE0mNTz8vQ= +github.com/hashicorp/memberlist v0.1.3/go.mod h1:ajVTdAv/9Im8oMAAj5G31PhhMCZJV2pPBoIllUwCN7I= +github.com/hashicorp/serf v0.8.2/go.mod h1:6hOLApaqBFA1NXqRQAsxw9QxuDEvNxSQRwA/JwenrHc= +github.com/holiman/uint256 v1.1.1/go.mod h1:y4ga/t+u+Xwd7CpDgZESaRcWy0I7XMlTMA25ApIH5Jw= +github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= +github.com/hudl/fargo v1.3.0/go.mod h1:y3CKSmjA+wD2gak7sUSXTAoopbhU08POFhmITJgmKTg= +github.com/huin/goupnp v1.0.0/go.mod h1:n9v9KO1tAxYH82qOn+UTIFQDmx5n1Zxd/ClZDMX7Bnc= +github.com/huin/goutil v0.0.0-20170803182201-1ca381bf3150/go.mod h1:PpLOETDnJ0o3iZrZfqZzyLl6l7F3c6L1oWn7OICBi6o= +github.com/improbable-eng/grpc-web v0.14.0 h1:GdoK+cXABdB+1keuqsV1drSFO2XLYIxqt/4Rj8SWGBk= +github.com/improbable-eng/grpc-web v0.14.0/go.mod h1:6hRR09jOEG81ADP5wCQju1z71g6OL4eEvELdran/3cs= +github.com/inconshreveable/mousetrap v1.0.0 h1:Z8tu5sraLXCXIcARxBp/8cbvlwVa7Z1NHg9XEKhtSvM= +github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= +github.com/influxdata/influxdb v1.2.3-0.20180221223340-01288bdb0883/go.mod h1:qZna6X/4elxqT3yI9iZYdZrWWdeFOOprn86kgg4+IzY= +github.com/influxdata/influxdb1-client v0.0.0-20191209144304-8bf82d3c094d/go.mod 
h1:qj24IKcXYK6Iy9ceXlo3Tc+vtHo9lIhSX5JddghvEPo= +github.com/jackpal/go-nat-pmp v1.0.2-0.20160603034137-1fa385a6f458/go.mod h1:QPH045xvCAeXUZOxsnwmrtiCoxIr9eob+4orBN1SBKc= +github.com/jessevdk/go-flags v0.0.0-20141203071132-1679536dcc89/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI= +github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI= +github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k= +github.com/jmhodges/levigo v1.0.0 h1:q5EC36kV79HWeTBWsod3mG11EgStG3qArTKcvlksN1U= +github.com/jmhodges/levigo v1.0.0/go.mod h1:Q6Qx+uH3RAqyK4rFQroq9RL7mdkABMcfhEI+nNuzMJQ= +github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo= +github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4= +github.com/jrick/logrotate v1.0.0/go.mod h1:LNinyqDIJnpAur+b8yyulnQw/wDuN1+BYKlTRt3OuAQ= +github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= +github.com/json-iterator/go v1.1.7/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= +github.com/json-iterator/go v1.1.8/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= +github.com/json-iterator/go v1.1.9/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= +github.com/json-iterator/go v1.1.10 h1:Kz6Cvnvv2wGdaG/V8yMvfkmNiXq9Ya2KUv4rouJJr68= +github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= +github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= +github.com/jtolds/gls v4.20.0+incompatible h1:xdiiI2gbIgH/gLH7ADydsJ1uDOEzR8yvV7C0MuV77Wo= +github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= +github.com/julienschmidt/httprouter v1.1.1-0.20170430222011-975b5c4c7c21/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= +github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= +github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM= +github.com/karalabe/usb v0.0.0-20190919080040-51dc0efba356/go.mod h1:Od972xHfMJowv7NGVDiWVxk2zxnWgjLlJzE+F4F7AGU= +github.com/keybase/go-keychain v0.0.0-20190712205309-48d3d31d256d h1:Z+RDyXzjKE0i2sTjZ/b1uxiGtPhFy34Ou/Tk0qwN0kM= +github.com/keybase/go-keychain v0.0.0-20190712205309-48d3d31d256d/go.mod h1:JJNrCn9otv/2QP4D7SMJBgaleKpOf66PnW6F5WGNRIc= +github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= +github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= +github.com/kkdai/bstream v0.0.0-20161212061736-f391b8402d23/go.mod h1:J+Gs4SYgM6CZQHDETBtE9HaSEkGmuNXF86RwHhHUvq4= +github.com/kkdai/bstream v1.0.0/go.mod h1:FDnDOHt5Yx4p3FaHcioFT0QjDOtgUpvjeZqAs+NVZZA= +github.com/klauspost/compress v1.10.3 h1:OP96hzwJVBIHYU52pVTI6CczrxPvrGfgqF9N5eTO0Q8= +github.com/klauspost/compress v1.10.3/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs= +github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= +github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= +github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg= +github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= +github.com/kr/pretty v0.1.0/go.mod 
h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= +github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= +github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE= +github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= +github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw= +github.com/leodido/go-urn v1.2.0 h1:hpXL4XnriNwQ/ABnpepYM/1vCLWNDfUNts8dX3xTG6Y= +github.com/leodido/go-urn v1.2.0/go.mod h1:+8+nEpDfqqsY+g338gtMEUOtuK+4dEMhiQEgxpxOKII= +github.com/libp2p/go-buffer-pool v0.0.2 h1:QNK2iAFa8gjAe1SPz6mHSMuCcjs+X1wlHzeOSqcmlfs= +github.com/libp2p/go-buffer-pool v0.0.2/go.mod h1:MvaB6xw5vOrDl8rYZGLFdKAuk/hRoRZd1Vi32+RXyFM= +github.com/lightstep/lightstep-tracer-common/golang/gogo v0.0.0-20190605223551-bc2310a04743/go.mod h1:qklhhLq1aX+mtWk9cPHPzaBjWImj5ULL6C7HFJtXQMM= +github.com/lightstep/lightstep-tracer-go v0.18.1/go.mod h1:jlF1pusYV4pidLvZ+XD0UBX0ZE6WURAspgAczcDHrL4= +github.com/lucasjones/reggen v0.0.0-20180717132126-cdb49ff09d77/go.mod h1:5ELEyG+X8f+meRWHuqUOewBOhvHkl7M76pdGEansxW4= +github.com/lyft/protoc-gen-validate v0.0.13/go.mod h1:XbGvPuh87YZc5TdIa2/I4pLk0QoUACkjt2znoq26NVQ= +github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= +github.com/magiconair/properties v1.8.1/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= +github.com/magiconair/properties v1.8.4 h1:8KGKTcQQGm0Kv7vEbKFErAoAOFyyacLStRtQSeYtvkY= +github.com/magiconair/properties v1.8.4/go.mod h1:y3VJvCyxH9uVvJTWEGAELF3aiYNyPKd5NZ3oSwXrF60= +github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= +github.com/mattn/go-colorable v0.1.0/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= +github.com/mattn/go-colorable v0.1.4/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE= +github.com/mattn/go-colorable v0.1.7/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= +github.com/mattn/go-ieproxy v0.0.0-20190610004146-91bb50d98149/go.mod h1:31jz6HNzdxOmlERGGEc4v/dMssOfmp2p5bT/okiKFFc= +github.com/mattn/go-ieproxy v0.0.0-20190702010315-6dee0af9227d/go.mod h1:31jz6HNzdxOmlERGGEc4v/dMssOfmp2p5bT/okiKFFc= +github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= +github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= +github.com/mattn/go-isatty v0.0.5-0.20180830101745-3fb116b82035/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= +github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= +github.com/mattn/go-isatty v0.0.11/go.mod h1:PhnuNfih5lzO57/f3n+odYbM4JtupLOxQOAqxQCu2WE= +github.com/mattn/go-isatty v0.0.12 h1:wuysRhFDzyxgEmMf5xjvJ2M9dZoWAXNNr5LSBS7uHXY= +github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= +github.com/mattn/go-runewidth v0.0.2/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU= +github.com/mattn/go-runewidth v0.0.3/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU= +github.com/mattn/go-runewidth v0.0.4/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU= +github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU= +github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= +github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg= +github.com/mimoo/StrobeGo 
v0.0.0-20181016162300-f8f6d4d2b643 h1:hLDRPB66XQT/8+wG9WsDpiCvZf1yKO7sz7scAjSlBa0= +github.com/mimoo/StrobeGo v0.0.0-20181016162300-f8f6d4d2b643/go.mod h1:43+3pMjjKimDBf5Kr4ZFNGbLql1zKkbImw+fZbw3geM= +github.com/minio/highwayhash v1.0.1 h1:dZ6IIu8Z14VlC0VpfKofAhCy74wu/Qb5gcn52yWoz/0= +github.com/minio/highwayhash v1.0.1/go.mod h1:BQskDq+xkJ12lmlUUi7U0M5Swg3EWR+dLTk+kldvVxY= +github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc= +github.com/mitchellh/go-homedir v1.0.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= +github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y= +github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= +github.com/mitchellh/go-testing-interface v1.0.0/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI= +github.com/mitchellh/gox v0.4.0/go.mod h1:Sd9lOJ0+aimLBi73mGofS1ycjY8lL3uZM3JPS42BGNg= +github.com/mitchellh/iochan v1.0.0/go.mod h1:JwYml1nuB7xOzsp52dPpHFffvOCDupsG0QubkSMEySY= +github.com/mitchellh/mapstructure v0.0.0-20160808181253-ca63d7c062ee/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= +github.com/mitchellh/mapstructure v1.1.2 h1:fmNYVwqnSfB9mZU6OS2O6GsXM+wcskZDuKQzvN1EDeE= +github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= +github.com/mitchellh/mapstructure v1.3.3 h1:SzB1nHZ2Xi+17FP0zVQBHIZqvwRN9408fJO8h+eeNA8= +github.com/mitchellh/mapstructure v1.3.3/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= +github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= +github.com/modern-go/reflect2 v1.0.1 h1:9f412s+6RmYXLWZSEzVVgPGK7C2PphHj5RJrvfx9AWI= +github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= +github.com/mtibben/percent v0.2.1 h1:5gssi8Nqo8QU/r2pynCm+hBQHpkB/uNK7BJCFogWdzs= +github.com/mtibben/percent v0.2.1/go.mod h1:KG9uO+SZkUp+VkRHsCdYQV3XSZrrSpR3O9ibNBTZrns= +github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= +github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f h1:KUppIJq7/+SVif2QVs3tOP0zanoHgBEVAwHxUSIzRqU= +github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= +github.com/naoina/go-stringutil v0.1.0/go.mod h1:XJ2SJL9jCtBh+P9q5btrd/Ylo8XwT/h1USek5+NqSA0= +github.com/naoina/toml v0.1.2-0.20170918210437-9fafd6967416/go.mod h1:NBIhNtsFMo3G2szEBne+bO4gS192HuIYRqfvOWb4i1E= +github.com/nats-io/jwt v0.3.0/go.mod h1:fRYCDE99xlTsqUzISS1Bi75UBJ6ljOJQOAAu5VglpSg= +github.com/nats-io/jwt v0.3.2/go.mod h1:/euKqTS1ZD+zzjYrY7pseZrTtWQSjujC7xjPc8wL6eU= +github.com/nats-io/nats-server/v2 v2.1.2/go.mod h1:Afk+wRZqkMQs/p45uXdrVLuab3gwv3Z8C4HTBu8GD/k= +github.com/nats-io/nats.go v1.9.1/go.mod h1:ZjDU1L/7fJ09jvUSRVBR2e7+RnLiiIQyqyzEE/Zbp4w= +github.com/nats-io/nkeys v0.1.0/go.mod h1:xpnFELMwJABBLVhffcfd1MZx6VsNRFpEugbxziKVo7w= +github.com/nats-io/nkeys v0.1.3/go.mod h1:xpnFELMwJABBLVhffcfd1MZx6VsNRFpEugbxziKVo7w= +github.com/nats-io/nuid v1.0.1/go.mod 
h1:19wcPz3Ph3q0Jbyiqsd0kePYG7A95tJPxeL+1OSON2c= +github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e h1:fD57ERR4JtEqsWbfPhv4DMiApHyliiK5xCTNVSPiaAs= +github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno= +github.com/nxadm/tail v1.4.4 h1:DQuhQpB1tVlglWS2hLQ5OV6B5r8aGxSrPc5Qo6uTN78= +github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A= +github.com/oklog/oklog v0.3.2/go.mod h1:FCV+B7mhrz4o+ueLpx+KqkyXRGMWOYEvfiXtdGtbWGs= +github.com/oklog/run v1.0.0/go.mod h1:dlhp/R75TPv97u0XWUtDeV/lRKWPKSdTuV0TZvrmrQA= +github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U= +github.com/olekukonko/tablewriter v0.0.0-20170122224234-a0225b3f23b5/go.mod h1:vsDQFd/mU46D+Z4whnwzcISnGGzXWMclvtLoiIKAKIo= +github.com/olekukonko/tablewriter v0.0.1/go.mod h1:vsDQFd/mU46D+Z4whnwzcISnGGzXWMclvtLoiIKAKIo= +github.com/olekukonko/tablewriter v0.0.2-0.20190409134802-7e037d187b0c/go.mod h1:vsDQFd/mU46D+Z4whnwzcISnGGzXWMclvtLoiIKAKIo= +github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk= +github.com/onsi/ginkgo v1.14.0 h1:2mOpI4JVVPBN+WQRa0WKH2eXR+Ey+uK4n7Zj0aYpIQA= +github.com/onsi/ginkgo v1.14.0/go.mod h1:iSB4RoI2tjJc9BBv4NKIKWKya62Rps+oPG/Lv9klQyY= +github.com/onsi/gomega v1.4.1/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA= +github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= +github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY= +github.com/onsi/gomega v1.10.1 h1:o0+MgICZLuZ7xjH7Vx6zS/zcu93/BEp1VwkIW1mEXCE= +github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo= +github.com/op/go-logging v0.0.0-20160315200505-970db520ece7/go.mod h1:HzydrMdWErDVzsI23lYNej1Htcns9BCg93Dk0bBINWk= +github.com/opentracing-contrib/go-observer v0.0.0-20170622124052-a52f23424492/go.mod h1:Ngi6UdF0k5OKD5t5wlmGhe/EDKPoUM3BXZSSfIuJbis= +github.com/opentracing/basictracer-go v1.0.0/go.mod h1:QfBfYuafItcjQuMwinw9GhYKwFXS9KnPs5lxoYwgW74= +github.com/opentracing/opentracing-go v1.0.2/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= +github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= +github.com/openzipkin-contrib/zipkin-go-opentracing v0.4.5/go.mod h1:/wsWhb9smxSfWAKL3wpBW7V8scJMt8N8gnaMCS9E/cA= +github.com/openzipkin/zipkin-go v0.1.6/go.mod h1:QgAqvLzwWbR/WpD4A3cGpPtJrZXNIiJc5AZX7/PBEpw= +github.com/openzipkin/zipkin-go v0.2.1/go.mod h1:NaW6tEwdmWMaCDZzg8sh+IBNOxHMPnhQw8ySjnjRyN4= +github.com/openzipkin/zipkin-go v0.2.2/go.mod h1:NaW6tEwdmWMaCDZzg8sh+IBNOxHMPnhQw8ySjnjRyN4= +github.com/otiai10/copy v1.5.0 h1:SoXDGnlTUZoqB/wSuj/Y5L6T5i6iN4YRAcMCd+JnLNU= +github.com/otiai10/copy v1.5.0/go.mod h1:XWfuS3CrI0R6IE0FbgHsEazaXO8G0LpMp9o8tos0x4E= +github.com/otiai10/curr v0.0.0-20150429015615-9b4961190c95/go.mod h1:9qAhocn7zKJG+0mI8eUu6xqkFDYS2kb2saOteoSB3cE= +github.com/otiai10/curr v1.0.0 h1:TJIWdbX0B+kpNagQrjgq8bCMrbhiuX73M2XwgtDMoOI= +github.com/otiai10/curr v1.0.0/go.mod h1:LskTG5wDwr8Rs+nNQ+1LlxRjAtTZZjtJW4rMXl6j4vs= +github.com/otiai10/mint v1.3.0/go.mod h1:F5AjcsTsWUqX+Na9fpHb52P8pcRX2CI6A3ctIT91xUo= +github.com/otiai10/mint v1.3.2 h1:VYWnrP5fXmz1MXvjuUvcBrXSjGE6xjON+axB/UrpO3E= +github.com/otiai10/mint v1.3.2/go.mod h1:/yxELlJQ0ufhjUwhshSj+wFjZ78CnZ48/1wtmBH1OTc= 
+github.com/pact-foundation/pact-go v1.0.4/go.mod h1:uExwJY4kCzNPcHRj+hCR/HBbOOIwwtUjcrb0b5/5kLM= +github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= +github.com/pascaldekloe/goe v0.1.0 h1:cBOtyMzM9HTpWjXfbbunk26uA6nG3a8n06Wieeh0MwY= +github.com/pascaldekloe/goe v0.1.0/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= +github.com/pborman/uuid v0.0.0-20170112150404-1b00554d8222/go.mod h1:VyrYX9gd7irzKovcSS6BIIEwPRkP2Wm2m9ufcdFSJ34= +github.com/pborman/uuid v1.2.0/go.mod h1:X/NO0urCmaxf9VXbdlT7C2Yzkj2IKimNn4k+gtPdI/k= +github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic= +github.com/pelletier/go-toml v1.8.1 h1:1Nf83orprkJyknT6h7zbuEGUEjcyVlCxSUGTENmNCRM= +github.com/pelletier/go-toml v1.8.1/go.mod h1:T2/BmBdy8dvIRq1a/8aqjN41wvWlN4lrapLU/GW4pbc= +github.com/performancecopilot/speed v3.0.0+incompatible/go.mod h1:/CLtqpZ5gBg1M9iaPbIdPPGyKcA8hKdoy6hAWba7Yac= +github.com/peterh/liner v1.1.1-0.20190123174540-a2c9a5303de7/go.mod h1:CRroGNssyjTd/qIG2FyxByd2S8JEAZXBl4qUrZf8GS0= +github.com/petermattis/goid v0.0.0-20180202154549-b0b1615b78e5 h1:q2e307iGHPdTGp0hoxKjt1H5pDo6utceo3dQVK3I5XQ= +github.com/petermattis/goid v0.0.0-20180202154549-b0b1615b78e5/go.mod h1:jvVRKCrJTQWu0XVbaOlby/2lO20uSCHEMzzplHXte1o= +github.com/pierrec/lz4 v1.0.2-0.20190131084431-473cd7ce01a1/go.mod h1:3/3N9NVKO0jef7pBehbT1qWhCMrIgbYNnFAZCqQ5LRc= +github.com/pierrec/lz4 v2.0.5+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY= +github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= +github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/profile v1.2.1/go.mod h1:hJw3o1OdXxsrSjjVksARp5W95eeEaEfptyVZyv6JUPA= +github.com/pkg/sftp v1.10.1/go.mod h1:lYOWFsE0bwd1+KfKJaKeuokY15vzFx25BLbzYYoAxZI= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI= +github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= +github.com/prometheus/client_golang v0.9.3-0.20190127221311-3c4408c8b829/go.mod h1:p2iRAGwDERtqlqzRXnrOVns+ignqQo//hLXqYxZYVNs= +github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDft0ttaMvbicHlPoso= +github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= +github.com/prometheus/client_golang v1.3.0/go.mod h1:hJaj2vgQTGQmVCsAACORcieXFeDPbaTKGT+JTgUa3og= +github.com/prometheus/client_golang v1.4.0/go.mod h1:e9GMxYsXl05ICDXkRhurwBS4Q3OK1iX/F2sw+iXX5zU= +github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M= +github.com/prometheus/client_golang v1.8.0 h1:zvJNkoCFAnYFNC24FV8nW4JdRJ3GIFcLbg65lL/JDcw= +github.com/prometheus/client_golang v1.8.0/go.mod h1:O9VU6huf47PktckDQfMTX0Y8tY0/7TSWwj+ITvv0TnM= +github.com/prometheus/client_golang v1.9.0 h1:Rrch9mh17XcxvEu9D9DEpb4isxjGBtcevQjKvxPRQIU= +github.com/prometheus/client_golang v1.9.0/go.mod h1:FqZLKOZnGdFAhOK4nqGHa7D66IdsO+O441Eve7ptJDU= +github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= 
+github.com/prometheus/client_model v0.0.0-20190115171406-56726106282f/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= +github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/client_model v0.1.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/client_model v0.2.0 h1:uq5h0d+GuxiXLJLNABMgp2qUWDPiLvgCzz2dUR+/W/M= +github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= +github.com/prometheus/common v0.2.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= +github.com/prometheus/common v0.4.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= +github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= +github.com/prometheus/common v0.7.0/go.mod h1:DjGbpBbp5NYNiECxcL/VnbXCCaQpKd3tt26CguLLsqA= +github.com/prometheus/common v0.9.1/go.mod h1:yhUN8i9wzaXS3w1O07YhxHEBxD+W35wd8bs7vj7HSQ4= +github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo= +github.com/prometheus/common v0.14.0/go.mod h1:U+gB1OBLb1lF3O42bTCL+FK18tX9Oar16Clt/msog/s= +github.com/prometheus/common v0.15.0 h1:4fgOnadei3EZvgRwxJ7RMpG1k1pOZth5Pc13tyspaKM= +github.com/prometheus/common v0.15.0/go.mod h1:U+gB1OBLb1lF3O42bTCL+FK18tX9Oar16Clt/msog/s= +github.com/prometheus/common v0.17.0 h1:kDIZLI74SS+3tedSvEkykgBkD7txMxaJAPj8DtJUKYA= +github.com/prometheus/common v0.17.0/go.mod h1:U+gB1OBLb1lF3O42bTCL+FK18tX9Oar16Clt/msog/s= +github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= +github.com/prometheus/procfs v0.0.0-20190117184657-bf6a532e95b1/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= +github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= +github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= +github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A= +github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= +github.com/prometheus/procfs v0.2.0 h1:wH4vA7pcjKuZzjF7lM8awk4fnuJO6idemZXoKnULUx4= +github.com/prometheus/procfs v0.2.0/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= +github.com/prometheus/tsdb v0.6.2-0.20190402121629-4f204dcbc150/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU= +github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU= +github.com/rakyll/statik v0.1.7 h1:OF3QCZUuyPxuGEP7B4ypUa7sB/iHtqOTDYZXGM8KOdQ= +github.com/rakyll/statik v0.1.7/go.mod h1:AlZONWzMtEnMs7W4e/1LURLiI49pIMmp6V9Unghqrcc= +github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= +github.com/rcrowley/go-metrics v0.0.0-20200313005456-10cdbea86bc0 h1:MkV+77GLUNo5oJ0jf870itWm3D0Sjh7+Za9gazKc5LQ= +github.com/rcrowley/go-metrics v0.0.0-20200313005456-10cdbea86bc0/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= +github.com/regen-network/cosmos-proto v0.3.1 h1:rV7iM4SSFAagvy8RiyhiACbWEGotmqzywPxOvwMdxcg= +github.com/regen-network/cosmos-proto v0.3.1/go.mod h1:jO0sVX6a1B36nmE8C9xBFXpNwWejXC7QqCOnH3O0+YM= 
+github.com/regen-network/protobuf v1.3.3-alpha.regen.1 h1:OHEc+q5iIAXpqiqFKeLpu5NwTIkVXUs48vFMwzqpqY4= +github.com/regen-network/protobuf v1.3.3-alpha.regen.1/go.mod h1:2DjTFR1HhMQhiWC5sZ4OhQ3+NtdbZ6oBDKQwq5Ou+FI= +github.com/rjeczalik/notify v0.9.1/go.mod h1:rKwnCoCGeuQnwBtTSPL9Dad03Vh2n40ePRrjvIXnJho= +github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg= +github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= +github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= +github.com/rs/cors v0.0.0-20160617231935-a62a804a8a00/go.mod h1:gFx+x8UowdsKA9AchylcLynDq+nNFfI8FkUZdN/jGCU= +github.com/rs/cors v1.7.0 h1:+88SsELBHx5r+hZ8TCkggzSstaWNbDvThkVK8H6f9ik= +github.com/rs/cors v1.7.0/go.mod h1:gFx+x8UowdsKA9AchylcLynDq+nNFfI8FkUZdN/jGCU= +github.com/rs/xhandler v0.0.0-20160618193221-ed27b6fd6521/go.mod h1:RvLn4FgxWubrpZHtQLnOf6EwhN2hEMusxZOhcW9H3UQ= +github.com/rs/xid v1.2.1/go.mod h1:+uKXf+4Djp6Md1KODXJxgGQPKngRmWyn10oCKFzNHOQ= +github.com/rs/zerolog v1.20.0 h1:38k9hgtUBdxFwE34yS8rTHmHBa4eN16E4DJlv177LNs= +github.com/rs/zerolog v1.20.0/go.mod h1:IzD0RJ65iWH0w97OQQebJEvTZYvsCUm9WVLWBQrJRjo= +github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g= +github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= +github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= +github.com/samuel/go-zookeeper v0.0.0-20190923202752-2cc03de413da/go.mod h1:gi+0XIa01GRL2eRQVjQkKGqKF3SF9vZR/HnPullcV2E= +github.com/sasha-s/go-deadlock v0.2.0/go.mod h1:StQn567HiB1fF2yJ44N9au7wOhrPS3iZqiDbRupzT10= +github.com/sasha-s/go-deadlock v0.2.1-0.20190427202633-1595213edefa h1:0U2s5loxrTy6/VgfVoLuVLFJcURKLH49ie0zSch7gh4= +github.com/sasha-s/go-deadlock v0.2.1-0.20190427202633-1595213edefa/go.mod h1:F73l+cr82YSh10GxyRI6qZiCgK64VaZjwesgfQ1/iLM= +github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc= +github.com/shirou/gopsutil v2.20.5+incompatible/go.mod h1:5b4v6he4MtMOwMlS0TUMTu2PcXUg8+E1lC7eC3UO/RA= +github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc= +github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= +github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= +github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88= +github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d h1:zE9ykElWQ6/NYmHa3jpm/yHnI4xSofP+UP6SpjHcSeM= +github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= +github.com/smartystreets/goconvey v1.6.4 h1:fv0U8FUIMPNf1L9lnHLvLhgicrIVChEkdzIKYqbNC9s= +github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= +github.com/snikch/goodman v0.0.0-20171125024755-10e37e294daa/go.mod h1:oJyF+mSPHbB5mVY2iO9KV3pTt/QbIkGaO8gQ2WrDbP4= +github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM= +github.com/sony/gobreaker v0.4.1/go.mod h1:ZKptC7FHNvhBz7dN2LGjPVBz2sZJmc0/PkyDJOjmxWY= +github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= +github.com/spaolacci/murmur3 v1.1.0 h1:7c1g84S4BPRrfL5Xrdp6fOJ206sU9y293DDHaoy0bLI= 
+github.com/spaolacci/murmur3 v1.1.0/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= +github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ= +github.com/spf13/afero v1.3.4 h1:8q6vk3hthlpb2SouZcnBVKboxWQWMDNF38bwholZrJc= +github.com/spf13/afero v1.3.4/go.mod h1:Ai8FlHk4v/PARR026UzYexafAt9roJ7LcLMAmO6Z93I= +github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= +github.com/spf13/cast v1.3.1 h1:nFm6S0SMdyzrzcmThSipiEubIDy8WEXKNZ0UOgiRpng= +github.com/spf13/cast v1.3.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= +github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= +github.com/spf13/cobra v0.0.5/go.mod h1:3K3wKZymM7VvHMDS9+Akkh4K60UwM26emMESw8tLCHU= +github.com/spf13/cobra v1.0.0/go.mod h1:/6GTrnGXV9HjY+aR4k0oJ5tcvakLuG6EuKReYlHNrgE= +github.com/spf13/cobra v1.1.1 h1:KfztREH0tPxJJ+geloSLaAkaPkr4ki2Er5quFV1TDo4= +github.com/spf13/cobra v1.1.1/go.mod h1:WnodtKOvamDL/PwE2M4iKs8aMDBZ5Q5klgD3qfVJQMI= +github.com/spf13/cobra v1.1.3 h1:xghbfqPkxzxP3C/f3n5DdpAbdKLj4ZE4BWQI362l53M= +github.com/spf13/cobra v1.1.3/go.mod h1:pGADOWyqRD/YMrPZigI/zbliZ2wVD/23d+is3pSWzOo= +github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo= +github.com/spf13/jwalterweatherman v1.1.0 h1:ue6voC5bR5F8YxI5S67j9i582FU4Qvo2bmqnqMYADFk= +github.com/spf13/jwalterweatherman v1.1.0/go.mod h1:aNWZUN0dPAAO/Ljvb5BEdw96iTZ0EXowPYD95IqWIGo= +github.com/spf13/pflag v1.0.1/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= +github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= +github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= +github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= +github.com/spf13/viper v1.3.2/go.mod h1:ZiWeW+zYFKm7srdB9IoDzzZXaJaI5eL9QjNiN/DMA2s= +github.com/spf13/viper v1.4.0/go.mod h1:PTJ7Z/lr49W6bUbkmS1V3by4uWynFiR9p7+dSq/yZzE= +github.com/spf13/viper v1.7.0/go.mod h1:8WkrPz2fc9jxqZNCJI/76HCieCp4Q8HaLFoCha5qpdg= +github.com/spf13/viper v1.7.1 h1:pM5oEahlgWv/WnHXpgbKz7iLIxRf65tye2Ci+XFK5sk= +github.com/spf13/viper v1.7.1/go.mod h1:8WkrPz2fc9jxqZNCJI/76HCieCp4Q8HaLFoCha5qpdg= +github.com/status-im/keycard-go v0.0.0-20190316090335-8537d3370df4/go.mod h1:RZLeN1LMWmRsyYjvAu+I6Dm9QmlDaIIt+Y+4Kd7Tp+Q= +github.com/steakknife/bloomfilter v0.0.0-20180922174646-6819c0d2a570/go.mod h1:8OR4w3TdeIHIh1g6EMY5p0gVNOovcWC+1vpc7naMuAw= +github.com/steakknife/hamming v0.0.0-20180906055917-c99c65617cd3/go.mod h1:hpGUWaI9xL8pRQCTXQgocU38Qw1g0Us7n5PxxTwTCYU= +github.com/streadway/amqp v0.0.0-20190404075320-75d898a42a94/go.mod h1:AZpEONHx3DKn8O/DFsRAY58/XVQiIPMTMB1SddzLXVw= +github.com/streadway/amqp v0.0.0-20190827072141-edfb9018d271/go.mod h1:AZpEONHx3DKn8O/DFsRAY58/XVQiIPMTMB1SddzLXVw= +github.com/streadway/handy v0.0.0-20190108123426-d5acb3125c2a/go.mod h1:qNTQ5P5JnDBl6z3cMAg/SywNDC5ABu5ApDIw6lUbRmI= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.2.0 h1:Hbg2NidpLE8veEBkEZTL3CvlkUIVzuU9jDplZO54c48= +github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE= +github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= +github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +github.com/stretchr/testify v1.4.0/go.mod 
h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= +github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= +github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.7.0 h1:nwc3DEeHmmLAfoZucVR881uASk0Mfjw8xYJ99tb5CcY= +github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/subosito/gotenv v1.2.0 h1:Slr1R9HxAlEKefgq5jn9U+DnETlIUa6HfgEzj0g5d7s= +github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw= +github.com/syndtr/goleveldb v1.0.1-0.20200815110645-5c35d600f0ca h1:Ld/zXl5t4+D69SiV4JoN7kkfvJdOWlPpfxrzxpLMoUk= +github.com/syndtr/goleveldb v1.0.1-0.20200815110645-5c35d600f0ca/go.mod h1:u2MKkTVTVJWe5D1rCvame8WqhBd88EuIwODJZ1VHCPM= +github.com/tecbot/gorocksdb v0.0.0-20191217155057-f0fad39f321c h1:g+WoO5jjkqGAzHWCjJB1zZfXPIAaDpzXIEJ0eS6B5Ok= +github.com/tecbot/gorocksdb v0.0.0-20191217155057-f0fad39f321c/go.mod h1:ahpPrc7HpcfEWDQRZEmnXMzHY03mLDYMCxeDzy46i+8= +github.com/tendermint/btcd v0.1.1 h1:0VcxPfflS2zZ3RiOAHkBiFUcPvbtRj5O7zHmcJWHV7s= +github.com/tendermint/btcd v0.1.1/go.mod h1:DC6/m53jtQzr/NFmMNEu0rxf18/ktVoVtMrnDD5pN+U= +github.com/tendermint/cosmos-rosetta-gateway v0.3.0-rc2 h1:crekJuQ57yIBDuKd3/dMJ00ZvOHURuv9RGJSi2hWTW4= +github.com/tendermint/cosmos-rosetta-gateway v0.3.0-rc2/go.mod h1:gBPw8WV2Erm4UGHlBRiM3zaEBst4bsuihmMCNQdgP/s= +github.com/tendermint/crypto v0.0.0-20191022145703-50d29ede1e15 h1:hqAk8riJvK4RMWx1aInLzndwxKalgi5rTqgfXxOxbEI= +github.com/tendermint/crypto v0.0.0-20191022145703-50d29ede1e15/go.mod h1:z4YtwM70uOnk8h0pjJYlj3zdYwi9l03By6iAIF5j/Pk= +github.com/tendermint/go-amino v0.16.0 h1:GyhmgQKvqF82e2oZeuMSp9JTN0N09emoSZlb2lyGa2E= +github.com/tendermint/go-amino v0.16.0/go.mod h1:TQU0M1i/ImAo+tYpZi73AU3V/dKeCoMC9Sphe2ZwGME= +github.com/tendermint/tendermint v0.34.0-rc4/go.mod h1:yotsojf2C1QBOw4dZrTcxbyxmPUrT4hNuOQWX9XUwB4= +github.com/tendermint/tendermint v0.34.0-rc6/go.mod h1:ugzyZO5foutZImv0Iyx/gOFCX6mjJTgbLHTwi17VDVg= +github.com/tendermint/tendermint v0.34.0/go.mod h1:Aj3PIipBFSNO21r+Lq3TtzQ+uKESxkbA3yo/INM4QwQ= +github.com/tendermint/tendermint v0.34.7 h1:lvBJFNqpDuEzKfLZKtUXOL5dMOpqHonHlO6LCujyl6E= +github.com/tendermint/tendermint v0.34.7/go.mod h1:JVuu3V1ZexOaZG8VJMRl8lnfrGw6hEB2TVnoUwKRbss= +github.com/tendermint/tm-db v0.6.2/go.mod h1:GYtQ67SUvATOcoY8/+x6ylk8Qo02BQyLrAs+yAcLvGI= +github.com/tendermint/tm-db v0.6.3/go.mod h1:lfA1dL9/Y/Y8wwyPp2NMLyn5P5Ptr/gvDFNWtrCWSf8= +github.com/tendermint/tm-db v0.6.4 h1:3N2jlnYQkXNQclQwd/eKV/NzlqPlfK21cpRRIx80XXQ= +github.com/tendermint/tm-db v0.6.4/go.mod h1:dptYhIpJ2M5kUuenLr+Yyf3zQOv1SgBZcl8/BmWlMBw= +github.com/tidwall/gjson v1.6.1/go.mod h1:BaHyNc5bjzYkPqgLq7mdVzeiRtULKULXLgZFKsxEHI0= +github.com/tidwall/match v1.0.1/go.mod h1:LujAq0jyVjBy028G1WhWfIzbpQfMO8bBZ6Tyb0+pL9E= +github.com/tidwall/pretty v1.0.2/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk= +github.com/tidwall/sjson v1.1.2/go.mod h1:SEzaDwxiPzKzNfUEO4HbYF/m4UCSJDsGgNqsS1LvdoY= +github.com/tmc/grpc-websocket-proxy v0.0.0-20170815181823-89b8d40f7ca8/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= +github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= +github.com/tv42/httpunix v0.0.0-20150427012821-b75d8614f926/go.mod h1:9ESjWnEqriFuLhtthL60Sar/7RFoluCcXsuvEwTV5KM= +github.com/tyler-smith/go-bip39 v1.0.1-0.20181017060643-dbb3b84ba2ef/go.mod h1:sJ5fKU0s6JVwZjjcUEX2zFOnvq0ASQ2K9Zr6cf67kNs= 
+github.com/ugorji/go v1.1.4/go.mod h1:uQMGLiO92mf5W77hV/PUCpI3pbzQx3CRekS0kk+RGrc= +github.com/ugorji/go v1.1.7 h1:/68gy2h+1mWMrwZFeD1kQialdSzAb432dtpeJ42ovdo= +github.com/ugorji/go v1.1.7/go.mod h1:kZn38zHttfInRq0xu/PH0az30d+z6vm202qpg1oXVMw= +github.com/ugorji/go/codec v0.0.0-20181204163529-d75b2dcb6bc8/go.mod h1:VFNgLljTbGfSG7qAOspJ7OScBnGdDN/yBr0sguwnwf0= +github.com/ugorji/go/codec v1.1.7 h1:2SvQaVZ1ouYrrKKwoSk2pzd4A9evlKJb9oTL+OaLUSs= +github.com/ugorji/go/codec v1.1.7/go.mod h1:Ax+UKWsSmolVDwsd+7N3ZtXu+yMGCf907BLYF3GoBXY= +github.com/urfave/cli v1.20.0/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA= +github.com/urfave/cli v1.22.1/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= +github.com/vmihailenco/msgpack/v5 v5.0.0-beta.9/go.mod h1:HVxBVPUK/+fZMonk4bi1islLa8V3cfnBug0+4dykPzo= +github.com/vmihailenco/tagparser v0.1.2/go.mod h1:OeAg3pn3UbLjkWt+rN9oFYB6u/cQgqMEUPoW2WPyhdI= +github.com/wsddn/go-ecdh v0.0.0-20161211032359-48726bab9208/go.mod h1:IotVbo4F+mw0EzQ08zFqg7pK3FebNXpaMsRy2RT+Ees= +github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU= +github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q= +github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/zondax/hid v0.9.0 h1:eiT3P6vNxAEVxXMw66eZUAAnU2zD33JBkfG/EnfAKl8= +github.com/zondax/hid v0.9.0/go.mod h1:l5wttcP0jwtdLjqjMMWFVEE7d1zO0jvSPA9OPZxWpEM= +go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= +go.etcd.io/bbolt v1.3.3/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= +go.etcd.io/bbolt v1.3.5 h1:XAzx9gjCb0Rxj7EoqcClPD1d5ZBxZJk0jbuoPHenBt0= +go.etcd.io/bbolt v1.3.5/go.mod h1:G5EMThwa9y8QZGBClrRx5EY+Yw9kAhnjy3bSjsnlVTQ= +go.etcd.io/etcd v0.0.0-20191023171146-3cf2f69b5738/go.mod h1:dnLIgRNXwCJa5e+c6mIZCrds/GIG4ncV9HhK5PX7jPg= +go.opencensus.io v0.20.1/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk= +go.opencensus.io v0.20.2/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk= +go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= +go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= +go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= +go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= +go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= +go.uber.org/atomic v1.5.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= +go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= +go.uber.org/multierr v1.3.0/go.mod h1:VgVr7evmIr6uPjLBxg28wmKNXyqE9akIJ5XnfpiKl+4= +go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee/go.mod h1:vJERXedbb3MVM5f9Ejo0C68/HhF8uaILCdgjnY+goOA= +go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= +go.uber.org/zap v1.13.0/go.mod h1:zwrFLgMcdUuIBviXEYEH1YKNaOBnKXsx2IPda5bBwHM= +golang.org/x/crypto v0.0.0-20170930174604-9419663f5a44/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20181029021203-45a5f77698d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20181203042331-505ab145d0a9/go.mod 
h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190820162420-60c769a6c586/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20191206172530-e9b2fee46413/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20200115085410-6d4e4cb37c7d/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20200510223506-06a226fb4e37/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20200709230013-948cd5f35899/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20200728195943-123391ffb6de/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20200820211705-5c72a883971a/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20201016220609-9e8e0b390897/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20201117144127-c1f2f97bffc9/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I= +golang.org/x/crypto v0.0.0-20201221181555-eec23a3978ad h1:DN0cp81fZ3njFcrLCytUHRSUkqBjfTo4Tx9RJTWs0EY= +golang.org/x/crypto v0.0.0-20201221181555-eec23a3978ad/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I= +golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= +golang.org/x/exp v0.0.0-20190731235908-ec7cb31e5a56/go.mod h1:JhuoJpWY28nO4Vef9tZUw9qufEGTyX1+7lmHxV5q5G4= +golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek= +golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY= +golang.org/x/exp v0.0.0-20200331195152-e8c3332aa8e5/go.mod h1:4M0jN8W1tt0AVLNr8HDosyJCDCDuyL9N9+3m7wDWgKw= +golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= +golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= +golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= +golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod 
h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE= +golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o= +golang.org/x/mobile v0.0.0-20200801112145-973feb4309de/go.mod h1:skQtrUTUwhdJvXM/2KKJzY8pDgNr9I/FOMqDVRPBUS4= +golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= +golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY= +golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= +golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= +golang.org/x/mod v0.1.1-0.20191209134235-331c550502dd/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/net v0.0.0-20180719180050-a680a1efc54d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181011144130-49bb7cea24b1/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181023162649-9b4f9f5ad519/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181201002055-351d144fa1fc/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181220203305-927f97764cc3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190125091013-d26f9f9a57f3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190522155817-f3200d17e092/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= +golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= +golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190813141303-74dc4d7220e7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20191002035440-2ec189313ef0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= 
+golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200421231249-e086a090c8fd/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20200813134508-3edf25e44fcc/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20200904194848-62affa334b73/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20201021035429-f5854403a974 h1:IX6qOQeG5uLjB/hjjwjedwfjND0hgjPMMyO1RoIXQNI= +golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= +golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181026203630-95b1ffbd15a5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181122145206-62eef0e2fa9b/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181205085412-a5c9d58dba9a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190130150945-aca44879d564/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod 
h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190626221950-04f50cda93cb/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190712062909-fae7ac547cb7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191220142924-d4481acd189f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200420163511-1957bb5e6d1f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200519105757-fe76b779f299/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200814200057-3d37ad5750ed/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200824131525-c12d262b63d8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200909081042-eff7692f9009/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200922070232-aee5d888a860/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys 
v0.0.0-20200930185726-fdedc70b468f h1:+Nyd8tzPX9R7BWHguqsrbFdRx3WQ/1ib8I44HXV5yTA= +golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201015000850-e3ed0017c211 h1:9UQO31fZ+0aKQOFldThf7BKPMJTiBfWycGh/u3UoO88= +golang.org/x/sys v0.0.0-20201015000850-e3ed0017c211/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201214210602-f9fddec55a1e h1:AyodaIpKjppX+cBfTASF2E1US3H2JFBj920Ot3rtDjs= +golang.org/x/sys v0.0.0-20201214210602-f9fddec55a1e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/term v0.0.0-20201117132131-f5c789dd3221 h1:/ZHdbVpdR/jk3g30/d4yUL0JU9kksj8+F/bnQUVLGDM= +golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= +golang.org/x/text v0.3.3 h1:cokOdA+Jmi5PJGXLlLllQSgYigAEfHXJAERHVMaCc2k= +golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/tools v0.0.0-20180828015842-6cd1fcedba52/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= +golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20190828213141-aed303cbaa74/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= 
+golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191029041327-9cc4af7d6b2c/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191029190741-b9c20aec41a5/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191112195655-aa38f8e97acc/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20200103221440-774c71fcf114/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200117012304-6edc0a871e69/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200207183749-b753a1ba74fa/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE= +golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +google.golang.org/api v0.3.1/go.mod h1:6wY9I6uQWHQ8EM57III9mq/AjF+i8G65rmVagqKMtkk= +google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= +google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M= +google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= +google.golang.org/api v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= +google.golang.org/api v0.13.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= +google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= +google.golang.org/appengine v1.2.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0= +google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/genproto v0.0.0-20180831171423-11092d34479b/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190530194941-fb225487d101/go.mod h1:z3L6/3dTEVtUr6QSP8miRzeRqwQOioJ9I66odjN4I7s= +google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod 
h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= +google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= +google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8= +google.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200324203455-a04cca1dde73 h1:+yTMTeazSO5iBqU9NR53hgriivQQbYa5Uuaj8r3qKII= +google.golang.org/genproto v0.0.0-20200324203455-a04cca1dde73/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200423170343-7949de9c1215/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= +google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20201111145450-ac7456db90a6/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20201119123407-9b1e624d6bc4/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210114201628-6edceaf6022f h1:izedQ6yVIc5mZsRuXzmSreCOlzI0lCU1HpG8yEdMiKw= +google.golang.org/genproto v0.0.0-20210114201628-6edceaf6022f/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs= +google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= +google.golang.org/grpc v1.19.1/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= +google.golang.org/grpc v1.20.0/go.mod h1:chYK+tFQF0nDUGJgXMSgLCQk3phJEuONr2DCgLDdAQM= +google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= +google.golang.org/grpc v1.21.0/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= +google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= +google.golang.org/grpc v1.22.1/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= +google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= +google.golang.org/grpc v1.23.1/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= +google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= +google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.28.0 h1:bO/TA4OxCOummhSf10siHuG7vJOiwh7SpRpFZDkOgl4= +google.golang.org/grpc v1.28.0/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKal+60= +google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk= +google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= +google.golang.org/grpc v1.31.1/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= +google.golang.org/grpc v1.32.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= +google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0= +google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= +google.golang.org/grpc v1.35.0 h1:TwIQcH3es+MojMVojxxfQ3l3OF2KzlRxML2xZq0kRo8= +google.golang.org/grpc v1.35.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= 
+google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= +google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= +google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= +google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= +google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= +google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4= +google.golang.org/protobuf v1.25.0 h1:Ejskq+SyPohKW+1uil0JJMtmHCgJPJ/qWTxr8qp+R4c= +google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= +gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20200902074654-038fdea0a05b h1:QRR6H1YWRnHb4Y/HeNFCTJLFVxaq6wH4YuVdsUOr75U= +gopkg.in/check.v1 v1.0.0-20200902074654-038fdea0a05b/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/cheggaaa/pb.v1 v1.0.25/go.mod h1:V/YB90LKu/1FcN3WVnfiiE5oMCibMjukxqG/qStrOgw= +gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= +gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= +gopkg.in/gcfg.v1 v1.2.3/go.mod h1:yesOnuUOFQAhST5vPY4nbZsb/huCgGGXlipJsBn0b3o= +gopkg.in/ini.v1 v1.51.0 h1:AQvPpx3LzTDM0AjnIRlVFwFFGC+npRopjZxLJj6gdno= +gopkg.in/ini.v1 v1.51.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= +gopkg.in/ini.v1 v1.61.0 h1:LBCdW4FmFYL4s/vDZD1RQYX7oAR6IjujCYgMdbHBR10= +gopkg.in/ini.v1 v1.61.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= +gopkg.in/natefinch/npipe.v2 v2.0.0-20160621034901-c1b8fa8bdcce/go.mod h1:5AcXVHNjg+BDxry382+8OKon8SEWiKktQR07RKPsv1c= +gopkg.in/olebedev/go-duktape.v3 v3.0.0-20200619000410-60c24ae608a6/go.mod h1:uAJfkITjFhyEEuUfm7bsmCZRbW5WRq8s9EY8HZ6hCns= +gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo= +gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= +gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= +gopkg.in/urfave/cli.v1 v1.20.0/go.mod h1:vuBzUtMdQeixQj8LVd+/98pzhxNGQoyuPBlsXHOQNO0= +gopkg.in/warnings.v0 v0.1.2/go.mod h1:jksf8JmL6Qr/oQM2OXTHunEvvTAsrWBLb6OOjuVWRNI= +gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74= +gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.3/go.mod 
h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= +gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= +gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c h1:dUUwHk2QECo/6vqA44rthZ8ie2QXMNeKRTHCNY2nXvo= +gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776 h1:tQIYjPdBoyREyB9XMu+nnTclpTYkz2zFM+lzLJFO4gQ= +gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gotest.tools v2.2.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw= +honnef.co/go/tools v0.0.0-20180728063816-88497007e858/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= +nhooyr.io/websocket v1.8.6 h1:s+C3xAMLwGmlI31Nyn/eAehUlZPwfYZu2JXM621Q5/k= +nhooyr.io/websocket v1.8.6/go.mod h1:B70DZP8IakI65RVQ51MsWP/8jndNma26DVA/nFSCgW0= +rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= +sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o= +sourcegraph.com/sourcegraph/appdash v0.0.0-20190731080439-ebfcffb1b5c0/go.mod h1:hI742Nqp5OhwiqlzhgfbWU4mW4yO10fP+LoT9WOswdU= diff --git a/light-clients/06-solomachine/doc.go b/light-clients/06-solomachine/doc.go new file mode 100644 index 0000000000..3673f3c3dc --- /dev/null +++ b/light-clients/06-solomachine/doc.go @@ -0,0 +1,7 @@ +/* +Package solomachine implements a concrete `ConsensusState`, `Header`, +`Misbehaviour` and `Equivocation` types for the Solo Machine light client. +This implementation is based off the ICS 06 specification: +https://github.com/cosmos/ics/tree/master/spec/ics-006-solo-machine-client +*/ +package solomachine diff --git a/light-clients/06-solomachine/module.go b/light-clients/06-solomachine/module.go new file mode 100644 index 0000000000..bafbd0152f --- /dev/null +++ b/light-clients/06-solomachine/module.go @@ -0,0 +1,10 @@ +package solomachine + +import ( + "github.com/cosmos/cosmos-sdk/x/ibc/light-clients/06-solomachine/types" +) + +// Name returns the solo machine client name. 
+func Name() string { + return types.SubModuleName +} diff --git a/light-clients/06-solomachine/spec/01_concepts.md b/light-clients/06-solomachine/spec/01_concepts.md new file mode 100644 index 0000000000..de486b71b1 --- /dev/null +++ b/light-clients/06-solomachine/spec/01_concepts.md @@ -0,0 +1,160 @@ + + +# Concepts + +## Client State + +The `ClientState` for a solo machine light client stores the latest sequence, the frozen sequence, +the latest consensus state, and a client flag indicating if the client should be allowed to be updated +after a governance proposal. + +If the client is not frozen, then the frozen sequence is 0. + +## Consensus State + +The consensus state stores the public key, diversifier, and timestamp of the solo machine light client. + +The diversifier is used to prevent accidental misbehaviour if the same public key is used across +different chains with the same client identifier. It should be unique to the chain the light client +is used on. + +## Public Key + +The public key can be a single public key or a multi-signature public key. The public key type used +must fulfill the tendermint public key interface (this will become the SDK public key interface in the +near future). The public key must be registered on the application codec; otherwise encoding/decoding +errors will arise. The public key stored in the consensus state is represented as a protobuf `Any`. +This allows for flexibility in what other public key types can be supported in the future. + +## Counterparty Verification + +The solo machine light client can verify counterparty client state, consensus state, connection state, +channel state, packet commitments, packet acknowledgements, packet receipt absence, +and the next sequence receive. At the end of each successful verification call, the light +client sequence number will be incremented. + +Successful verification requires the current public key to sign over the proof. + +## Proofs + +A solo machine proof should verify that the solomachine public key signed +over some specified data. The format for generating marshaled proofs for +the SDK's implementation of solo machine is as follows: + +1. Construct the data using the associated protobuf definition and marshal it. + +For example: + +```go +data := &ClientStateData{ + Path: []byte(path.String()), + ClientState: any, +} + +dataBz, err := cdc.MarshalBinaryBare(data) +``` + +The helper functions `...DataBytes()` in [proofs.go](../types/proofs.go) handle this +functionality. + +2. Construct the `SignBytes` and marshal it. + +For example: + +```go +signBytes := &SignBytes{ + Sequence: sequence, + Timestamp: timestamp, + Diversifier: diversifier, + DataType: CLIENT, + Data: dataBz, +} + +signBz, err := cdc.MarshalBinaryBare(signBytes) +``` + +The helper functions `...SignBytes()` in [proofs.go](../types/proofs.go) handle this functionality. +The `DataType` field is used to disambiguate what type of data was signed to prevent potential +proto encoding overlap. + +3. Sign the sign bytes. Embed the signatures into either `SingleSignatureData` or `MultiSignatureData`. +Convert the `SignatureData` to proto and marshal it. + +For example: + +```go +sig, err := key.Sign(signBz) +sigData := &signing.SingleSignatureData{ + Signature: sig, +} + +protoSigData := signing.SignatureDataToProto(sigData) +bz, err := cdc.MarshalBinaryBare(protoSigData) +``` + +4. Construct a `TimestampedSignatureData` and marshal it. The marshaled result can be passed in +as the proof parameter to the verification functions.
+ +For example: + +```go +timestampedSignatureData := &types.TimestampedSignatureData{ + SignatureData: sigData, + Timestamp: solomachine.Time, +} + +proof, err := cdc.MarshalBinaryBare(timestampedSignatureData) +``` + +## Updates By Header + +An update by a header will only succeed if: + +- the header provided is parseable to a solo machine header +- the header sequence matches the current sequence +- the header timestamp is greater than or equal to the consensus state timestamp +- the currently registered public key generated the proof + +If the update is successful: + +- the public key is updated +- the diversifier is updated +- the timestamp is updated +- the sequence is incremented by 1 +- the new consensus state is set in the client state + +## Updates By Proposal + +An update by a governance proposal will only succeed if: + +- the substitute provided is parseable to a solo machine client state +- the `AllowUpdateAfterProposal` client parameter is set to `true` +- the new consensus state public key does not equal the current consensus state public key + +If the update is successful: + +- the subject client state is updated to the substitute client state +- the subject consensus state is updated to the substitute consensus state +- the client is unfrozen (if it was previously frozen) + +## Misbehaviour + +Misbehaviour handling will only succeed if: + +- the misbehaviour provided is parseable to a solo machine misbehaviour +- the client is not already frozen +- the current public key signed over two unique data messages at the same sequence and diversifier. + +If the misbehaviour is successfully processed: + +- the client is frozen by setting the frozen sequence to the misbehaviour sequence + +NOTE: Misbehaviour processing is data processing order dependent. A misbehaving solo machine +could update to a new public key to prevent being frozen before misbehaviour is submitted. + +## Upgrades + +Upgrades to solo machine light clients are not supported since an entirely different type of +public key can be set using normal client updates. diff --git a/light-clients/06-solomachine/spec/02_state.md b/light-clients/06-solomachine/spec/02_state.md new file mode 100644 index 0000000000..a9ff4ea5b4 --- /dev/null +++ b/light-clients/06-solomachine/spec/02_state.md @@ -0,0 +1,12 @@ + + +# State + +The solo machine light client will only store consensus states for each update by a header +or a governance proposal. The latest client state is also maintained in the store. + +These values can be found under the light client paths defined in the IBC +[core store specs](../../../core/spec/02_state.md). + diff --git a/light-clients/06-solomachine/spec/03_state_transitions.md b/light-clients/06-solomachine/spec/03_state_transitions.md new file mode 100644 index 0000000000..48a1e18f1c --- /dev/null +++ b/light-clients/06-solomachine/spec/03_state_transitions.md @@ -0,0 +1,39 @@ + + +# State Transitions + +## Client State Verification Functions + +Successful state verification by a solo machine light client will result in: + +- the sequence being incremented by 1. + +## Update By Header + +A successful update of a solo machine light client by a header will result in: + +- the public key being updated to the new public key provided by the header. +- the diversifier being updated to the new diversifier provided by the header. +- the timestamp being updated to the new timestamp provided by the header.
+ +## Upgrades + +Upgrades to solo machine light clients are not supported since an entirely different type of +public key can be set using normal client updates. diff --git a/light-clients/06-solomachine/spec/02_state.md b/light-clients/06-solomachine/spec/02_state.md new file mode 100644 index 0000000000..a9ff4ea5b4 --- /dev/null +++ b/light-clients/06-solomachine/spec/02_state.md @@ -0,0 +1,12 @@ + + +# State + +The solo machine light client will only store consensus states for each update by a header +or a governance proposal. The latest client state is also maintained in the store. + +These values can be found under the light client paths defined in the IBC +[core store specs](../../../core/spec/02_state.md). + diff --git a/light-clients/06-solomachine/spec/03_state_transitions.md b/light-clients/06-solomachine/spec/03_state_transitions.md new file mode 100644 index 0000000000..48a1e18f1c --- /dev/null +++ b/light-clients/06-solomachine/spec/03_state_transitions.md @@ -0,0 +1,39 @@ + + +# State Transitions + +## Client State Verification Functions + +Successful state verification by a solo machine light client will result in: + +- the sequence being incremented by 1. + +## Update By Header + +A successful update of a solo machine light client by a header will result in: + +- the public key being updated to the new public key provided by the header. +- the diversifier being updated to the new diversifier provided by the header. +- the timestamp being updated to the new timestamp provided by the header. +- the sequence being incremented by 1. +- the consensus state being updated (the consensus state stores the public key, diversifier, and timestamp). + +## Update By Governance Proposal + +A successful update of a solo machine light client by a governance proposal will result in: + +- the client state being updated to the substitute client state. +- the consensus state being updated to the substitute consensus state (the consensus state stores the public key, diversifier, and timestamp). +- the frozen sequence being set to zero (the client is unfrozen if it was previously frozen). + +## Upgrade + +Client upgrades are not supported for the solo machine light client. No state transition occurs. + +## Misbehaviour + +Successful misbehaviour processing of a solo machine light client will result in: + +- the frozen sequence being set to the sequence the misbehaviour occurred at. diff --git a/light-clients/06-solomachine/spec/04_messages.md b/light-clients/06-solomachine/spec/04_messages.md new file mode 100644 index 0000000000..465ea6229a --- /dev/null +++ b/light-clients/06-solomachine/spec/04_messages.md @@ -0,0 +1,8 @@ + + +# Messages + +The messages used to initialize a solo machine light client are defined in the +core sub-module [02-client](../../../core/spec/04_messages.md). diff --git a/light-clients/06-solomachine/spec/README.md b/light-clients/06-solomachine/spec/README.md new file mode 100644 index 0000000000..77db1bfeee --- /dev/null +++ b/light-clients/06-solomachine/spec/README.md @@ -0,0 +1,26 @@ + + +# `solomachine` + +## Abstract + +This document defines the implementation of the ICS06 protocol on the Cosmos SDK. For the general +specification, please refer to the [ICS06 Specification](https://github.com/cosmos/ics/tree/master/spec/ics-006-solo-machine-client). + +This implementation of a solo machine light client supports single and multi-signature public +keys. The client is capable of handling public key updates by header and governance proposals. +The light client is capable of processing client misbehaviour. Proofs of the counterparty state +are generated by the solo machine client by signing over the desired state with a certain sequence, +diversifier, and timestamp. + +## Contents + +1. **[Concepts](01_concepts.md)** +2. **[State](02_state.md)** +3. **[State Transitions](03_state_transitions.md)** +4. **[Messages](04_messages.md)** diff --git a/light-clients/06-solomachine/types/client_state.go b/light-clients/06-solomachine/types/client_state.go new file mode 100644 index 0000000000..24a6582f0f --- /dev/null +++ b/light-clients/06-solomachine/types/client_state.go @@ -0,0 +1,491 @@ +package types + +import ( + "reflect" + + ics23 "github.com/confio/ics23/go" + + "github.com/cosmos/cosmos-sdk/codec" + cryptotypes "github.com/cosmos/cosmos-sdk/crypto/types" + sdk "github.com/cosmos/cosmos-sdk/types" + sdkerrors "github.com/cosmos/cosmos-sdk/types/errors" + "github.com/cosmos/cosmos-sdk/types/tx/signing" + clienttypes "github.com/cosmos/cosmos-sdk/x/ibc/core/02-client/types" + commitmenttypes "github.com/cosmos/cosmos-sdk/x/ibc/core/23-commitment/types" + host "github.com/cosmos/cosmos-sdk/x/ibc/core/24-host" + "github.com/cosmos/cosmos-sdk/x/ibc/core/exported" +) + +var _ exported.ClientState = (*ClientState)(nil) + +// NewClientState creates a new ClientState instance.
+func NewClientState(latestSequence uint64, consensusState *ConsensusState, allowUpdateAfterProposal bool) *ClientState { + return &ClientState{ + Sequence: latestSequence, + FrozenSequence: 0, + ConsensusState: consensusState, + AllowUpdateAfterProposal: allowUpdateAfterProposal, + } +} + +// ClientType is Solo Machine. +func (cs ClientState) ClientType() string { + return exported.Solomachine +} + +// GetLatestHeight returns the latest sequence number. +// Return exported.Height to satisfy ClientState interface +// Revision number is always 0 for a solo-machine. +func (cs ClientState) GetLatestHeight() exported.Height { + return clienttypes.NewHeight(0, cs.Sequence) +} + +// IsFrozen returns true if the client is frozen. +func (cs ClientState) IsFrozen() bool { + return cs.FrozenSequence != 0 +} + +// GetFrozenHeight returns the frozen sequence of the client. +// Return exported.Height to satisfy interface +// Revision number is always 0 for a solo-machine +func (cs ClientState) GetFrozenHeight() exported.Height { + return clienttypes.NewHeight(0, cs.FrozenSequence) +} + +// GetProofSpecs returns nil proof specs since client state verification uses signatures. +func (cs ClientState) GetProofSpecs() []*ics23.ProofSpec { + return nil +} + +// Validate performs basic validation of the client state fields. +func (cs ClientState) Validate() error { + if cs.Sequence == 0 { + return sdkerrors.Wrap(clienttypes.ErrInvalidClient, "sequence cannot be 0") + } + if cs.ConsensusState == nil { + return sdkerrors.Wrap(clienttypes.ErrInvalidConsensus, "consensus state cannot be nil") + } + return cs.ConsensusState.ValidateBasic() +} + +// ZeroCustomFields returns solomachine client state with client-specific fields FrozenSequence, +// and AllowUpdateAfterProposal zeroed out +func (cs ClientState) ZeroCustomFields() exported.ClientState { + return NewClientState( + cs.Sequence, cs.ConsensusState, false, + ) +} + +// Initialize will check that initial consensus state is equal to the latest consensus state of the initial client. +func (cs ClientState) Initialize(_ sdk.Context, _ codec.BinaryMarshaler, _ sdk.KVStore, consState exported.ConsensusState) error { + if !reflect.DeepEqual(cs.ConsensusState, consState) { + return sdkerrors.Wrapf(clienttypes.ErrInvalidConsensus, "consensus state in initial client does not equal initial consensus state. expected: %s, got: %s", + cs.ConsensusState, consState) + } + return nil +} + +// ExportMetadata is a no-op since solomachine does not store any metadata in client store +func (cs ClientState) ExportMetadata(_ sdk.KVStore) []exported.GenesisMetadata { + return nil +} + +// VerifyUpgradeAndUpdateState returns an error since solomachine client does not support upgrades +func (cs ClientState) VerifyUpgradeAndUpdateState( + _ sdk.Context, _ codec.BinaryMarshaler, _ sdk.KVStore, + _ exported.ClientState, _ exported.ConsensusState, _, _ []byte, +) (exported.ClientState, exported.ConsensusState, error) { + return nil, nil, sdkerrors.Wrap(clienttypes.ErrInvalidUpgradeClient, "cannot upgrade solomachine client") +} + +// VerifyClientState verifies a proof of the client state of the running chain +// stored on the solo machine. 
+func (cs ClientState) VerifyClientState( + store sdk.KVStore, + cdc codec.BinaryMarshaler, + height exported.Height, + prefix exported.Prefix, + counterpartyClientIdentifier string, + proof []byte, + clientState exported.ClientState, +) error { + publicKey, sigData, timestamp, sequence, err := produceVerificationArgs(cdc, cs, height, prefix, proof) + if err != nil { + return err + } + + clientPrefixedPath := commitmenttypes.NewMerklePath(host.FullClientStatePath(counterpartyClientIdentifier)) + path, err := commitmenttypes.ApplyPrefix(prefix, clientPrefixedPath) + if err != nil { + return err + } + + signBz, err := ClientStateSignBytes(cdc, sequence, timestamp, cs.ConsensusState.Diversifier, path, clientState) + if err != nil { + return err + } + + if err := VerifySignature(publicKey, signBz, sigData); err != nil { + return err + } + + cs.Sequence++ + cs.ConsensusState.Timestamp = timestamp + setClientState(store, cdc, &cs) + return nil +} + +// VerifyClientConsensusState verifies a proof of the consensus state of the +// running chain stored on the solo machine. +func (cs ClientState) VerifyClientConsensusState( + store sdk.KVStore, + cdc codec.BinaryMarshaler, + height exported.Height, + counterpartyClientIdentifier string, + consensusHeight exported.Height, + prefix exported.Prefix, + proof []byte, + consensusState exported.ConsensusState, +) error { + publicKey, sigData, timestamp, sequence, err := produceVerificationArgs(cdc, cs, height, prefix, proof) + if err != nil { + return err + } + + clientPrefixedPath := commitmenttypes.NewMerklePath(host.FullConsensusStatePath(counterpartyClientIdentifier, consensusHeight)) + path, err := commitmenttypes.ApplyPrefix(prefix, clientPrefixedPath) + if err != nil { + return err + } + + signBz, err := ConsensusStateSignBytes(cdc, sequence, timestamp, cs.ConsensusState.Diversifier, path, consensusState) + if err != nil { + return err + } + + if err := VerifySignature(publicKey, signBz, sigData); err != nil { + return err + } + + cs.Sequence++ + cs.ConsensusState.Timestamp = timestamp + setClientState(store, cdc, &cs) + return nil +} + +// VerifyConnectionState verifies a proof of the connection state of the +// specified connection end stored on the target machine. +func (cs ClientState) VerifyConnectionState( + store sdk.KVStore, + cdc codec.BinaryMarshaler, + height exported.Height, + prefix exported.Prefix, + proof []byte, + connectionID string, + connectionEnd exported.ConnectionI, +) error { + publicKey, sigData, timestamp, sequence, err := produceVerificationArgs(cdc, cs, height, prefix, proof) + if err != nil { + return err + } + + connectionPath := commitmenttypes.NewMerklePath(host.ConnectionPath(connectionID)) + path, err := commitmenttypes.ApplyPrefix(prefix, connectionPath) + if err != nil { + return err + } + + signBz, err := ConnectionStateSignBytes(cdc, sequence, timestamp, cs.ConsensusState.Diversifier, path, connectionEnd) + if err != nil { + return err + } + + if err := VerifySignature(publicKey, signBz, sigData); err != nil { + return err + } + + cs.Sequence++ + cs.ConsensusState.Timestamp = timestamp + setClientState(store, cdc, &cs) + return nil +} + +// VerifyChannelState verifies a proof of the channel state of the specified +// channel end, under the specified port, stored on the target machine. 
+func (cs ClientState) VerifyChannelState( + store sdk.KVStore, + cdc codec.BinaryMarshaler, + height exported.Height, + prefix exported.Prefix, + proof []byte, + portID, + channelID string, + channel exported.ChannelI, +) error { + publicKey, sigData, timestamp, sequence, err := produceVerificationArgs(cdc, cs, height, prefix, proof) + if err != nil { + return err + } + + channelPath := commitmenttypes.NewMerklePath(host.ChannelPath(portID, channelID)) + path, err := commitmenttypes.ApplyPrefix(prefix, channelPath) + if err != nil { + return err + } + + signBz, err := ChannelStateSignBytes(cdc, sequence, timestamp, cs.ConsensusState.Diversifier, path, channel) + if err != nil { + return err + } + + if err := VerifySignature(publicKey, signBz, sigData); err != nil { + return err + } + + cs.Sequence++ + cs.ConsensusState.Timestamp = timestamp + setClientState(store, cdc, &cs) + return nil +} + +// VerifyPacketCommitment verifies a proof of an outgoing packet commitment at +// the specified port, specified channel, and specified sequence. +func (cs ClientState) VerifyPacketCommitment( + store sdk.KVStore, + cdc codec.BinaryMarshaler, + height exported.Height, + _ uint64, + _ uint64, + prefix exported.Prefix, + proof []byte, + portID, + channelID string, + packetSequence uint64, + commitmentBytes []byte, +) error { + publicKey, sigData, timestamp, sequence, err := produceVerificationArgs(cdc, cs, height, prefix, proof) + if err != nil { + return err + } + + commitmentPath := commitmenttypes.NewMerklePath(host.PacketCommitmentPath(portID, channelID, packetSequence)) + path, err := commitmenttypes.ApplyPrefix(prefix, commitmentPath) + if err != nil { + return err + } + + signBz, err := PacketCommitmentSignBytes(cdc, sequence, timestamp, cs.ConsensusState.Diversifier, path, commitmentBytes) + if err != nil { + return err + } + + if err := VerifySignature(publicKey, signBz, sigData); err != nil { + return err + } + + cs.Sequence++ + cs.ConsensusState.Timestamp = timestamp + setClientState(store, cdc, &cs) + return nil +} + +// VerifyPacketAcknowledgement verifies a proof of an incoming packet +// acknowledgement at the specified port, specified channel, and specified sequence. +func (cs ClientState) VerifyPacketAcknowledgement( + store sdk.KVStore, + cdc codec.BinaryMarshaler, + height exported.Height, + _ uint64, + _ uint64, + prefix exported.Prefix, + proof []byte, + portID, + channelID string, + packetSequence uint64, + acknowledgement []byte, +) error { + publicKey, sigData, timestamp, sequence, err := produceVerificationArgs(cdc, cs, height, prefix, proof) + if err != nil { + return err + } + + ackPath := commitmenttypes.NewMerklePath(host.PacketAcknowledgementPath(portID, channelID, packetSequence)) + path, err := commitmenttypes.ApplyPrefix(prefix, ackPath) + if err != nil { + return err + } + + signBz, err := PacketAcknowledgementSignBytes(cdc, sequence, timestamp, cs.ConsensusState.Diversifier, path, acknowledgement) + if err != nil { + return err + } + + if err := VerifySignature(publicKey, signBz, sigData); err != nil { + return err + } + + cs.Sequence++ + cs.ConsensusState.Timestamp = timestamp + setClientState(store, cdc, &cs) + return nil +} + +// VerifyPacketReceiptAbsence verifies a proof of the absence of an +// incoming packet receipt at the specified port, specified channel, and +// specified sequence. 
+func (cs ClientState) VerifyPacketReceiptAbsence( + store sdk.KVStore, + cdc codec.BinaryMarshaler, + height exported.Height, + _ uint64, + _ uint64, + prefix exported.Prefix, + proof []byte, + portID, + channelID string, + packetSequence uint64, +) error { + publicKey, sigData, timestamp, sequence, err := produceVerificationArgs(cdc, cs, height, prefix, proof) + if err != nil { + return err + } + + receiptPath := commitmenttypes.NewMerklePath(host.PacketReceiptPath(portID, channelID, packetSequence)) + path, err := commitmenttypes.ApplyPrefix(prefix, receiptPath) + if err != nil { + return err + } + + signBz, err := PacketReceiptAbsenceSignBytes(cdc, sequence, timestamp, cs.ConsensusState.Diversifier, path) + if err != nil { + return err + } + + if err := VerifySignature(publicKey, signBz, sigData); err != nil { + return err + } + + cs.Sequence++ + cs.ConsensusState.Timestamp = timestamp + setClientState(store, cdc, &cs) + return nil +} + +// VerifyNextSequenceRecv verifies a proof of the next sequence number to be +// received of the specified channel at the specified port. +func (cs ClientState) VerifyNextSequenceRecv( + store sdk.KVStore, + cdc codec.BinaryMarshaler, + height exported.Height, + _ uint64, + _ uint64, + prefix exported.Prefix, + proof []byte, + portID, + channelID string, + nextSequenceRecv uint64, +) error { + publicKey, sigData, timestamp, sequence, err := produceVerificationArgs(cdc, cs, height, prefix, proof) + if err != nil { + return err + } + + nextSequenceRecvPath := commitmenttypes.NewMerklePath(host.NextSequenceRecvPath(portID, channelID)) + path, err := commitmenttypes.ApplyPrefix(prefix, nextSequenceRecvPath) + if err != nil { + return err + } + + signBz, err := NextSequenceRecvSignBytes(cdc, sequence, timestamp, cs.ConsensusState.Diversifier, path, nextSequenceRecv) + if err != nil { + return err + } + + if err := VerifySignature(publicKey, signBz, sigData); err != nil { + return err + } + + cs.Sequence++ + cs.ConsensusState.Timestamp = timestamp + setClientState(store, cdc, &cs) + return nil +} + +// produceVerificationArgs performs the basic checks on the arguments that are +// shared between the verification functions and returns the public key of the +// consensus state, the unmarshalled proof representing the signature and timestamp, +// along with the solo-machine sequence encoded in the proofHeight.
+func produceVerificationArgs( + cdc codec.BinaryMarshaler, + cs ClientState, + height exported.Height, + prefix exported.Prefix, + proof []byte, +) (cryptotypes.PubKey, signing.SignatureData, uint64, uint64, error) { + if revision := height.GetRevisionNumber(); revision != 0 { + return nil, nil, 0, 0, sdkerrors.Wrapf(sdkerrors.ErrInvalidHeight, "revision must be 0 for solomachine, got revision-number: %d", revision) + } + // sequence is encoded in the revision height of height struct + sequence := height.GetRevisionHeight() + if cs.IsFrozen() { + return nil, nil, 0, 0, clienttypes.ErrClientFrozen + } + + if prefix == nil { + return nil, nil, 0, 0, sdkerrors.Wrap(commitmenttypes.ErrInvalidPrefix, "prefix cannot be empty") + } + + _, ok := prefix.(commitmenttypes.MerklePrefix) + if !ok { + return nil, nil, 0, 0, sdkerrors.Wrapf(commitmenttypes.ErrInvalidPrefix, "invalid prefix type %T, expected MerklePrefix", prefix) + } + + if proof == nil { + return nil, nil, 0, 0, sdkerrors.Wrap(ErrInvalidProof, "proof cannot be empty") + } + + timestampedSigData := &TimestampedSignatureData{} + if err := cdc.UnmarshalBinaryBare(proof, timestampedSigData); err != nil { + return nil, nil, 0, 0, sdkerrors.Wrapf(err, "failed to unmarshal proof into type %T", timestampedSigData) + } + + timestamp := timestampedSigData.Timestamp + + if len(timestampedSigData.SignatureData) == 0 { + return nil, nil, 0, 0, sdkerrors.Wrap(ErrInvalidProof, "signature data cannot be empty") + } + + sigData, err := UnmarshalSignatureData(cdc, timestampedSigData.SignatureData) + if err != nil { + return nil, nil, 0, 0, err + } + + if cs.ConsensusState == nil { + return nil, nil, 0, 0, sdkerrors.Wrap(clienttypes.ErrInvalidConsensus, "consensus state cannot be empty") + } + + latestSequence := cs.GetLatestHeight().GetRevisionHeight() + if latestSequence != sequence { + return nil, nil, 0, 0, sdkerrors.Wrapf( + sdkerrors.ErrInvalidHeight, + "client state sequence != proof sequence (%d != %d)", latestSequence, sequence, + ) + } + + if cs.ConsensusState.GetTimestamp() > timestamp { + return nil, nil, 0, 0, sdkerrors.Wrapf(ErrInvalidProof, "the consensus state timestamp is greater than the signature timestamp (%d >= %d)", cs.ConsensusState.GetTimestamp(), timestamp) + } + + publicKey, err := cs.ConsensusState.GetPubKey() + if err != nil { + return nil, nil, 0, 0, err + } + + return publicKey, sigData, timestamp, sequence, nil +} + +// sets the client state to the store +func setClientState(store sdk.KVStore, cdc codec.BinaryMarshaler, clientState exported.ClientState) { + bz := clienttypes.MustMarshalClientState(cdc, clientState) + store.Set([]byte(host.KeyClientState), bz) +} diff --git a/light-clients/06-solomachine/types/client_state_test.go b/light-clients/06-solomachine/types/client_state_test.go new file mode 100644 index 0000000000..4f6c195c89 --- /dev/null +++ b/light-clients/06-solomachine/types/client_state_test.go @@ -0,0 +1,912 @@ +package types_test + +import ( + clienttypes "github.com/cosmos/cosmos-sdk/x/ibc/core/02-client/types" + connectiontypes "github.com/cosmos/cosmos-sdk/x/ibc/core/03-connection/types" + channeltypes "github.com/cosmos/cosmos-sdk/x/ibc/core/04-channel/types" + commitmenttypes "github.com/cosmos/cosmos-sdk/x/ibc/core/23-commitment/types" + "github.com/cosmos/cosmos-sdk/x/ibc/core/exported" + "github.com/cosmos/cosmos-sdk/x/ibc/light-clients/06-solomachine/types" + ibctmtypes "github.com/cosmos/cosmos-sdk/x/ibc/light-clients/07-tendermint/types" + ibctesting "github.com/cosmos/cosmos-sdk/x/ibc/testing" 
+) + +const ( + counterpartyClientIdentifier = "chainA" + testConnectionID = "connectionid" + testChannelID = "testchannelid" + testPortID = "testportid" +) + +var ( + prefix = commitmenttypes.NewMerklePrefix([]byte("ibc")) + consensusHeight = clienttypes.ZeroHeight() +) + +func (suite *SoloMachineTestSuite) TestClientStateValidateBasic() { + // test singlesig and multisig public keys + for _, solomachine := range []*ibctesting.Solomachine{suite.solomachine, suite.solomachineMulti} { + + testCases := []struct { + name string + clientState *types.ClientState + expPass bool + }{ + { + "valid client state", + solomachine.ClientState(), + true, + }, + { + "empty ClientState", + &types.ClientState{}, + false, + }, + { + "sequence is zero", + types.NewClientState(0, &types.ConsensusState{solomachine.ConsensusState().PublicKey, solomachine.Diversifier, solomachine.Time}, false), + false, + }, + { + "timestamp is zero", + types.NewClientState(1, &types.ConsensusState{solomachine.ConsensusState().PublicKey, solomachine.Diversifier, 0}, false), + false, + }, + { + "diversifier is blank", + types.NewClientState(1, &types.ConsensusState{solomachine.ConsensusState().PublicKey, " ", 1}, false), + false, + }, + { + "pubkey is empty", + types.NewClientState(1, &types.ConsensusState{nil, solomachine.Diversifier, solomachine.Time}, false), + false, + }, + } + + for _, tc := range testCases { + tc := tc + + suite.Run(tc.name, func() { + + err := tc.clientState.Validate() + + if tc.expPass { + suite.Require().NoError(err) + } else { + suite.Require().Error(err) + } + }) + } + } +} + +func (suite *SoloMachineTestSuite) TestInitialize() { + // test singlesig and multisig public keys + for _, solomachine := range []*ibctesting.Solomachine{suite.solomachine, suite.solomachineMulti} { + malleatedConsensus := solomachine.ClientState().ConsensusState + malleatedConsensus.Timestamp = malleatedConsensus.Timestamp + 10 + + testCases := []struct { + name string + consState exported.ConsensusState + expPass bool + }{ + { + "valid consensus state", + solomachine.ConsensusState(), + true, + }, + { + "nil consensus state", + nil, + false, + }, + { + "invalid consensus state: Tendermint consensus state", + &ibctmtypes.ConsensusState{}, + false, + }, + { + "invalid consensus state: consensus state does not match consensus state in client", + malleatedConsensus, + false, + }, + } + + for _, tc := range testCases { + err := solomachine.ClientState().Initialize( + suite.chainA.GetContext(), suite.chainA.Codec, + suite.chainA.App.IBCKeeper.ClientKeeper.ClientStore(suite.chainA.GetContext(), "solomachine"), + tc.consState, + ) + + if tc.expPass { + suite.Require().NoError(err, "valid testcase: %s failed", tc.name) + } else { + suite.Require().Error(err, "invalid testcase: %s passed", tc.name) + } + } + } +} + +func (suite *SoloMachineTestSuite) TestVerifyClientState() { + // create client for tendermint so we can use client state for verification + clientA, _ := suite.coordinator.SetupClients(suite.chainA, suite.chainB, exported.Tendermint) + clientState := suite.chainA.GetClientState(clientA) + path := suite.solomachine.GetClientStatePath(counterpartyClientIdentifier) + + // test singlesig and multisig public keys + for _, solomachine := range []*ibctesting.Solomachine{suite.solomachine, suite.solomachineMulti} { + + value, err := types.ClientStateSignBytes(suite.chainA.Codec, solomachine.Sequence, solomachine.Time, solomachine.Diversifier, path, clientState) + suite.Require().NoError(err) + + sig := 
solomachine.GenerateSignature(value) + + signatureDoc := &types.TimestampedSignatureData{ + SignatureData: sig, + Timestamp: solomachine.Time, + } + + proof, err := suite.chainA.Codec.MarshalBinaryBare(signatureDoc) + suite.Require().NoError(err) + + testCases := []struct { + name string + clientState *types.ClientState + prefix exported.Prefix + proof []byte + expPass bool + }{ + { + "successful verification", + solomachine.ClientState(), + prefix, + proof, + true, + }, + { + "ApplyPrefix failed", + solomachine.ClientState(), + nil, + proof, + false, + }, + { + "client is frozen", + &types.ClientState{ + Sequence: 1, + FrozenSequence: 1, + ConsensusState: solomachine.ConsensusState(), + AllowUpdateAfterProposal: false, + }, + prefix, + proof, + false, + }, + { + "consensus state in client state is nil", + types.NewClientState(1, nil, false), + prefix, + proof, + false, + }, + { + "client state latest height is less than sequence", + types.NewClientState(solomachine.Sequence-1, + &types.ConsensusState{ + Timestamp: solomachine.Time, + PublicKey: solomachine.ConsensusState().PublicKey, + }, false), + prefix, + proof, + false, + }, + { + "consensus state timestamp is greater than signature", + types.NewClientState(solomachine.Sequence, + &types.ConsensusState{ + Timestamp: solomachine.Time + 1, + PublicKey: solomachine.ConsensusState().PublicKey, + }, false), + prefix, + proof, + false, + }, + + { + "proof is nil", + solomachine.ClientState(), + prefix, + nil, + false, + }, + { + "proof verification failed", + solomachine.ClientState(), + prefix, + suite.GetInvalidProof(), + false, + }, + } + + for _, tc := range testCases { + tc := tc + + suite.Run(tc.name, func() { + + var expSeq uint64 + if tc.clientState.ConsensusState != nil { + expSeq = tc.clientState.Sequence + 1 + } + + err := tc.clientState.VerifyClientState( + suite.store, suite.chainA.Codec, solomachine.GetHeight(), tc.prefix, counterpartyClientIdentifier, tc.proof, clientState, + ) + + if tc.expPass { + suite.Require().NoError(err) + suite.Require().Equal(expSeq, suite.GetSequenceFromStore(), "sequence not updated in the store (%d) on valid test case %s", suite.GetSequenceFromStore(), tc.name) + } else { + suite.Require().Error(err) + } + }) + } + } +} + +func (suite *SoloMachineTestSuite) TestVerifyClientConsensusState() { + // create client for tendermint so we can use consensus state for verification + clientA, _ := suite.coordinator.SetupClients(suite.chainA, suite.chainB, exported.Tendermint) + clientState := suite.chainA.GetClientState(clientA) + consensusState, found := suite.chainA.GetConsensusState(clientA, clientState.GetLatestHeight()) + suite.Require().True(found) + + path := suite.solomachine.GetConsensusStatePath(counterpartyClientIdentifier, consensusHeight) + + // test singlesig and multisig public keys + for _, solomachine := range []*ibctesting.Solomachine{suite.solomachine, suite.solomachineMulti} { + + value, err := types.ConsensusStateSignBytes(suite.chainA.Codec, solomachine.Sequence, solomachine.Time, solomachine.Diversifier, path, consensusState) + suite.Require().NoError(err) + + sig := solomachine.GenerateSignature(value) + signatureDoc := &types.TimestampedSignatureData{ + SignatureData: sig, + Timestamp: solomachine.Time, + } + + proof, err := suite.chainA.Codec.MarshalBinaryBare(signatureDoc) + suite.Require().NoError(err) + + testCases := []struct { + name string + clientState *types.ClientState + prefix exported.Prefix + proof []byte + expPass bool + }{ + { + "successful verification", + 
solomachine.ClientState(), + prefix, + proof, + true, + }, + { + "ApplyPrefix failed", + solomachine.ClientState(), + nil, + proof, + false, + }, + { + "client is frozen", + &types.ClientState{ + Sequence: 1, + FrozenSequence: 1, + ConsensusState: solomachine.ConsensusState(), + AllowUpdateAfterProposal: false, + }, + prefix, + proof, + false, + }, + { + "consensus state in client state is nil", + types.NewClientState(1, nil, false), + prefix, + proof, + false, + }, + { + "client state latest height is less than sequence", + types.NewClientState(solomachine.Sequence-1, + &types.ConsensusState{ + Timestamp: solomachine.Time, + PublicKey: solomachine.ConsensusState().PublicKey, + }, false), + prefix, + proof, + false, + }, + { + "consensus state timestamp is greater than signature", + types.NewClientState(solomachine.Sequence, + &types.ConsensusState{ + Timestamp: solomachine.Time + 1, + PublicKey: solomachine.ConsensusState().PublicKey, + }, false), + prefix, + proof, + false, + }, + + { + "proof is nil", + solomachine.ClientState(), + prefix, + nil, + false, + }, + { + "proof verification failed", + solomachine.ClientState(), + prefix, + suite.GetInvalidProof(), + false, + }, + } + + for _, tc := range testCases { + tc := tc + + suite.Run(tc.name, func() { + + var expSeq uint64 + if tc.clientState.ConsensusState != nil { + expSeq = tc.clientState.Sequence + 1 + } + + err := tc.clientState.VerifyClientConsensusState( + suite.store, suite.chainA.Codec, solomachine.GetHeight(), counterpartyClientIdentifier, consensusHeight, tc.prefix, tc.proof, consensusState, + ) + + if tc.expPass { + suite.Require().NoError(err) + suite.Require().Equal(expSeq, suite.GetSequenceFromStore(), "sequence not updated in the store (%d) on valid test case %s", suite.GetSequenceFromStore(), tc.name) + } else { + suite.Require().Error(err) + } + }) + } + } +} + +func (suite *SoloMachineTestSuite) TestVerifyConnectionState() { + counterparty := connectiontypes.NewCounterparty("clientB", testConnectionID, prefix) + conn := connectiontypes.NewConnectionEnd(connectiontypes.OPEN, "clientA", counterparty, connectiontypes.ExportedVersionsToProto(connectiontypes.GetCompatibleVersions()), 0) + + path := suite.solomachine.GetConnectionStatePath(testConnectionID) + + // test singlesig and multisig public keys + for _, solomachine := range []*ibctesting.Solomachine{suite.solomachine, suite.solomachineMulti} { + + value, err := types.ConnectionStateSignBytes(suite.chainA.Codec, solomachine.Sequence, solomachine.Time, solomachine.Diversifier, path, conn) + suite.Require().NoError(err) + + sig := solomachine.GenerateSignature(value) + signatureDoc := &types.TimestampedSignatureData{ + SignatureData: sig, + Timestamp: solomachine.Time, + } + + proof, err := suite.chainA.Codec.MarshalBinaryBare(signatureDoc) + suite.Require().NoError(err) + + testCases := []struct { + name string + clientState *types.ClientState + prefix exported.Prefix + proof []byte + expPass bool + }{ + { + "successful verification", + solomachine.ClientState(), + prefix, + proof, + true, + }, + { + "ApplyPrefix failed", + solomachine.ClientState(), + commitmenttypes.NewMerklePrefix([]byte{}), + proof, + false, + }, + { + "client is frozen", + &types.ClientState{ + Sequence: 1, + FrozenSequence: 1, + ConsensusState: solomachine.ConsensusState(), + AllowUpdateAfterProposal: false, + }, + prefix, + proof, + false, + }, + { + "proof is nil", + solomachine.ClientState(), + prefix, + nil, + false, + }, + { + "proof verification failed", + solomachine.ClientState(), + 
prefix, + suite.GetInvalidProof(), + false, + }, + } + + for i, tc := range testCases { + tc := tc + + expSeq := tc.clientState.Sequence + 1 + + err := tc.clientState.VerifyConnectionState( + suite.store, suite.chainA.Codec, solomachine.GetHeight(), tc.prefix, tc.proof, testConnectionID, conn, + ) + + if tc.expPass { + suite.Require().NoError(err, "valid test case %d failed: %s", i, tc.name) + suite.Require().Equal(expSeq, suite.GetSequenceFromStore(), "sequence not updated in the store (%d) on valid test case %d: %s", suite.GetSequenceFromStore(), i, tc.name) + } else { + suite.Require().Error(err, "invalid test case %d passed: %s", i, tc.name) + } + } + } +} + +func (suite *SoloMachineTestSuite) TestVerifyChannelState() { + counterparty := channeltypes.NewCounterparty(testPortID, testChannelID) + ch := channeltypes.NewChannel(channeltypes.OPEN, channeltypes.ORDERED, counterparty, []string{testConnectionID}, "1.0.0") + + path := suite.solomachine.GetChannelStatePath(testPortID, testChannelID) + + // test singlesig and multisig public keys + for _, solomachine := range []*ibctesting.Solomachine{suite.solomachine, suite.solomachineMulti} { + + value, err := types.ChannelStateSignBytes(suite.chainA.Codec, solomachine.Sequence, solomachine.Time, solomachine.Diversifier, path, ch) + suite.Require().NoError(err) + + sig := solomachine.GenerateSignature(value) + signatureDoc := &types.TimestampedSignatureData{ + SignatureData: sig, + Timestamp: solomachine.Time, + } + + proof, err := suite.chainA.Codec.MarshalBinaryBare(signatureDoc) + suite.Require().NoError(err) + + testCases := []struct { + name string + clientState *types.ClientState + prefix exported.Prefix + proof []byte + expPass bool + }{ + { + "successful verification", + solomachine.ClientState(), + prefix, + proof, + true, + }, + { + "ApplyPrefix failed", + solomachine.ClientState(), + nil, + proof, + false, + }, + { + "client is frozen", + &types.ClientState{ + Sequence: 1, + FrozenSequence: 1, + ConsensusState: solomachine.ConsensusState(), + AllowUpdateAfterProposal: false, + }, + prefix, + proof, + false, + }, + { + "proof is nil", + solomachine.ClientState(), + prefix, + nil, + false, + }, + { + "proof verification failed", + solomachine.ClientState(), + prefix, + suite.GetInvalidProof(), + false, + }, + } + + for i, tc := range testCases { + tc := tc + + expSeq := tc.clientState.Sequence + 1 + + err := tc.clientState.VerifyChannelState( + suite.store, suite.chainA.Codec, solomachine.GetHeight(), tc.prefix, tc.proof, testPortID, testChannelID, ch, + ) + + if tc.expPass { + suite.Require().NoError(err, "valid test case %d failed: %s", i, tc.name) + suite.Require().Equal(expSeq, suite.GetSequenceFromStore(), "sequence not updated in the store (%d) on valid test case %d: %s", suite.GetSequenceFromStore(), i, tc.name) + } else { + suite.Require().Error(err, "invalid test case %d passed: %s", i, tc.name) + } + } + } +} + +func (suite *SoloMachineTestSuite) TestVerifyPacketCommitment() { + commitmentBytes := []byte("COMMITMENT BYTES") + + // test singlesig and multisig public keys + for _, solomachine := range []*ibctesting.Solomachine{suite.solomachine, suite.solomachineMulti} { + + path := solomachine.GetPacketCommitmentPath(testPortID, testChannelID) + + value, err := types.PacketCommitmentSignBytes(suite.chainA.Codec, solomachine.Sequence, solomachine.Time, solomachine.Diversifier, path, commitmentBytes) + suite.Require().NoError(err) + + sig := solomachine.GenerateSignature(value) + signatureDoc := &types.TimestampedSignatureData{ 
+ SignatureData: sig, + Timestamp: solomachine.Time, + } + + proof, err := suite.chainA.Codec.MarshalBinaryBare(signatureDoc) + suite.Require().NoError(err) + + testCases := []struct { + name string + clientState *types.ClientState + prefix exported.Prefix + proof []byte + expPass bool + }{ + { + "successful verification", + solomachine.ClientState(), + prefix, + proof, + true, + }, + { + "ApplyPrefix failed", + solomachine.ClientState(), + commitmenttypes.NewMerklePrefix([]byte{}), + proof, + false, + }, + { + "client is frozen", + &types.ClientState{ + Sequence: 1, + FrozenSequence: 1, + ConsensusState: solomachine.ConsensusState(), + AllowUpdateAfterProposal: false, + }, + prefix, + proof, + false, + }, + { + "proof is nil", + solomachine.ClientState(), + prefix, + nil, + false, + }, + { + "proof verification failed", + solomachine.ClientState(), + prefix, + suite.GetInvalidProof(), + false, + }, + } + + for i, tc := range testCases { + tc := tc + + expSeq := tc.clientState.Sequence + 1 + + err := tc.clientState.VerifyPacketCommitment( + suite.store, suite.chainA.Codec, solomachine.GetHeight(), 0, 0, tc.prefix, tc.proof, testPortID, testChannelID, solomachine.Sequence, commitmentBytes, + ) + + if tc.expPass { + suite.Require().NoError(err, "valid test case %d failed: %s", i, tc.name) + suite.Require().Equal(expSeq, suite.GetSequenceFromStore(), "sequence not updated in the store (%d) on valid test case %d: %s", suite.GetSequenceFromStore(), i, tc.name) + } else { + suite.Require().Error(err, "invalid test case %d passed: %s", i, tc.name) + } + } + } +} + +func (suite *SoloMachineTestSuite) TestVerifyPacketAcknowledgement() { + ack := []byte("ACK") + // test singlesig and multisig public keys + for _, solomachine := range []*ibctesting.Solomachine{suite.solomachine, suite.solomachineMulti} { + + path := solomachine.GetPacketAcknowledgementPath(testPortID, testChannelID) + + value, err := types.PacketAcknowledgementSignBytes(suite.chainA.Codec, solomachine.Sequence, solomachine.Time, solomachine.Diversifier, path, ack) + suite.Require().NoError(err) + + sig := solomachine.GenerateSignature(value) + signatureDoc := &types.TimestampedSignatureData{ + SignatureData: sig, + Timestamp: solomachine.Time, + } + + proof, err := suite.chainA.Codec.MarshalBinaryBare(signatureDoc) + suite.Require().NoError(err) + + testCases := []struct { + name string + clientState *types.ClientState + prefix exported.Prefix + proof []byte + expPass bool + }{ + { + "successful verification", + solomachine.ClientState(), + prefix, + proof, + true, + }, + { + "ApplyPrefix failed", + solomachine.ClientState(), + commitmenttypes.NewMerklePrefix([]byte{}), + proof, + false, + }, + { + "client is frozen", + &types.ClientState{ + Sequence: 1, + FrozenSequence: 1, + ConsensusState: solomachine.ConsensusState(), + AllowUpdateAfterProposal: false, + }, + prefix, + proof, + false, + }, + { + "proof is nil", + solomachine.ClientState(), + prefix, + nil, + false, + }, + { + "proof verification failed", + solomachine.ClientState(), + prefix, + suite.GetInvalidProof(), + false, + }, + } + + for i, tc := range testCases { + tc := tc + + expSeq := tc.clientState.Sequence + 1 + + err := tc.clientState.VerifyPacketAcknowledgement( + suite.store, suite.chainA.Codec, solomachine.GetHeight(), 0, 0, tc.prefix, tc.proof, testPortID, testChannelID, solomachine.Sequence, ack, + ) + + if tc.expPass { + suite.Require().NoError(err, "valid test case %d failed: %s", i, tc.name) + suite.Require().Equal(expSeq, suite.GetSequenceFromStore(), 
"sequence not updated in the store (%d) on valid test case %d: %s", suite.GetSequenceFromStore(), i, tc.name) + } else { + suite.Require().Error(err, "invalid test case %d passed: %s", i, tc.name) + } + } + } +} + +func (suite *SoloMachineTestSuite) TestVerifyPacketReceiptAbsence() { + // test singlesig and multisig public keys + for _, solomachine := range []*ibctesting.Solomachine{suite.solomachine, suite.solomachineMulti} { + + // absence uses receipt path as well + path := solomachine.GetPacketReceiptPath(testPortID, testChannelID) + + value, err := types.PacketReceiptAbsenceSignBytes(suite.chainA.Codec, solomachine.Sequence, solomachine.Time, solomachine.Diversifier, path) + suite.Require().NoError(err) + + sig := solomachine.GenerateSignature(value) + signatureDoc := &types.TimestampedSignatureData{ + SignatureData: sig, + Timestamp: solomachine.Time, + } + + proof, err := suite.chainA.Codec.MarshalBinaryBare(signatureDoc) + suite.Require().NoError(err) + + testCases := []struct { + name string + clientState *types.ClientState + prefix exported.Prefix + proof []byte + expPass bool + }{ + { + "successful verification", + solomachine.ClientState(), + prefix, + proof, + true, + }, + { + "ApplyPrefix failed", + solomachine.ClientState(), + commitmenttypes.NewMerklePrefix([]byte{}), + proof, + false, + }, + { + "client is frozen", + &types.ClientState{ + Sequence: 1, + FrozenSequence: 1, + ConsensusState: solomachine.ConsensusState(), + AllowUpdateAfterProposal: false, + }, + prefix, + proof, + false, + }, + { + "proof is nil", + solomachine.ClientState(), + prefix, + nil, + false, + }, + { + "proof verification failed", + solomachine.ClientState(), + prefix, + suite.GetInvalidProof(), + false, + }, + } + + for i, tc := range testCases { + tc := tc + + expSeq := tc.clientState.Sequence + 1 + + err := tc.clientState.VerifyPacketReceiptAbsence( + suite.store, suite.chainA.Codec, solomachine.GetHeight(), 0, 0, tc.prefix, tc.proof, testPortID, testChannelID, solomachine.Sequence, + ) + + if tc.expPass { + suite.Require().NoError(err, "valid test case %d failed: %s", i, tc.name) + suite.Require().Equal(expSeq, suite.GetSequenceFromStore(), "sequence not updated in the store (%d) on valid test case %d: %s", suite.GetSequenceFromStore(), i, tc.name) + } else { + suite.Require().Error(err, "invalid test case %d passed: %s", i, tc.name) + } + } + } +} + +func (suite *SoloMachineTestSuite) TestVerifyNextSeqRecv() { + // test singlesig and multisig public keys + for _, solomachine := range []*ibctesting.Solomachine{suite.solomachine, suite.solomachineMulti} { + + nextSeqRecv := solomachine.Sequence + 1 + path := solomachine.GetNextSequenceRecvPath(testPortID, testChannelID) + + value, err := types.NextSequenceRecvSignBytes(suite.chainA.Codec, solomachine.Sequence, solomachine.Time, solomachine.Diversifier, path, nextSeqRecv) + suite.Require().NoError(err) + + sig := solomachine.GenerateSignature(value) + signatureDoc := &types.TimestampedSignatureData{ + SignatureData: sig, + Timestamp: solomachine.Time, + } + + proof, err := suite.chainA.Codec.MarshalBinaryBare(signatureDoc) + suite.Require().NoError(err) + + testCases := []struct { + name string + clientState *types.ClientState + prefix exported.Prefix + proof []byte + expPass bool + }{ + { + "successful verification", + solomachine.ClientState(), + prefix, + proof, + true, + }, + { + "ApplyPrefix failed", + solomachine.ClientState(), + commitmenttypes.NewMerklePrefix([]byte{}), + proof, + false, + }, + { + "client is frozen", + 
&types.ClientState{ + Sequence: 1, + FrozenSequence: 1, + ConsensusState: solomachine.ConsensusState(), + AllowUpdateAfterProposal: false, + }, + prefix, + proof, + false, + }, + { + "proof is nil", + solomachine.ClientState(), + prefix, + nil, + false, + }, + { + "proof verification failed", + solomachine.ClientState(), + prefix, + suite.GetInvalidProof(), + false, + }, + } + + for i, tc := range testCases { + tc := tc + + expSeq := tc.clientState.Sequence + 1 + + err := tc.clientState.VerifyNextSequenceRecv( + suite.store, suite.chainA.Codec, solomachine.GetHeight(), 0, 0, tc.prefix, tc.proof, testPortID, testChannelID, nextSeqRecv, + ) + + if tc.expPass { + suite.Require().NoError(err, "valid test case %d failed: %s", i, tc.name) + suite.Require().Equal(expSeq, suite.GetSequenceFromStore(), "sequence not updated in the store (%d) on valid test case %d: %s", suite.GetSequenceFromStore(), i, tc.name) + } else { + suite.Require().Error(err, "invalid test case %d passed: %s", i, tc.name) + } + } + } +} diff --git a/light-clients/06-solomachine/types/codec.go b/light-clients/06-solomachine/types/codec.go new file mode 100644 index 0000000000..313a910ca9 --- /dev/null +++ b/light-clients/06-solomachine/types/codec.go @@ -0,0 +1,130 @@ +package types + +import ( + "github.com/cosmos/cosmos-sdk/codec" + codectypes "github.com/cosmos/cosmos-sdk/codec/types" + sdkerrors "github.com/cosmos/cosmos-sdk/types/errors" + "github.com/cosmos/cosmos-sdk/types/tx/signing" + clienttypes "github.com/cosmos/cosmos-sdk/x/ibc/core/02-client/types" + "github.com/cosmos/cosmos-sdk/x/ibc/core/exported" +) + +// RegisterInterfaces registers the solo machine light client implementations to protobuf +// Any. +func RegisterInterfaces(registry codectypes.InterfaceRegistry) { + registry.RegisterImplementations( + (*exported.ClientState)(nil), + &ClientState{}, + ) + registry.RegisterImplementations( + (*exported.ConsensusState)(nil), + &ConsensusState{}, + ) + registry.RegisterImplementations( + (*exported.Header)(nil), + &Header{}, + ) + registry.RegisterImplementations( + (*exported.Misbehaviour)(nil), + &Misbehaviour{}, + ) +} + +// UnmarshalSignatureData unmarshals the given data into a signing.SignatureData type. +func UnmarshalSignatureData(cdc codec.BinaryMarshaler, data []byte) (signing.SignatureData, error) { + protoSigData := &signing.SignatureDescriptor_Data{} + if err := cdc.UnmarshalBinaryBare(data, protoSigData); err != nil { + return nil, sdkerrors.Wrapf(err, "failed to unmarshal proof into type %T", protoSigData) + } + + sigData := signing.SignatureDataFromProto(protoSigData) + + return sigData, nil +} + +// UnmarshalDataByType attempts to unmarshal the data to the specified type. An error is +// returned if it fails.
+func UnmarshalDataByType(cdc codec.BinaryMarshaler, dataType DataType, data []byte) (Data, error) { + if len(data) == 0 { + return nil, sdkerrors.Wrap(ErrInvalidSignatureAndData, "data cannot be empty") + } + + switch dataType { + case UNSPECIFIED: + return nil, sdkerrors.Wrap(ErrInvalidDataType, "data type cannot be UNSPECIFIED") + + case CLIENT: + clientData := &ClientStateData{} + if err := cdc.UnmarshalBinaryBare(data, clientData); err != nil { + return nil, err + } + + // unpack any + if _, err := clienttypes.UnpackClientState(clientData.ClientState); err != nil { + return nil, err + } + return clientData, nil + + case CONSENSUS: + consensusData := &ConsensusStateData{} + if err := cdc.UnmarshalBinaryBare(data, consensusData); err != nil { + return nil, err + } + + // unpack any + if _, err := clienttypes.UnpackConsensusState(consensusData.ConsensusState); err != nil { + return nil, err + } + return consensusData, nil + + case CONNECTION: + connectionData := &ConnectionStateData{} + if err := cdc.UnmarshalBinaryBare(data, connectionData); err != nil { + return nil, err + } + + return connectionData, nil + + case CHANNEL: + channelData := &ChannelStateData{} + if err := cdc.UnmarshalBinaryBare(data, channelData); err != nil { + return nil, err + } + + return channelData, nil + + case PACKETCOMMITMENT: + commitmentData := &PacketCommitmentData{} + if err := cdc.UnmarshalBinaryBare(data, commitmentData); err != nil { + return nil, err + } + + return commitmentData, nil + + case PACKETACKNOWLEDGEMENT: + ackData := &PacketAcknowledgementData{} + if err := cdc.UnmarshalBinaryBare(data, ackData); err != nil { + return nil, err + } + + return ackData, nil + + case PACKETRECEIPTABSENCE: + receiptAbsenceData := &PacketReceiptAbsenceData{} + if err := cdc.UnmarshalBinaryBare(data, receiptAbsenceData); err != nil { + return nil, err + } + + return receiptAbsenceData, nil + + case NEXTSEQUENCERECV: + nextSeqRecvData := &NextSequenceRecvData{} + if err := cdc.UnmarshalBinaryBare(data, nextSeqRecvData); err != nil { + return nil, err + } + + return nextSeqRecvData, nil + + default: + return nil, sdkerrors.Wrapf(ErrInvalidDataType, "unsupported data type %T", dataType) + } +} diff --git a/light-clients/06-solomachine/types/codec_test.go b/light-clients/06-solomachine/types/codec_test.go new file mode 100644 index 0000000000..70be186a10 --- /dev/null +++ b/light-clients/06-solomachine/types/codec_test.go @@ -0,0 +1,190 @@ +package types_test + +import ( + clienttypes "github.com/cosmos/cosmos-sdk/x/ibc/core/02-client/types" + connectiontypes "github.com/cosmos/cosmos-sdk/x/ibc/core/03-connection/types" + channeltypes "github.com/cosmos/cosmos-sdk/x/ibc/core/04-channel/types" + "github.com/cosmos/cosmos-sdk/x/ibc/light-clients/06-solomachine/types" + ibctesting "github.com/cosmos/cosmos-sdk/x/ibc/testing" +) + +func (suite SoloMachineTestSuite) TestUnmarshalDataByType() { + var ( + data []byte + err error + ) + + // test singlesig and multisig public keys + for _, solomachine := range []*ibctesting.Solomachine{suite.solomachine, suite.solomachineMulti} { + + cdc := suite.chainA.App.AppCodec() + cases := []struct { + name string + dataType types.DataType + malleate func() + expPass bool + }{ + { + "empty data", types.CLIENT, func() { + data = []byte{} + }, false, + }, + { + "unspecified", types.UNSPECIFIED, func() { + path := solomachine.GetClientStatePath(counterpartyClientIdentifier) + data, err = types.ClientStateDataBytes(cdc, path, solomachine.ClientState()) + suite.Require().NoError(err) + }, 
false, + }, + { + "client", types.CLIENT, func() { + path := solomachine.GetClientStatePath(counterpartyClientIdentifier) + data, err = types.ClientStateDataBytes(cdc, path, solomachine.ClientState()) + suite.Require().NoError(err) + }, true, + }, + { + "bad client (provides consensus state data)", types.CLIENT, func() { + path := solomachine.GetConsensusStatePath(counterpartyClientIdentifier, clienttypes.NewHeight(0, 5)) + data, err = types.ConsensusStateDataBytes(cdc, path, solomachine.ConsensusState()) + suite.Require().NoError(err) + }, false, + }, + { + "consensus", types.CONSENSUS, func() { + path := solomachine.GetConsensusStatePath(counterpartyClientIdentifier, clienttypes.NewHeight(0, 5)) + data, err = types.ConsensusStateDataBytes(cdc, path, solomachine.ConsensusState()) + suite.Require().NoError(err) + + }, true, + }, + { + "bad consensus (provides client state data)", types.CONSENSUS, func() { + path := solomachine.GetClientStatePath(counterpartyClientIdentifier) + data, err = types.ClientStateDataBytes(cdc, path, solomachine.ClientState()) + suite.Require().NoError(err) + }, false, + }, + { + "connection", types.CONNECTION, func() { + counterparty := connectiontypes.NewCounterparty("clientB", testConnectionID, prefix) + conn := connectiontypes.NewConnectionEnd(connectiontypes.OPEN, "clientA", counterparty, connectiontypes.ExportedVersionsToProto(connectiontypes.GetCompatibleVersions()), 0) + path := solomachine.GetConnectionStatePath("connectionID") + + data, err = types.ConnectionStateDataBytes(cdc, path, conn) + suite.Require().NoError(err) + + }, true, + }, + { + "bad connection (uses channel data)", types.CONNECTION, func() { + counterparty := channeltypes.NewCounterparty(testPortID, testChannelID) + ch := channeltypes.NewChannel(channeltypes.OPEN, channeltypes.ORDERED, counterparty, []string{testConnectionID}, "1.0.0") + path := solomachine.GetChannelStatePath("portID", "channelID") + + data, err = types.ChannelStateDataBytes(cdc, path, ch) + suite.Require().NoError(err) + }, false, + }, + { + "channel", types.CHANNEL, func() { + counterparty := channeltypes.NewCounterparty(testPortID, testChannelID) + ch := channeltypes.NewChannel(channeltypes.OPEN, channeltypes.ORDERED, counterparty, []string{testConnectionID}, "1.0.0") + path := solomachine.GetChannelStatePath("portID", "channelID") + + data, err = types.ChannelStateDataBytes(cdc, path, ch) + suite.Require().NoError(err) + }, true, + }, + { + "bad channel (uses connection data)", types.CHANNEL, func() { + counterparty := connectiontypes.NewCounterparty("clientB", testConnectionID, prefix) + conn := connectiontypes.NewConnectionEnd(connectiontypes.OPEN, "clientA", counterparty, connectiontypes.ExportedVersionsToProto(connectiontypes.GetCompatibleVersions()), 0) + path := solomachine.GetConnectionStatePath("connectionID") + + data, err = types.ConnectionStateDataBytes(cdc, path, conn) + suite.Require().NoError(err) + + }, false, + }, + { + "packet commitment", types.PACKETCOMMITMENT, func() { + commitment := []byte("packet commitment") + path := solomachine.GetPacketCommitmentPath("portID", "channelID") + + data, err = types.PacketCommitmentDataBytes(cdc, path, commitment) + suite.Require().NoError(err) + }, true, + }, + { + "bad packet commitment (uses next seq recv)", types.PACKETCOMMITMENT, func() { + path := solomachine.GetNextSequenceRecvPath("portID", "channelID") + + data, err = types.NextSequenceRecvDataBytes(cdc, path, 10) + suite.Require().NoError(err) + }, false, + }, + { + "packet acknowledgement", 
types.PACKETACKNOWLEDGEMENT, func() { + commitment := []byte("packet acknowledgement") + path := solomachine.GetPacketAcknowledgementPath("portID", "channelID") + + data, err = types.PacketAcknowledgementDataBytes(cdc, path, commitment) + suite.Require().NoError(err) + }, true, + }, + { + "bad packet acknowledgement (uses next sequence recv)", types.PACKETACKNOWLEDGEMENT, func() { + path := solomachine.GetNextSequenceRecvPath("portID", "channelID") + + data, err = types.NextSequenceRecvDataBytes(cdc, path, 10) + suite.Require().NoError(err) + }, false, + }, + { + "packet acknowledgement absence", types.PACKETRECEIPTABSENCE, func() { + path := solomachine.GetPacketReceiptPath("portID", "channelID") + + data, err = types.PacketReceiptAbsenceDataBytes(cdc, path) + suite.Require().NoError(err) + }, true, + }, + { + "next sequence recv", types.NEXTSEQUENCERECV, func() { + path := solomachine.GetNextSequenceRecvPath("portID", "channelID") + + data, err = types.NextSequenceRecvDataBytes(cdc, path, 10) + suite.Require().NoError(err) + }, true, + }, + { + "bad next sequence recv (uses packet commitment)", types.NEXTSEQUENCERECV, func() { + commitment := []byte("packet commitment") + path := solomachine.GetPacketCommitmentPath("portID", "channelID") + + data, err = types.PacketCommitmentDataBytes(cdc, path, commitment) + suite.Require().NoError(err) + }, false, + }, + } + + for _, tc := range cases { + tc := tc + + suite.Run(tc.name, func() { + tc.malleate() + + data, err := types.UnmarshalDataByType(cdc, tc.dataType, data) + + if tc.expPass { + suite.Require().NoError(err) + suite.Require().NotNil(data) + } else { + suite.Require().Error(err) + suite.Require().Nil(data) + } + }) + } + } + +} diff --git a/light-clients/06-solomachine/types/consensus_state.go b/light-clients/06-solomachine/types/consensus_state.go new file mode 100644 index 0000000000..7d6d09cd04 --- /dev/null +++ b/light-clients/06-solomachine/types/consensus_state.go @@ -0,0 +1,60 @@ +package types + +import ( + "strings" + + cryptotypes "github.com/cosmos/cosmos-sdk/crypto/types" + sdkerrors "github.com/cosmos/cosmos-sdk/types/errors" + clienttypes "github.com/cosmos/cosmos-sdk/x/ibc/core/02-client/types" + "github.com/cosmos/cosmos-sdk/x/ibc/core/exported" +) + +var _ exported.ConsensusState = &ConsensusState{} + +// ClientType returns Solo Machine type. +func (ConsensusState) ClientType() string { + return exported.Solomachine +} + +// GetTimestamp returns zero. +func (cs ConsensusState) GetTimestamp() uint64 { + return cs.Timestamp +} + +// GetRoot returns nil since solo machines do not have roots. +func (cs ConsensusState) GetRoot() exported.Root { + return nil +} + +// GetPubKey unmarshals the public key into a cryptotypes.PubKey type. +// An error is returned if the public key is nil or the cached value +// is not a PubKey. +func (cs ConsensusState) GetPubKey() (cryptotypes.PubKey, error) { + if cs.PublicKey == nil { + return nil, sdkerrors.Wrap(clienttypes.ErrInvalidConsensus, "consensus state PublicKey cannot be nil") + } + + publicKey, ok := cs.PublicKey.GetCachedValue().(cryptotypes.PubKey) + if !ok { + return nil, sdkerrors.Wrap(clienttypes.ErrInvalidConsensus, "consensus state PublicKey is not cryptotypes.PubKey") + } + + return publicKey, nil +} + +// ValidateBasic defines basic validation for the solo machine consensus state. 
+func (cs ConsensusState) ValidateBasic() error { + if cs.Timestamp == 0 { + return sdkerrors.Wrap(clienttypes.ErrInvalidConsensus, "timestamp cannot be 0") + } + if cs.Diversifier != "" && strings.TrimSpace(cs.Diversifier) == "" { + return sdkerrors.Wrap(clienttypes.ErrInvalidConsensus, "diversifier cannot contain only spaces") + } + + publicKey, err := cs.GetPubKey() + if err != nil || publicKey == nil || len(publicKey.Bytes()) == 0 { + return sdkerrors.Wrap(clienttypes.ErrInvalidConsensus, "public key cannot be empty") + } + + return nil +} diff --git a/light-clients/06-solomachine/types/consensus_state_test.go b/light-clients/06-solomachine/types/consensus_state_test.go new file mode 100644 index 0000000000..e0c22f9595 --- /dev/null +++ b/light-clients/06-solomachine/types/consensus_state_test.go @@ -0,0 +1,75 @@ +package types_test + +import ( + "github.com/cosmos/cosmos-sdk/x/ibc/core/exported" + "github.com/cosmos/cosmos-sdk/x/ibc/light-clients/06-solomachine/types" + ibctesting "github.com/cosmos/cosmos-sdk/x/ibc/testing" +) + +func (suite *SoloMachineTestSuite) TestConsensusState() { + consensusState := suite.solomachine.ConsensusState() + + suite.Require().Equal(exported.Solomachine, consensusState.ClientType()) + suite.Require().Equal(suite.solomachine.Time, consensusState.GetTimestamp()) + suite.Require().Nil(consensusState.GetRoot()) +} + +func (suite *SoloMachineTestSuite) TestConsensusStateValidateBasic() { + // test singlesig and multisig public keys + for _, solomachine := range []*ibctesting.Solomachine{suite.solomachine, suite.solomachineMulti} { + + testCases := []struct { + name string + consensusState *types.ConsensusState + expPass bool + }{ + { + "valid consensus state", + solomachine.ConsensusState(), + true, + }, + { + "timestamp is zero", + &types.ConsensusState{ + PublicKey: solomachine.ConsensusState().PublicKey, + Timestamp: 0, + Diversifier: solomachine.Diversifier, + }, + false, + }, + { + "diversifier is blank", + &types.ConsensusState{ + PublicKey: solomachine.ConsensusState().PublicKey, + Timestamp: solomachine.Time, + Diversifier: " ", + }, + false, + }, + { + "pubkey is nil", + &types.ConsensusState{ + Timestamp: solomachine.Time, + Diversifier: solomachine.Diversifier, + PublicKey: nil, + }, + false, + }, + } + + for _, tc := range testCases { + tc := tc + + suite.Run(tc.name, func() { + + err := tc.consensusState.ValidateBasic() + + if tc.expPass { + suite.Require().NoError(err) + } else { + suite.Require().Error(err) + } + }) + } + } +} diff --git a/light-clients/06-solomachine/types/errors.go b/light-clients/06-solomachine/types/errors.go new file mode 100644 index 0000000000..3e27f60732 --- /dev/null +++ b/light-clients/06-solomachine/types/errors.go @@ -0,0 +1,18 @@ +package types + +import ( + sdkerrors "github.com/cosmos/cosmos-sdk/types/errors" +) + +const ( + SubModuleName = "solo machine" +) + +var ( + ErrInvalidHeader = sdkerrors.Register(SubModuleName, 2, "invalid header") + ErrInvalidSequence = sdkerrors.Register(SubModuleName, 3, "invalid sequence") + ErrInvalidSignatureAndData = sdkerrors.Register(SubModuleName, 4, "invalid signature and data") + ErrSignatureVerificationFailed = sdkerrors.Register(SubModuleName, 5, "signature verification failed") + ErrInvalidProof = sdkerrors.Register(SubModuleName, 6, "invalid solo machine proof") + ErrInvalidDataType = sdkerrors.Register(SubModuleName, 7, "invalid data type") +) diff --git a/light-clients/06-solomachine/types/header.go b/light-clients/06-solomachine/types/header.go new file mode 
100644 index 0000000000..f9c5f176fd --- /dev/null +++ b/light-clients/06-solomachine/types/header.go @@ -0,0 +1,67 @@ +package types + +import ( + "strings" + + cryptotypes "github.com/cosmos/cosmos-sdk/crypto/types" + sdkerrors "github.com/cosmos/cosmos-sdk/types/errors" + clienttypes "github.com/cosmos/cosmos-sdk/x/ibc/core/02-client/types" + "github.com/cosmos/cosmos-sdk/x/ibc/core/exported" +) + +var _ exported.Header = &Header{} + +// ClientType defines that the Header is a Solo Machine. +func (Header) ClientType() string { + return exported.Solomachine +} + +// GetHeight returns the current sequence number as the height. +// Return clientexported.Height to satisfy interface +// Revision number is always 0 for a solo-machine +func (h Header) GetHeight() exported.Height { + return clienttypes.NewHeight(0, h.Sequence) +} + +// GetPubKey unmarshals the new public key into a cryptotypes.PubKey type. +// An error is returned if the new public key is nil or the cached value +// is not a PubKey. +func (h Header) GetPubKey() (cryptotypes.PubKey, error) { + if h.NewPublicKey == nil { + return nil, sdkerrors.Wrap(ErrInvalidHeader, "header NewPublicKey cannot be nil") + } + + publicKey, ok := h.NewPublicKey.GetCachedValue().(cryptotypes.PubKey) + if !ok { + return nil, sdkerrors.Wrap(ErrInvalidHeader, "header NewPublicKey is not cryptotypes.PubKey") + } + + return publicKey, nil +} + +// ValidateBasic ensures that the sequence, signature and public key have all +// been initialized. +func (h Header) ValidateBasic() error { + if h.Sequence == 0 { + return sdkerrors.Wrap(clienttypes.ErrInvalidHeader, "sequence number cannot be zero") + } + + if h.Timestamp == 0 { + return sdkerrors.Wrap(clienttypes.ErrInvalidHeader, "timestamp cannot be zero") + } + + if h.NewDiversifier != "" && strings.TrimSpace(h.NewDiversifier) == "" { + return sdkerrors.Wrap(clienttypes.ErrInvalidHeader, "diversifier cannot contain only spaces") + } + + if len(h.Signature) == 0 { + return sdkerrors.Wrap(clienttypes.ErrInvalidHeader, "signature cannot be empty") + } + + newPublicKey, err := h.GetPubKey() + if err != nil || newPublicKey == nil || len(newPublicKey.Bytes()) == 0 { + return sdkerrors.Wrap(clienttypes.ErrInvalidHeader, "new public key cannot be empty") + } + + return nil +} diff --git a/light-clients/06-solomachine/types/header_test.go b/light-clients/06-solomachine/types/header_test.go new file mode 100644 index 0000000000..a5ca45e8aa --- /dev/null +++ b/light-clients/06-solomachine/types/header_test.go @@ -0,0 +1,98 @@ +package types_test + +import ( + "github.com/cosmos/cosmos-sdk/x/ibc/core/exported" + "github.com/cosmos/cosmos-sdk/x/ibc/light-clients/06-solomachine/types" + ibctesting "github.com/cosmos/cosmos-sdk/x/ibc/testing" +) + +func (suite *SoloMachineTestSuite) TestHeaderValidateBasic() { + // test singlesig and multisig public keys + for _, solomachine := range []*ibctesting.Solomachine{suite.solomachine, suite.solomachineMulti} { + + header := solomachine.CreateHeader() + + cases := []struct { + name string + header *types.Header + expPass bool + }{ + { + "valid header", + header, + true, + }, + { + "sequence is zero", + &types.Header{ + Sequence: 0, + Timestamp: header.Timestamp, + Signature: header.Signature, + NewPublicKey: header.NewPublicKey, + NewDiversifier: header.NewDiversifier, + }, + false, + }, + { + "timestamp is zero", + &types.Header{ + Sequence: header.Sequence, + Timestamp: 0, + Signature: header.Signature, + NewPublicKey: header.NewPublicKey, + NewDiversifier: header.NewDiversifier, 
+ }, + false, + }, + { + "signature is empty", + &types.Header{ + Sequence: header.Sequence, + Timestamp: header.Timestamp, + Signature: []byte{}, + NewPublicKey: header.NewPublicKey, + NewDiversifier: header.NewDiversifier, + }, + false, + }, + { + "diversifier contains only spaces", + &types.Header{ + Sequence: header.Sequence, + Timestamp: header.Timestamp, + Signature: header.Signature, + NewPublicKey: header.NewPublicKey, + NewDiversifier: " ", + }, + false, + }, + { + "public key is nil", + &types.Header{ + Sequence: header.Sequence, + Timestamp: header.Timestamp, + Signature: header.Signature, + NewPublicKey: nil, + NewDiversifier: header.NewDiversifier, + }, + false, + }, + } + + suite.Require().Equal(exported.Solomachine, header.ClientType()) + + for _, tc := range cases { + tc := tc + + suite.Run(tc.name, func() { + err := tc.header.ValidateBasic() + + if tc.expPass { + suite.Require().NoError(err) + } else { + suite.Require().Error(err) + } + }) + } + } +} diff --git a/light-clients/06-solomachine/types/misbehaviour.go b/light-clients/06-solomachine/types/misbehaviour.go new file mode 100644 index 0000000000..f5b218ccf2 --- /dev/null +++ b/light-clients/06-solomachine/types/misbehaviour.go @@ -0,0 +1,83 @@ +package types + +import ( + "bytes" + + sdkerrors "github.com/cosmos/cosmos-sdk/types/errors" + clienttypes "github.com/cosmos/cosmos-sdk/x/ibc/core/02-client/types" + host "github.com/cosmos/cosmos-sdk/x/ibc/core/24-host" + "github.com/cosmos/cosmos-sdk/x/ibc/core/exported" +) + +var _ exported.Misbehaviour = &Misbehaviour{} + +// ClientType is a Solo Machine light client. +func (misbehaviour Misbehaviour) ClientType() string { + return exported.Solomachine +} + +// GetClientID returns the ID of the client that committed a misbehaviour. +func (misbehaviour Misbehaviour) GetClientID() string { + return misbehaviour.ClientId +} + +// Type implements Evidence interface. +func (misbehaviour Misbehaviour) Type() string { + return exported.TypeClientMisbehaviour +} + +// GetHeight returns the sequence at which misbehaviour occurred. +// Return exported.Height to satisfy interface +// Revision number is always 0 for a solo-machine +func (misbehaviour Misbehaviour) GetHeight() exported.Height { + return clienttypes.NewHeight(0, misbehaviour.Sequence) +} + +// ValidateBasic implements Evidence interface. 
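+// Misbehaviour is valid if the client identifier is well formed, the sequence
+// is non-zero, both SignatureAndData pass their own basic validation, and the
+// two signatures as well as the two signed data bytes differ from each other.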
+func (misbehaviour Misbehaviour) ValidateBasic() error { + if err := host.ClientIdentifierValidator(misbehaviour.ClientId); err != nil { + return sdkerrors.Wrap(err, "invalid client identifier for solo machine") + } + + if misbehaviour.Sequence == 0 { + return sdkerrors.Wrap(clienttypes.ErrInvalidMisbehaviour, "sequence cannot be 0") + } + + if err := misbehaviour.SignatureOne.ValidateBasic(); err != nil { + return sdkerrors.Wrap(err, "signature one failed basic validation") + } + + if err := misbehaviour.SignatureTwo.ValidateBasic(); err != nil { + return sdkerrors.Wrap(err, "signature two failed basic validation") + } + + // misbehaviour signatures cannot be identical + if bytes.Equal(misbehaviour.SignatureOne.Signature, misbehaviour.SignatureTwo.Signature) { + return sdkerrors.Wrap(clienttypes.ErrInvalidMisbehaviour, "misbehaviour signatures cannot be equal") + } + + // message data signed cannot be identical + if bytes.Equal(misbehaviour.SignatureOne.Data, misbehaviour.SignatureTwo.Data) { + return sdkerrors.Wrap(clienttypes.ErrInvalidMisbehaviour, "misbehaviour signature data must be signed over different messages") + } + + return nil +} + +// ValidateBasic ensures that the signature and data fields are non-empty. +func (sd SignatureAndData) ValidateBasic() error { + if len(sd.Signature) == 0 { + return sdkerrors.Wrap(ErrInvalidSignatureAndData, "signature cannot be empty") + } + if len(sd.Data) == 0 { + return sdkerrors.Wrap(ErrInvalidSignatureAndData, "data for signature cannot be empty") + } + if sd.DataType == UNSPECIFIED { + return sdkerrors.Wrap(ErrInvalidSignatureAndData, "data type cannot be UNSPECIFIED") + } + if sd.Timestamp == 0 { + return sdkerrors.Wrap(ErrInvalidSignatureAndData, "timestamp cannot be 0") + } + + return nil +} diff --git a/light-clients/06-solomachine/types/misbehaviour_handle.go b/light-clients/06-solomachine/types/misbehaviour_handle.go new file mode 100644 index 0000000000..ce5d6351c4 --- /dev/null +++ b/light-clients/06-solomachine/types/misbehaviour_handle.go @@ -0,0 +1,92 @@ +package types + +import ( + "github.com/cosmos/cosmos-sdk/codec" + sdk "github.com/cosmos/cosmos-sdk/types" + sdkerrors "github.com/cosmos/cosmos-sdk/types/errors" + clienttypes "github.com/cosmos/cosmos-sdk/x/ibc/core/02-client/types" + "github.com/cosmos/cosmos-sdk/x/ibc/core/exported" +) + +// CheckMisbehaviourAndUpdateState determines whether or not the currently registered +// public key signed over two different messages with the same sequence. If this is true +// the client state is updated to a frozen status. +// NOTE: Misbehaviour is not tracked for previous public keys, a solo machine may update to +// a new public key before the misbehaviour is processed. Therefore, misbehaviour is data +// order processing dependent. +func (cs ClientState) CheckMisbehaviourAndUpdateState( + ctx sdk.Context, + cdc codec.BinaryMarshaler, + clientStore sdk.KVStore, + misbehaviour exported.Misbehaviour, +) (exported.ClientState, error) { + + soloMisbehaviour, ok := misbehaviour.(*Misbehaviour) + if !ok { + return nil, sdkerrors.Wrapf( + clienttypes.ErrInvalidClientType, + "misbehaviour type %T, expected %T", misbehaviour, &Misbehaviour{}, + ) + } + + if cs.IsFrozen() { + return nil, sdkerrors.Wrapf(clienttypes.ErrClientFrozen, "client is already frozen") + } + + // NOTE: a check that the misbehaviour message data are not equal is done by + // misbehaviour.ValidateBasic which is called by the 02-client keeper. 
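+	// Both signatures must verify against the public key currently registered in
+	// the consensus state, using the misbehaviour sequence together with each
+	// signature's own timestamp, data type and data.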
+ + // verify first signature + if err := verifySignatureAndData(cdc, cs, soloMisbehaviour, soloMisbehaviour.SignatureOne); err != nil { + return nil, sdkerrors.Wrap(err, "failed to verify signature one") + } + + // verify second signature + if err := verifySignatureAndData(cdc, cs, soloMisbehaviour, soloMisbehaviour.SignatureTwo); err != nil { + return nil, sdkerrors.Wrap(err, "failed to verify signature two") + } + + cs.FrozenSequence = soloMisbehaviour.Sequence + return &cs, nil +} + +// verifySignatureAndData verifies that the currently registered public key has signed +// over the provided data and that the data is valid. The data is valid if it can be +// unmarshaled into the specified data type. +func verifySignatureAndData(cdc codec.BinaryMarshaler, clientState ClientState, misbehaviour *Misbehaviour, sigAndData *SignatureAndData) error { + + // do not check misbehaviour timestamp since we want to allow processing of past misbehaviour + + // ensure data can be unmarshaled to the specified data type + if _, err := UnmarshalDataByType(cdc, sigAndData.DataType, sigAndData.Data); err != nil { + return err + } + + data, err := MisbehaviourSignBytes( + cdc, + misbehaviour.Sequence, sigAndData.Timestamp, + clientState.ConsensusState.Diversifier, + sigAndData.DataType, + sigAndData.Data, + ) + if err != nil { + return err + } + + sigData, err := UnmarshalSignatureData(cdc, sigAndData.Signature) + if err != nil { + return err + } + + publicKey, err := clientState.ConsensusState.GetPubKey() + if err != nil { + return err + } + + if err := VerifySignature(publicKey, data, sigData); err != nil { + return err + } + + return nil + +} diff --git a/light-clients/06-solomachine/types/misbehaviour_handle_test.go b/light-clients/06-solomachine/types/misbehaviour_handle_test.go new file mode 100644 index 0000000000..97ce22a3ed --- /dev/null +++ b/light-clients/06-solomachine/types/misbehaviour_handle_test.go @@ -0,0 +1,275 @@ +package types_test + +import ( + "github.com/cosmos/cosmos-sdk/x/ibc/core/exported" + "github.com/cosmos/cosmos-sdk/x/ibc/light-clients/06-solomachine/types" + ibctmtypes "github.com/cosmos/cosmos-sdk/x/ibc/light-clients/07-tendermint/types" + ibctesting "github.com/cosmos/cosmos-sdk/x/ibc/testing" +) + +func (suite *SoloMachineTestSuite) TestCheckMisbehaviourAndUpdateState() { + var ( + clientState exported.ClientState + misbehaviour exported.Misbehaviour + ) + + // test singlesig and multisig public keys + for _, solomachine := range []*ibctesting.Solomachine{suite.solomachine, suite.solomachineMulti} { + + testCases := []struct { + name string + setup func() + expPass bool + }{ + { + "valid misbehaviour", + func() { + clientState = solomachine.ClientState() + misbehaviour = solomachine.CreateMisbehaviour() + }, + true, + }, + { + "old misbehaviour is successful (timestamp is less than current consensus state)", + func() { + clientState = solomachine.ClientState() + solomachine.Time = solomachine.Time - 5 + misbehaviour = solomachine.CreateMisbehaviour() + }, true, + }, + { + "client is frozen", + func() { + cs := solomachine.ClientState() + cs.FrozenSequence = 1 + clientState = cs + misbehaviour = solomachine.CreateMisbehaviour() + }, + false, + }, + { + "wrong client state type", + func() { + clientState = &ibctmtypes.ClientState{} + misbehaviour = solomachine.CreateMisbehaviour() + }, + false, + }, + { + "invalid misbehaviour type", + func() { + clientState = solomachine.ClientState() + misbehaviour = &ibctmtypes.Misbehaviour{} + }, + false, + }, + { + "invalid SignatureOne 
SignatureData", + func() { + clientState = solomachine.ClientState() + m := solomachine.CreateMisbehaviour() + + m.SignatureOne.Signature = suite.GetInvalidProof() + misbehaviour = m + }, false, + }, + { + "invalid SignatureTwo SignatureData", + func() { + clientState = solomachine.ClientState() + m := solomachine.CreateMisbehaviour() + + m.SignatureTwo.Signature = suite.GetInvalidProof() + misbehaviour = m + }, false, + }, + { + "invalid SignatureOne timestamp", + func() { + clientState = solomachine.ClientState() + m := solomachine.CreateMisbehaviour() + + m.SignatureOne.Timestamp = 1000000000000 + misbehaviour = m + }, false, + }, + { + "invalid SignatureTwo timestamp", + func() { + clientState = solomachine.ClientState() + m := solomachine.CreateMisbehaviour() + + m.SignatureTwo.Timestamp = 1000000000000 + misbehaviour = m + }, false, + }, + { + "invalid first signature data", + func() { + clientState = solomachine.ClientState() + + // store in temp before assigning to interface type + m := solomachine.CreateMisbehaviour() + + msg := []byte("DATA ONE") + signBytes := &types.SignBytes{ + Sequence: solomachine.Sequence + 1, + Timestamp: solomachine.Time, + Diversifier: solomachine.Diversifier, + DataType: types.CLIENT, + Data: msg, + } + + data, err := suite.chainA.Codec.MarshalBinaryBare(signBytes) + suite.Require().NoError(err) + + sig := solomachine.GenerateSignature(data) + + m.SignatureOne.Signature = sig + m.SignatureOne.Data = msg + misbehaviour = m + }, + false, + }, + { + "invalid second signature data", + func() { + clientState = solomachine.ClientState() + + // store in temp before assigning to interface type + m := solomachine.CreateMisbehaviour() + + msg := []byte("DATA TWO") + signBytes := &types.SignBytes{ + Sequence: solomachine.Sequence + 1, + Timestamp: solomachine.Time, + Diversifier: solomachine.Diversifier, + DataType: types.CLIENT, + Data: msg, + } + + data, err := suite.chainA.Codec.MarshalBinaryBare(signBytes) + suite.Require().NoError(err) + + sig := solomachine.GenerateSignature(data) + + m.SignatureTwo.Signature = sig + m.SignatureTwo.Data = msg + misbehaviour = m + }, + false, + }, + { + "wrong pubkey generates first signature", + func() { + clientState = solomachine.ClientState() + badMisbehaviour := solomachine.CreateMisbehaviour() + + // update public key to a new one + solomachine.CreateHeader() + m := solomachine.CreateMisbehaviour() + + // set SignatureOne to use the wrong signature + m.SignatureOne = badMisbehaviour.SignatureOne + misbehaviour = m + }, false, + }, + { + "wrong pubkey generates second signature", + func() { + clientState = solomachine.ClientState() + badMisbehaviour := solomachine.CreateMisbehaviour() + + // update public key to a new one + solomachine.CreateHeader() + m := solomachine.CreateMisbehaviour() + + // set SignatureTwo to use the wrong signature + m.SignatureTwo = badMisbehaviour.SignatureTwo + misbehaviour = m + }, false, + }, + + { + "signatures sign over different sequence", + func() { + clientState = solomachine.ClientState() + + // store in temp before assigning to interface type + m := solomachine.CreateMisbehaviour() + + // Signature One + msg := []byte("DATA ONE") + // sequence used is plus 1 + signBytes := &types.SignBytes{ + Sequence: solomachine.Sequence + 1, + Timestamp: solomachine.Time, + Diversifier: solomachine.Diversifier, + DataType: types.CLIENT, + Data: msg, + } + + data, err := suite.chainA.Codec.MarshalBinaryBare(signBytes) + suite.Require().NoError(err) + + sig := solomachine.GenerateSignature(data) + + 
m.SignatureOne.Signature = sig + m.SignatureOne.Data = msg + + // Signature Two + msg = []byte("DATA TWO") + // sequence used is minus 1 + + signBytes = &types.SignBytes{ + Sequence: solomachine.Sequence - 1, + Timestamp: solomachine.Time, + Diversifier: solomachine.Diversifier, + DataType: types.CLIENT, + Data: msg, + } + data, err = suite.chainA.Codec.MarshalBinaryBare(signBytes) + suite.Require().NoError(err) + + sig = solomachine.GenerateSignature(data) + + m.SignatureTwo.Signature = sig + m.SignatureTwo.Data = msg + + misbehaviour = m + + }, + false, + }, + { + "consensus state pubkey is nil", + func() { + cs := solomachine.ClientState() + cs.ConsensusState.PublicKey = nil + clientState = cs + misbehaviour = solomachine.CreateMisbehaviour() + }, + false, + }, + } + + for _, tc := range testCases { + tc := tc + + suite.Run(tc.name, func() { + // setup test + tc.setup() + + clientState, err := clientState.CheckMisbehaviourAndUpdateState(suite.chainA.GetContext(), suite.chainA.App.AppCodec(), suite.store, misbehaviour) + + if tc.expPass { + suite.Require().NoError(err) + suite.Require().True(clientState.IsFrozen(), "client not frozen") + } else { + suite.Require().Error(err) + suite.Require().Nil(clientState) + } + }) + } + } +} diff --git a/light-clients/06-solomachine/types/misbehaviour_test.go b/light-clients/06-solomachine/types/misbehaviour_test.go new file mode 100644 index 0000000000..7c1f9168aa --- /dev/null +++ b/light-clients/06-solomachine/types/misbehaviour_test.go @@ -0,0 +1,132 @@ +package types_test + +import ( + "github.com/cosmos/cosmos-sdk/x/ibc/core/exported" + "github.com/cosmos/cosmos-sdk/x/ibc/light-clients/06-solomachine/types" + ibctesting "github.com/cosmos/cosmos-sdk/x/ibc/testing" +) + +func (suite *SoloMachineTestSuite) TestMisbehaviour() { + misbehaviour := suite.solomachine.CreateMisbehaviour() + + suite.Require().Equal(exported.Solomachine, misbehaviour.ClientType()) + suite.Require().Equal(suite.solomachine.ClientID, misbehaviour.GetClientID()) + suite.Require().Equal(uint64(0), misbehaviour.GetHeight().GetRevisionNumber()) + suite.Require().Equal(suite.solomachine.Sequence, misbehaviour.GetHeight().GetRevisionHeight()) +} + +func (suite *SoloMachineTestSuite) TestMisbehaviourValidateBasic() { + // test singlesig and multisig public keys + for _, solomachine := range []*ibctesting.Solomachine{suite.solomachine, suite.solomachineMulti} { + + testCases := []struct { + name string + malleateMisbehaviour func(misbehaviour *types.Misbehaviour) + expPass bool + }{ + { + "valid misbehaviour", + func(*types.Misbehaviour) {}, + true, + }, + { + "invalid client ID", + func(misbehaviour *types.Misbehaviour) { + misbehaviour.ClientId = "(badclientid)" + }, + false, + }, + { + "sequence is zero", + func(misbehaviour *types.Misbehaviour) { + misbehaviour.Sequence = 0 + }, + false, + }, + { + "signature one sig is empty", + func(misbehaviour *types.Misbehaviour) { + misbehaviour.SignatureOne.Signature = []byte{} + }, + false, + }, + { + "signature two sig is empty", + func(misbehaviour *types.Misbehaviour) { + misbehaviour.SignatureTwo.Signature = []byte{} + }, + false, + }, + { + "signature one data is empty", + func(misbehaviour *types.Misbehaviour) { + misbehaviour.SignatureOne.Data = nil + }, + false, + }, + { + "signature two data is empty", + func(misbehaviour *types.Misbehaviour) { + misbehaviour.SignatureTwo.Data = []byte{} + }, + false, + }, + { + "signatures are identical", + func(misbehaviour *types.Misbehaviour) { + misbehaviour.SignatureTwo.Signature = 
misbehaviour.SignatureOne.Signature + }, + false, + }, + { + "data signed is identical", + func(misbehaviour *types.Misbehaviour) { + misbehaviour.SignatureTwo.Data = misbehaviour.SignatureOne.Data + }, + false, + }, + { + "data type for SignatureOne is unspecified", + func(misbehaviour *types.Misbehaviour) { + misbehaviour.SignatureOne.DataType = types.UNSPECIFIED + }, false, + }, + { + "data type for SignatureTwo is unspecified", + func(misbehaviour *types.Misbehaviour) { + misbehaviour.SignatureTwo.DataType = types.UNSPECIFIED + }, false, + }, + { + "timestamp for SignatureOne is zero", + func(misbehaviour *types.Misbehaviour) { + misbehaviour.SignatureOne.Timestamp = 0 + }, false, + }, + { + "timestamp for SignatureTwo is zero", + func(misbehaviour *types.Misbehaviour) { + misbehaviour.SignatureTwo.Timestamp = 0 + }, false, + }, + } + + for _, tc := range testCases { + tc := tc + + suite.Run(tc.name, func() { + + misbehaviour := solomachine.CreateMisbehaviour() + tc.malleateMisbehaviour(misbehaviour) + + err := misbehaviour.ValidateBasic() + + if tc.expPass { + suite.Require().NoError(err) + } else { + suite.Require().Error(err) + } + }) + } + } +} diff --git a/light-clients/06-solomachine/types/proof.go b/light-clients/06-solomachine/types/proof.go new file mode 100644 index 0000000000..6c2e0b8428 --- /dev/null +++ b/light-clients/06-solomachine/types/proof.go @@ -0,0 +1,475 @@ +package types + +import ( + "github.com/cosmos/cosmos-sdk/codec" + cryptotypes "github.com/cosmos/cosmos-sdk/crypto/types" + "github.com/cosmos/cosmos-sdk/crypto/types/multisig" + sdkerrors "github.com/cosmos/cosmos-sdk/types/errors" + "github.com/cosmos/cosmos-sdk/types/tx/signing" + clienttypes "github.com/cosmos/cosmos-sdk/x/ibc/core/02-client/types" + connectiontypes "github.com/cosmos/cosmos-sdk/x/ibc/core/03-connection/types" + channeltypes "github.com/cosmos/cosmos-sdk/x/ibc/core/04-channel/types" + commitmenttypes "github.com/cosmos/cosmos-sdk/x/ibc/core/23-commitment/types" + "github.com/cosmos/cosmos-sdk/x/ibc/core/exported" +) + +// VerifySignature verifies if the the provided public key generated the signature +// over the given data. Single and Multi signature public keys are supported. +// The signature data type must correspond to the public key type. An error is +// returned if signature verification fails or an invalid SignatureData type is +// provided. +func VerifySignature(pubKey cryptotypes.PubKey, signBytes []byte, sigData signing.SignatureData) error { + switch pubKey := pubKey.(type) { + case multisig.PubKey: + data, ok := sigData.(*signing.MultiSignatureData) + if !ok { + return sdkerrors.Wrapf(ErrSignatureVerificationFailed, "invalid signature data type, expected %T, got %T", (*signing.MultiSignatureData)(nil), data) + } + + // The function supplied fulfills the VerifyMultisignature interface. No special + // adjustments need to be made to the sign bytes based on the sign mode. + if err := pubKey.VerifyMultisignature(func(signing.SignMode) ([]byte, error) { + return signBytes, nil + }, data); err != nil { + return err + } + + default: + data, ok := sigData.(*signing.SingleSignatureData) + if !ok { + return sdkerrors.Wrapf(ErrSignatureVerificationFailed, "invalid signature data type, expected %T, got %T", (*signing.SingleSignatureData)(nil), data) + } + + if !pubKey.VerifySignature(signBytes, data.Signature) { + return ErrSignatureVerificationFailed + } + } + + return nil +} + +// MisbehaviourSignBytes returns the sign bytes for verification of misbehaviour. 
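+// For example (illustrative only; cdc, diversifier and a SignatureAndData
+// value sigAndData are assumed to be in scope), the sign bytes for one half of
+// a misbehaviour at sequence 7 could be constructed as:
+//
+//	signBytes, err := MisbehaviourSignBytes(cdc, 7, sigAndData.Timestamp, diversifier, sigAndData.DataType, sigAndData.Data)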
+func MisbehaviourSignBytes(
+	cdc codec.BinaryMarshaler,
+	sequence, timestamp uint64,
+	diversifier string,
+	dataType DataType,
+	data []byte) ([]byte, error) {
+	signBytes := &SignBytes{
+		Sequence:    sequence,
+		Timestamp:   timestamp,
+		Diversifier: diversifier,
+		DataType:    dataType,
+		Data:        data,
+	}
+
+	return cdc.MarshalBinaryBare(signBytes)
+}
+
+// HeaderSignBytes returns the sign bytes for verification of the header.
+func HeaderSignBytes(
+	cdc codec.BinaryMarshaler,
+	header *Header,
+) ([]byte, error) {
+	data := &HeaderData{
+		NewPubKey:      header.NewPublicKey,
+		NewDiversifier: header.NewDiversifier,
+	}
+
+	dataBz, err := cdc.MarshalBinaryBare(data)
+	if err != nil {
+		return nil, err
+	}
+
+	signBytes := &SignBytes{
+		Sequence:    header.Sequence,
+		Timestamp:   header.Timestamp,
+		Diversifier: header.NewDiversifier,
+		DataType:    HEADER,
+		Data:        dataBz,
+	}
+
+	return cdc.MarshalBinaryBare(signBytes)
+}
+
+// ClientStateSignBytes returns the sign bytes for verification of the
+// client state.
+func ClientStateSignBytes(
+	cdc codec.BinaryMarshaler,
+	sequence, timestamp uint64,
+	diversifier string,
+	path commitmenttypes.MerklePath,
+	clientState exported.ClientState,
+) ([]byte, error) {
+	dataBz, err := ClientStateDataBytes(cdc, path, clientState)
+	if err != nil {
+		return nil, err
+	}
+
+	signBytes := &SignBytes{
+		Sequence:    sequence,
+		Timestamp:   timestamp,
+		Diversifier: diversifier,
+		DataType:    CLIENT,
+		Data:        dataBz,
+	}
+
+	return cdc.MarshalBinaryBare(signBytes)
+}
+
+// ClientStateDataBytes returns the client state data bytes used in constructing
+// SignBytes.
+func ClientStateDataBytes(
+	cdc codec.BinaryMarshaler,
+	path commitmenttypes.MerklePath, // nolint: interfacer
+	clientState exported.ClientState,
+) ([]byte, error) {
+	any, err := clienttypes.PackClientState(clientState)
+	if err != nil {
+		return nil, err
+	}
+
+	data := &ClientStateData{
+		Path:        []byte(path.String()),
+		ClientState: any,
+	}
+
+	dataBz, err := cdc.MarshalBinaryBare(data)
+	if err != nil {
+		return nil, err
+	}
+
+	return dataBz, nil
+}
+
+// ConsensusStateSignBytes returns the sign bytes for verification of the
+// consensus state.
+func ConsensusStateSignBytes(
+	cdc codec.BinaryMarshaler,
+	sequence, timestamp uint64,
+	diversifier string,
+	path commitmenttypes.MerklePath,
+	consensusState exported.ConsensusState,
+) ([]byte, error) {
+	dataBz, err := ConsensusStateDataBytes(cdc, path, consensusState)
+	if err != nil {
+		return nil, err
+	}
+
+	signBytes := &SignBytes{
+		Sequence:    sequence,
+		Timestamp:   timestamp,
+		Diversifier: diversifier,
+		DataType:    CONSENSUS,
+		Data:        dataBz,
+	}
+
+	return cdc.MarshalBinaryBare(signBytes)
+}
+
+// ConsensusStateDataBytes returns the consensus state data bytes used in constructing
+// SignBytes.
+func ConsensusStateDataBytes(
+	cdc codec.BinaryMarshaler,
+	path commitmenttypes.MerklePath, // nolint: interfacer
+	consensusState exported.ConsensusState,
+) ([]byte, error) {
+	any, err := clienttypes.PackConsensusState(consensusState)
+	if err != nil {
+		return nil, err
+	}
+
+	data := &ConsensusStateData{
+		Path:           []byte(path.String()),
+		ConsensusState: any,
+	}
+
+	dataBz, err := cdc.MarshalBinaryBare(data)
+	if err != nil {
+		return nil, err
+	}
+
+	return dataBz, nil
+}
+
+// ConnectionStateSignBytes returns the sign bytes for verification of the
+// connection state.
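+// The connection end must be a concrete connectiontypes.ConnectionEnd; any
+// other exported.ConnectionI implementation is rejected when the underlying
+// data bytes are constructed.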
+func ConnectionStateSignBytes( + cdc codec.BinaryMarshaler, + sequence, timestamp uint64, + diversifier string, + path commitmenttypes.MerklePath, + connectionEnd exported.ConnectionI, +) ([]byte, error) { + dataBz, err := ConnectionStateDataBytes(cdc, path, connectionEnd) + if err != nil { + return nil, err + } + + signBytes := &SignBytes{ + Sequence: sequence, + Timestamp: timestamp, + Diversifier: diversifier, + DataType: CONNECTION, + Data: dataBz, + } + + return cdc.MarshalBinaryBare(signBytes) +} + +// ConnectionStateDataBytes returns the connection state data bytes used in constructing +// SignBytes. +func ConnectionStateDataBytes( + cdc codec.BinaryMarshaler, + path commitmenttypes.MerklePath, // nolint: interfacer + connectionEnd exported.ConnectionI, +) ([]byte, error) { + connection, ok := connectionEnd.(connectiontypes.ConnectionEnd) + if !ok { + return nil, sdkerrors.Wrapf( + connectiontypes.ErrInvalidConnection, + "expected type %T, got %T", connectiontypes.ConnectionEnd{}, connectionEnd, + ) + } + + data := &ConnectionStateData{ + Path: []byte(path.String()), + Connection: &connection, + } + + dataBz, err := cdc.MarshalBinaryBare(data) + if err != nil { + return nil, err + } + + return dataBz, nil +} + +// ChannelStateSignBytes returns the sign bytes for verification of the +// channel state. +func ChannelStateSignBytes( + cdc codec.BinaryMarshaler, + sequence, timestamp uint64, + diversifier string, + path commitmenttypes.MerklePath, + channelEnd exported.ChannelI, +) ([]byte, error) { + dataBz, err := ChannelStateDataBytes(cdc, path, channelEnd) + if err != nil { + return nil, err + } + + signBytes := &SignBytes{ + Sequence: sequence, + Timestamp: timestamp, + Diversifier: diversifier, + DataType: CHANNEL, + Data: dataBz, + } + + return cdc.MarshalBinaryBare(signBytes) +} + +// ChannelStateDataBytes returns the channel state data bytes used in constructing +// SignBytes. +func ChannelStateDataBytes( + cdc codec.BinaryMarshaler, + path commitmenttypes.MerklePath, // nolint: interfacer + channelEnd exported.ChannelI, +) ([]byte, error) { + channel, ok := channelEnd.(channeltypes.Channel) + if !ok { + return nil, sdkerrors.Wrapf( + channeltypes.ErrInvalidChannel, + "expected channel type %T, got %T", channeltypes.Channel{}, channelEnd) + } + + data := &ChannelStateData{ + Path: []byte(path.String()), + Channel: &channel, + } + + dataBz, err := cdc.MarshalBinaryBare(data) + if err != nil { + return nil, err + } + + return dataBz, nil +} + +// PacketCommitmentSignBytes returns the sign bytes for verification of the +// packet commitment. +func PacketCommitmentSignBytes( + cdc codec.BinaryMarshaler, + sequence, timestamp uint64, + diversifier string, + path commitmenttypes.MerklePath, + commitmentBytes []byte, +) ([]byte, error) { + dataBz, err := PacketCommitmentDataBytes(cdc, path, commitmentBytes) + if err != nil { + return nil, err + } + + signBytes := &SignBytes{ + Sequence: sequence, + Timestamp: timestamp, + Diversifier: diversifier, + DataType: PACKETCOMMITMENT, + Data: dataBz, + } + + return cdc.MarshalBinaryBare(signBytes) +} + +// PacketCommitmentDataBytes returns the packet commitment data bytes used in constructing +// SignBytes. 
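+// The proof path is stored as the string encoding of the Merkle path alongside
+// the raw commitment bytes.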
+func PacketCommitmentDataBytes( + cdc codec.BinaryMarshaler, + path commitmenttypes.MerklePath, // nolint: interfacer + commitmentBytes []byte, +) ([]byte, error) { + data := &PacketCommitmentData{ + Path: []byte(path.String()), + Commitment: commitmentBytes, + } + + dataBz, err := cdc.MarshalBinaryBare(data) + if err != nil { + return nil, err + } + + return dataBz, nil +} + +// PacketAcknowledgementSignBytes returns the sign bytes for verification of +// the acknowledgement. +func PacketAcknowledgementSignBytes( + cdc codec.BinaryMarshaler, + sequence, timestamp uint64, + diversifier string, + path commitmenttypes.MerklePath, + acknowledgement []byte, +) ([]byte, error) { + dataBz, err := PacketAcknowledgementDataBytes(cdc, path, acknowledgement) + if err != nil { + return nil, err + } + + signBytes := &SignBytes{ + Sequence: sequence, + Timestamp: timestamp, + Diversifier: diversifier, + DataType: PACKETACKNOWLEDGEMENT, + Data: dataBz, + } + + return cdc.MarshalBinaryBare(signBytes) +} + +// PacketAcknowledgementDataBytes returns the packet acknowledgement data bytes used in constructing +// SignBytes. +func PacketAcknowledgementDataBytes( + cdc codec.BinaryMarshaler, + path commitmenttypes.MerklePath, // nolint: interfacer + acknowledgement []byte, +) ([]byte, error) { + data := &PacketAcknowledgementData{ + Path: []byte(path.String()), + Acknowledgement: acknowledgement, + } + + dataBz, err := cdc.MarshalBinaryBare(data) + if err != nil { + return nil, err + } + + return dataBz, nil +} + +// PacketReceiptAbsenceSignBytes returns the sign bytes for verification +// of the absence of an receipt. +func PacketReceiptAbsenceSignBytes( + cdc codec.BinaryMarshaler, + sequence, timestamp uint64, + diversifier string, + path commitmenttypes.MerklePath, +) ([]byte, error) { + dataBz, err := PacketReceiptAbsenceDataBytes(cdc, path) + if err != nil { + return nil, err + } + + signBytes := &SignBytes{ + Sequence: sequence, + Timestamp: timestamp, + Diversifier: diversifier, + DataType: PACKETRECEIPTABSENCE, + Data: dataBz, + } + + return cdc.MarshalBinaryBare(signBytes) +} + +// PacketReceiptAbsenceDataBytes returns the packet receipt absence data bytes +// used in constructing SignBytes. +func PacketReceiptAbsenceDataBytes( + cdc codec.BinaryMarshaler, + path commitmenttypes.MerklePath, // nolint: interfacer +) ([]byte, error) { + data := &PacketReceiptAbsenceData{ + Path: []byte(path.String()), + } + + dataBz, err := cdc.MarshalBinaryBare(data) + if err != nil { + return nil, err + } + + return dataBz, nil +} + +// NextSequenceRecvSignBytes returns the sign bytes for verification of the next +// sequence to be received. +func NextSequenceRecvSignBytes( + cdc codec.BinaryMarshaler, + sequence, timestamp uint64, + diversifier string, + path commitmenttypes.MerklePath, + nextSequenceRecv uint64, +) ([]byte, error) { + dataBz, err := NextSequenceRecvDataBytes(cdc, path, nextSequenceRecv) + if err != nil { + return nil, err + } + + signBytes := &SignBytes{ + Sequence: sequence, + Timestamp: timestamp, + Diversifier: diversifier, + DataType: NEXTSEQUENCERECV, + Data: dataBz, + } + + return cdc.MarshalBinaryBare(signBytes) +} + +// NextSequenceRecvDataBytes returns the next sequence recv data bytes used in constructing +// SignBytes. 
+func NextSequenceRecvDataBytes( + cdc codec.BinaryMarshaler, + path commitmenttypes.MerklePath, // nolint: interfacer + nextSequenceRecv uint64, +) ([]byte, error) { + data := &NextSequenceRecvData{ + Path: []byte(path.String()), + NextSeqRecv: nextSequenceRecv, + } + + dataBz, err := cdc.MarshalBinaryBare(data) + if err != nil { + return nil, err + } + + return dataBz, nil +} diff --git a/light-clients/06-solomachine/types/proof_test.go b/light-clients/06-solomachine/types/proof_test.go new file mode 100644 index 0000000000..e2ba679a5b --- /dev/null +++ b/light-clients/06-solomachine/types/proof_test.go @@ -0,0 +1,102 @@ +package types_test + +import ( + cryptotypes "github.com/cosmos/cosmos-sdk/crypto/types" + "github.com/cosmos/cosmos-sdk/types/tx/signing" + "github.com/cosmos/cosmos-sdk/x/ibc/light-clients/06-solomachine/types" + solomachinetypes "github.com/cosmos/cosmos-sdk/x/ibc/light-clients/06-solomachine/types" + ibctesting "github.com/cosmos/cosmos-sdk/x/ibc/testing" +) + +func (suite *SoloMachineTestSuite) TestVerifySignature() { + cdc := suite.chainA.App.AppCodec() + signBytes := []byte("sign bytes") + + singleSignature := suite.solomachine.GenerateSignature(signBytes) + singleSigData, err := solomachinetypes.UnmarshalSignatureData(cdc, singleSignature) + suite.Require().NoError(err) + + multiSignature := suite.solomachineMulti.GenerateSignature(signBytes) + multiSigData, err := solomachinetypes.UnmarshalSignatureData(cdc, multiSignature) + suite.Require().NoError(err) + + testCases := []struct { + name string + publicKey cryptotypes.PubKey + sigData signing.SignatureData + expPass bool + }{ + { + "single signature with regular public key", + suite.solomachine.PublicKey, + singleSigData, + true, + }, + { + "multi signature with multisig public key", + suite.solomachineMulti.PublicKey, + multiSigData, + true, + }, + { + "single signature with multisig public key", + suite.solomachineMulti.PublicKey, + singleSigData, + false, + }, + { + "multi signature with regular public key", + suite.solomachine.PublicKey, + multiSigData, + false, + }, + } + + for _, tc := range testCases { + tc := tc + + suite.Run(tc.name, func() { + err := solomachinetypes.VerifySignature(tc.publicKey, signBytes, tc.sigData) + + if tc.expPass { + suite.Require().NoError(err) + } else { + suite.Require().Error(err) + } + }) + } +} + +func (suite *SoloMachineTestSuite) TestClientStateSignBytes() { + cdc := suite.chainA.App.AppCodec() + + for _, solomachine := range []*ibctesting.Solomachine{suite.solomachine, suite.solomachineMulti} { + // success + path := solomachine.GetClientStatePath(counterpartyClientIdentifier) + bz, err := types.ClientStateSignBytes(cdc, solomachine.Sequence, solomachine.Time, solomachine.Diversifier, path, solomachine.ClientState()) + suite.Require().NoError(err) + suite.Require().NotNil(bz) + + // nil client state + bz, err = types.ClientStateSignBytes(cdc, solomachine.Sequence, solomachine.Time, solomachine.Diversifier, path, nil) + suite.Require().Error(err) + suite.Require().Nil(bz) + } +} + +func (suite *SoloMachineTestSuite) TestConsensusStateSignBytes() { + cdc := suite.chainA.App.AppCodec() + + for _, solomachine := range []*ibctesting.Solomachine{suite.solomachine, suite.solomachineMulti} { + // success + path := solomachine.GetConsensusStatePath(counterpartyClientIdentifier, consensusHeight) + bz, err := types.ConsensusStateSignBytes(cdc, solomachine.Sequence, solomachine.Time, solomachine.Diversifier, path, solomachine.ConsensusState()) + suite.Require().NoError(err) + 
suite.Require().NotNil(bz) + + // nil consensus state + bz, err = types.ConsensusStateSignBytes(cdc, solomachine.Sequence, solomachine.Time, solomachine.Diversifier, path, nil) + suite.Require().Error(err) + suite.Require().Nil(bz) + } +} diff --git a/light-clients/06-solomachine/types/proposal_handle.go b/light-clients/06-solomachine/types/proposal_handle.go new file mode 100644 index 0000000000..e38155b236 --- /dev/null +++ b/light-clients/06-solomachine/types/proposal_handle.go @@ -0,0 +1,64 @@ +package types + +import ( + "reflect" + + "github.com/cosmos/cosmos-sdk/codec" + sdk "github.com/cosmos/cosmos-sdk/types" + sdkerrors "github.com/cosmos/cosmos-sdk/types/errors" + clienttypes "github.com/cosmos/cosmos-sdk/x/ibc/core/02-client/types" + "github.com/cosmos/cosmos-sdk/x/ibc/core/exported" +) + +// CheckSubstituteAndUpdateState verifies that the subject is allowed to be updated by +// a governance proposal and that the substitute client is a solo machine. +// It will update the consensus state to the substitute's consensus state and +// the sequence to the substitute's current sequence. An error is returned if +// the client has been disallowed to be updated by a governance proposal, +// the substitute is not a solo machine, or the current public key equals +// the new public key. +func (cs ClientState) CheckSubstituteAndUpdateState( + ctx sdk.Context, cdc codec.BinaryMarshaler, subjectClientStore, + _ sdk.KVStore, substituteClient exported.ClientState, + _ exported.Height, +) (exported.ClientState, error) { + + if !cs.AllowUpdateAfterProposal { + return nil, sdkerrors.Wrapf( + clienttypes.ErrUpdateClientFailed, + "solo machine client is not allowed to updated with a proposal", + ) + } + + substituteClientState, ok := substituteClient.(*ClientState) + if !ok { + return nil, sdkerrors.Wrapf( + clienttypes.ErrInvalidClientType, "substitute client state type %T, expected %T", substituteClient, &ClientState{}, + ) + } + + subjectPublicKey, err := cs.ConsensusState.GetPubKey() + if err != nil { + return nil, sdkerrors.Wrap(err, "failed to get consensus public key") + } + + substitutePublicKey, err := substituteClientState.ConsensusState.GetPubKey() + if err != nil { + return nil, sdkerrors.Wrap(err, "failed to get substitute client public key") + } + + if reflect.DeepEqual(subjectPublicKey, substitutePublicKey) { + return nil, sdkerrors.Wrapf( + clienttypes.ErrInvalidHeader, "subject and substitute have the same public key", + ) + } + + clientState := &cs + + // update to substitute parameters + clientState.Sequence = substituteClientState.Sequence + clientState.ConsensusState = substituteClientState.ConsensusState + clientState.FrozenSequence = 0 + + return clientState, nil +} diff --git a/light-clients/06-solomachine/types/proposal_handle_test.go b/light-clients/06-solomachine/types/proposal_handle_test.go new file mode 100644 index 0000000000..0113da1044 --- /dev/null +++ b/light-clients/06-solomachine/types/proposal_handle_test.go @@ -0,0 +1,88 @@ +package types_test + +import ( + "github.com/cosmos/cosmos-sdk/x/ibc/core/exported" + "github.com/cosmos/cosmos-sdk/x/ibc/light-clients/06-solomachine/types" + ibctmtypes "github.com/cosmos/cosmos-sdk/x/ibc/light-clients/07-tendermint/types" + ibctesting "github.com/cosmos/cosmos-sdk/x/ibc/testing" +) + +func (suite *SoloMachineTestSuite) TestCheckSubstituteAndUpdateState() { + var ( + subjectClientState *types.ClientState + substituteClientState exported.ClientState + ) + + // test singlesig and multisig public keys + for _, solomachine := 
range []*ibctesting.Solomachine{suite.solomachine, suite.solomachineMulti} { + + testCases := []struct { + name string + malleate func() + expPass bool + }{ + { + "valid substitute", func() { + subjectClientState.AllowUpdateAfterProposal = true + }, true, + }, + { + "subject not allowed to be updated", func() { + subjectClientState.AllowUpdateAfterProposal = false + }, false, + }, + { + "substitute is not the solo machine", func() { + substituteClientState = &ibctmtypes.ClientState{} + }, false, + }, + { + "subject public key is nil", func() { + subjectClientState.ConsensusState.PublicKey = nil + }, false, + }, + + { + "substitute public key is nil", func() { + substituteClientState.(*types.ClientState).ConsensusState.PublicKey = nil + }, false, + }, + { + "subject and substitute use the same public key", func() { + substituteClientState.(*types.ClientState).ConsensusState.PublicKey = subjectClientState.ConsensusState.PublicKey + }, false, + }, + } + + for _, tc := range testCases { + tc := tc + + suite.Run(tc.name, func() { + suite.SetupTest() + + subjectClientState = solomachine.ClientState() + subjectClientState.AllowUpdateAfterProposal = true + substitute := ibctesting.NewSolomachine(suite.T(), suite.chainA.Codec, "substitute", "testing", 5) + substituteClientState = substitute.ClientState() + + tc.malleate() + + subjectClientStore := suite.chainA.App.IBCKeeper.ClientKeeper.ClientStore(suite.chainA.GetContext(), solomachine.ClientID) + substituteClientStore := suite.chainA.App.IBCKeeper.ClientKeeper.ClientStore(suite.chainA.GetContext(), substitute.ClientID) + + updatedClient, err := subjectClientState.CheckSubstituteAndUpdateState(suite.chainA.GetContext(), suite.chainA.App.AppCodec(), subjectClientStore, substituteClientStore, substituteClientState, nil) + + if tc.expPass { + suite.Require().NoError(err) + + suite.Require().Equal(substituteClientState.(*types.ClientState).ConsensusState, updatedClient.(*types.ClientState).ConsensusState) + suite.Require().Equal(substituteClientState.(*types.ClientState).Sequence, updatedClient.(*types.ClientState).Sequence) + suite.Require().Equal(uint64(0), updatedClient.(*types.ClientState).FrozenSequence) + } else { + suite.Require().Error(err) + suite.Require().Nil(updatedClient) + } + }) + } + } +} diff --git a/light-clients/06-solomachine/types/solomachine.go b/light-clients/06-solomachine/types/solomachine.go new file mode 100644 index 0000000000..d3936ef427 --- /dev/null +++ b/light-clients/06-solomachine/types/solomachine.go @@ -0,0 +1,43 @@ +package types + +import ( + codectypes "github.com/cosmos/cosmos-sdk/codec/types" + cryptotypes "github.com/cosmos/cosmos-sdk/crypto/types" + "github.com/cosmos/cosmos-sdk/x/ibc/core/exported" +) + +// Interface implementation checks. +var _, _, _, _ codectypes.UnpackInterfacesMessage = &ClientState{}, &ConsensusState{}, &Header{}, &HeaderData{} + +// Data is an interface used for all the signature data bytes proto definitions. 
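+// The concrete types used as Data are ClientStateData, ConsensusStateData,
+// ConnectionStateData, ChannelStateData, PacketCommitmentData,
+// PacketAcknowledgementData, PacketReceiptAbsenceData, NextSequenceRecvData
+// and HeaderData.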
+type Data interface{} + +// UnpackInterfaces implements the UnpackInterfaceMessages.UnpackInterfaces method +func (cs ClientState) UnpackInterfaces(unpacker codectypes.AnyUnpacker) error { + return cs.ConsensusState.UnpackInterfaces(unpacker) +} + +// UnpackInterfaces implements the UnpackInterfaceMessages.UnpackInterfaces method +func (cs ConsensusState) UnpackInterfaces(unpacker codectypes.AnyUnpacker) error { + return unpacker.UnpackAny(cs.PublicKey, new(cryptotypes.PubKey)) +} + +// UnpackInterfaces implements the UnpackInterfaceMessages.UnpackInterfaces method +func (h Header) UnpackInterfaces(unpacker codectypes.AnyUnpacker) error { + return unpacker.UnpackAny(h.NewPublicKey, new(cryptotypes.PubKey)) +} + +// UnpackInterfaces implements the UnpackInterfaceMessages.UnpackInterfaces method +func (hd HeaderData) UnpackInterfaces(unpacker codectypes.AnyUnpacker) error { + return unpacker.UnpackAny(hd.NewPubKey, new(cryptotypes.PubKey)) +} + +// UnpackInterfaces implements the UnpackInterfaceMessages.UnpackInterfaces method +func (csd ClientStateData) UnpackInterfaces(unpacker codectypes.AnyUnpacker) error { + return unpacker.UnpackAny(csd.ClientState, new(exported.ClientState)) +} + +// UnpackInterfaces implements the UnpackInterfaceMessages.UnpackInterfaces method +func (csd ConsensusStateData) UnpackInterfaces(unpacker codectypes.AnyUnpacker) error { + return unpacker.UnpackAny(csd.ConsensusState, new(exported.ConsensusState)) +} diff --git a/light-clients/06-solomachine/types/solomachine.pb.go b/light-clients/06-solomachine/types/solomachine.pb.go new file mode 100644 index 0000000000..90c4110d42 --- /dev/null +++ b/light-clients/06-solomachine/types/solomachine.pb.go @@ -0,0 +1,4121 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: ibcgo/lightclients/solomachine/v1/solomachine.proto + +package types + +import ( + fmt "fmt" + types "github.com/cosmos/cosmos-sdk/codec/types" + types1 "github.com/cosmos/ibc-go/core/03-connection/types" + types2 "github.com/cosmos/ibc-go/core/04-channel/types" + _ "github.com/gogo/protobuf/gogoproto" + proto "github.com/gogo/protobuf/proto" + io "io" + math "math" + math_bits "math/bits" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +// DataType defines the type of solo machine proof being created. This is done +// to preserve uniqueness of different data sign byte encodings. 
+type DataType int32 + +const ( + // Default State + UNSPECIFIED DataType = 0 + // Data type for client state verification + CLIENT DataType = 1 + // Data type for consensus state verification + CONSENSUS DataType = 2 + // Data type for connection state verification + CONNECTION DataType = 3 + // Data type for channel state verification + CHANNEL DataType = 4 + // Data type for packet commitment verification + PACKETCOMMITMENT DataType = 5 + // Data type for packet acknowledgement verification + PACKETACKNOWLEDGEMENT DataType = 6 + // Data type for packet receipt absence verification + PACKETRECEIPTABSENCE DataType = 7 + // Data type for next sequence recv verification + NEXTSEQUENCERECV DataType = 8 + // Data type for header verification + HEADER DataType = 9 +) + +var DataType_name = map[int32]string{ + 0: "DATA_TYPE_UNINITIALIZED_UNSPECIFIED", + 1: "DATA_TYPE_CLIENT_STATE", + 2: "DATA_TYPE_CONSENSUS_STATE", + 3: "DATA_TYPE_CONNECTION_STATE", + 4: "DATA_TYPE_CHANNEL_STATE", + 5: "DATA_TYPE_PACKET_COMMITMENT", + 6: "DATA_TYPE_PACKET_ACKNOWLEDGEMENT", + 7: "DATA_TYPE_PACKET_RECEIPT_ABSENCE", + 8: "DATA_TYPE_NEXT_SEQUENCE_RECV", + 9: "DATA_TYPE_HEADER", +} + +var DataType_value = map[string]int32{ + "DATA_TYPE_UNINITIALIZED_UNSPECIFIED": 0, + "DATA_TYPE_CLIENT_STATE": 1, + "DATA_TYPE_CONSENSUS_STATE": 2, + "DATA_TYPE_CONNECTION_STATE": 3, + "DATA_TYPE_CHANNEL_STATE": 4, + "DATA_TYPE_PACKET_COMMITMENT": 5, + "DATA_TYPE_PACKET_ACKNOWLEDGEMENT": 6, + "DATA_TYPE_PACKET_RECEIPT_ABSENCE": 7, + "DATA_TYPE_NEXT_SEQUENCE_RECV": 8, + "DATA_TYPE_HEADER": 9, +} + +func (x DataType) String() string { + return proto.EnumName(DataType_name, int32(x)) +} + +func (DataType) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_39862ff634781870, []int{0} +} + +// ClientState defines a solo machine client that tracks the current consensus +// state and if the client is frozen. +type ClientState struct { + // latest sequence of the client state + Sequence uint64 `protobuf:"varint,1,opt,name=sequence,proto3" json:"sequence,omitempty"` + // frozen sequence of the solo machine + FrozenSequence uint64 `protobuf:"varint,2,opt,name=frozen_sequence,json=frozenSequence,proto3" json:"frozen_sequence,omitempty" yaml:"frozen_sequence"` + ConsensusState *ConsensusState `protobuf:"bytes,3,opt,name=consensus_state,json=consensusState,proto3" json:"consensus_state,omitempty" yaml:"consensus_state"` + // when set to true, will allow governance to update a solo machine client. + // The client will be unfrozen if it is frozen. 
+ AllowUpdateAfterProposal bool `protobuf:"varint,4,opt,name=allow_update_after_proposal,json=allowUpdateAfterProposal,proto3" json:"allow_update_after_proposal,omitempty" yaml:"allow_update_after_proposal"` +} + +func (m *ClientState) Reset() { *m = ClientState{} } +func (m *ClientState) String() string { return proto.CompactTextString(m) } +func (*ClientState) ProtoMessage() {} +func (*ClientState) Descriptor() ([]byte, []int) { + return fileDescriptor_39862ff634781870, []int{0} +} +func (m *ClientState) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ClientState) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_ClientState.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *ClientState) XXX_Merge(src proto.Message) { + xxx_messageInfo_ClientState.Merge(m, src) +} +func (m *ClientState) XXX_Size() int { + return m.Size() +} +func (m *ClientState) XXX_DiscardUnknown() { + xxx_messageInfo_ClientState.DiscardUnknown(m) +} + +var xxx_messageInfo_ClientState proto.InternalMessageInfo + +// ConsensusState defines a solo machine consensus state. The sequence of a +// consensus state is contained in the "height" key used in storing the +// consensus state. +type ConsensusState struct { + // public key of the solo machine + PublicKey *types.Any `protobuf:"bytes,1,opt,name=public_key,json=publicKey,proto3" json:"public_key,omitempty" yaml:"public_key"` + // diversifier allows the same public key to be re-used across different solo + // machine clients (potentially on different chains) without being considered + // misbehaviour. + Diversifier string `protobuf:"bytes,2,opt,name=diversifier,proto3" json:"diversifier,omitempty"` + Timestamp uint64 `protobuf:"varint,3,opt,name=timestamp,proto3" json:"timestamp,omitempty"` +} + +func (m *ConsensusState) Reset() { *m = ConsensusState{} } +func (m *ConsensusState) String() string { return proto.CompactTextString(m) } +func (*ConsensusState) ProtoMessage() {} +func (*ConsensusState) Descriptor() ([]byte, []int) { + return fileDescriptor_39862ff634781870, []int{1} +} +func (m *ConsensusState) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ConsensusState) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_ConsensusState.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *ConsensusState) XXX_Merge(src proto.Message) { + xxx_messageInfo_ConsensusState.Merge(m, src) +} +func (m *ConsensusState) XXX_Size() int { + return m.Size() +} +func (m *ConsensusState) XXX_DiscardUnknown() { + xxx_messageInfo_ConsensusState.DiscardUnknown(m) +} + +var xxx_messageInfo_ConsensusState proto.InternalMessageInfo + +// Header defines a solo machine consensus header +type Header struct { + // sequence to update solo machine public key at + Sequence uint64 `protobuf:"varint,1,opt,name=sequence,proto3" json:"sequence,omitempty"` + Timestamp uint64 `protobuf:"varint,2,opt,name=timestamp,proto3" json:"timestamp,omitempty"` + Signature []byte `protobuf:"bytes,3,opt,name=signature,proto3" json:"signature,omitempty"` + NewPublicKey *types.Any `protobuf:"bytes,4,opt,name=new_public_key,json=newPublicKey,proto3" json:"new_public_key,omitempty" yaml:"new_public_key"` + NewDiversifier string 
`protobuf:"bytes,5,opt,name=new_diversifier,json=newDiversifier,proto3" json:"new_diversifier,omitempty" yaml:"new_diversifier"` +} + +func (m *Header) Reset() { *m = Header{} } +func (m *Header) String() string { return proto.CompactTextString(m) } +func (*Header) ProtoMessage() {} +func (*Header) Descriptor() ([]byte, []int) { + return fileDescriptor_39862ff634781870, []int{2} +} +func (m *Header) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Header) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Header.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Header) XXX_Merge(src proto.Message) { + xxx_messageInfo_Header.Merge(m, src) +} +func (m *Header) XXX_Size() int { + return m.Size() +} +func (m *Header) XXX_DiscardUnknown() { + xxx_messageInfo_Header.DiscardUnknown(m) +} + +var xxx_messageInfo_Header proto.InternalMessageInfo + +// Misbehaviour defines misbehaviour for a solo machine which consists +// of a sequence and two signatures over different messages at that sequence. +type Misbehaviour struct { + ClientId string `protobuf:"bytes,1,opt,name=client_id,json=clientId,proto3" json:"client_id,omitempty" yaml:"client_id"` + Sequence uint64 `protobuf:"varint,2,opt,name=sequence,proto3" json:"sequence,omitempty"` + SignatureOne *SignatureAndData `protobuf:"bytes,3,opt,name=signature_one,json=signatureOne,proto3" json:"signature_one,omitempty" yaml:"signature_one"` + SignatureTwo *SignatureAndData `protobuf:"bytes,4,opt,name=signature_two,json=signatureTwo,proto3" json:"signature_two,omitempty" yaml:"signature_two"` +} + +func (m *Misbehaviour) Reset() { *m = Misbehaviour{} } +func (m *Misbehaviour) String() string { return proto.CompactTextString(m) } +func (*Misbehaviour) ProtoMessage() {} +func (*Misbehaviour) Descriptor() ([]byte, []int) { + return fileDescriptor_39862ff634781870, []int{3} +} +func (m *Misbehaviour) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Misbehaviour) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Misbehaviour.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Misbehaviour) XXX_Merge(src proto.Message) { + xxx_messageInfo_Misbehaviour.Merge(m, src) +} +func (m *Misbehaviour) XXX_Size() int { + return m.Size() +} +func (m *Misbehaviour) XXX_DiscardUnknown() { + xxx_messageInfo_Misbehaviour.DiscardUnknown(m) +} + +var xxx_messageInfo_Misbehaviour proto.InternalMessageInfo + +// SignatureAndData contains a signature and the data signed over to create that +// signature. 
+type SignatureAndData struct { + Signature []byte `protobuf:"bytes,1,opt,name=signature,proto3" json:"signature,omitempty"` + DataType DataType `protobuf:"varint,2,opt,name=data_type,json=dataType,proto3,enum=ibcgo.lightclients.solomachine.v1.DataType" json:"data_type,omitempty" yaml:"data_type"` + Data []byte `protobuf:"bytes,3,opt,name=data,proto3" json:"data,omitempty"` + Timestamp uint64 `protobuf:"varint,4,opt,name=timestamp,proto3" json:"timestamp,omitempty"` +} + +func (m *SignatureAndData) Reset() { *m = SignatureAndData{} } +func (m *SignatureAndData) String() string { return proto.CompactTextString(m) } +func (*SignatureAndData) ProtoMessage() {} +func (*SignatureAndData) Descriptor() ([]byte, []int) { + return fileDescriptor_39862ff634781870, []int{4} +} +func (m *SignatureAndData) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *SignatureAndData) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_SignatureAndData.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *SignatureAndData) XXX_Merge(src proto.Message) { + xxx_messageInfo_SignatureAndData.Merge(m, src) +} +func (m *SignatureAndData) XXX_Size() int { + return m.Size() +} +func (m *SignatureAndData) XXX_DiscardUnknown() { + xxx_messageInfo_SignatureAndData.DiscardUnknown(m) +} + +var xxx_messageInfo_SignatureAndData proto.InternalMessageInfo + +// TimestampedSignatureData contains the signature data and the timestamp of the +// signature. +type TimestampedSignatureData struct { + SignatureData []byte `protobuf:"bytes,1,opt,name=signature_data,json=signatureData,proto3" json:"signature_data,omitempty" yaml:"signature_data"` + Timestamp uint64 `protobuf:"varint,2,opt,name=timestamp,proto3" json:"timestamp,omitempty"` +} + +func (m *TimestampedSignatureData) Reset() { *m = TimestampedSignatureData{} } +func (m *TimestampedSignatureData) String() string { return proto.CompactTextString(m) } +func (*TimestampedSignatureData) ProtoMessage() {} +func (*TimestampedSignatureData) Descriptor() ([]byte, []int) { + return fileDescriptor_39862ff634781870, []int{5} +} +func (m *TimestampedSignatureData) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *TimestampedSignatureData) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_TimestampedSignatureData.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *TimestampedSignatureData) XXX_Merge(src proto.Message) { + xxx_messageInfo_TimestampedSignatureData.Merge(m, src) +} +func (m *TimestampedSignatureData) XXX_Size() int { + return m.Size() +} +func (m *TimestampedSignatureData) XXX_DiscardUnknown() { + xxx_messageInfo_TimestampedSignatureData.DiscardUnknown(m) +} + +var xxx_messageInfo_TimestampedSignatureData proto.InternalMessageInfo + +// SignBytes defines the signed bytes used for signature verification. 
+type SignBytes struct { + Sequence uint64 `protobuf:"varint,1,opt,name=sequence,proto3" json:"sequence,omitempty"` + Timestamp uint64 `protobuf:"varint,2,opt,name=timestamp,proto3" json:"timestamp,omitempty"` + Diversifier string `protobuf:"bytes,3,opt,name=diversifier,proto3" json:"diversifier,omitempty"` + // type of the data used + DataType DataType `protobuf:"varint,4,opt,name=data_type,json=dataType,proto3,enum=ibcgo.lightclients.solomachine.v1.DataType" json:"data_type,omitempty" yaml:"data_type"` + // marshaled data + Data []byte `protobuf:"bytes,5,opt,name=data,proto3" json:"data,omitempty"` +} + +func (m *SignBytes) Reset() { *m = SignBytes{} } +func (m *SignBytes) String() string { return proto.CompactTextString(m) } +func (*SignBytes) ProtoMessage() {} +func (*SignBytes) Descriptor() ([]byte, []int) { + return fileDescriptor_39862ff634781870, []int{6} +} +func (m *SignBytes) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *SignBytes) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_SignBytes.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *SignBytes) XXX_Merge(src proto.Message) { + xxx_messageInfo_SignBytes.Merge(m, src) +} +func (m *SignBytes) XXX_Size() int { + return m.Size() +} +func (m *SignBytes) XXX_DiscardUnknown() { + xxx_messageInfo_SignBytes.DiscardUnknown(m) +} + +var xxx_messageInfo_SignBytes proto.InternalMessageInfo + +// HeaderData returns the SignBytes data for update verification. +type HeaderData struct { + // header public key + NewPubKey *types.Any `protobuf:"bytes,1,opt,name=new_pub_key,json=newPubKey,proto3" json:"new_pub_key,omitempty" yaml:"new_pub_key"` + // header diversifier + NewDiversifier string `protobuf:"bytes,2,opt,name=new_diversifier,json=newDiversifier,proto3" json:"new_diversifier,omitempty" yaml:"new_diversifier"` +} + +func (m *HeaderData) Reset() { *m = HeaderData{} } +func (m *HeaderData) String() string { return proto.CompactTextString(m) } +func (*HeaderData) ProtoMessage() {} +func (*HeaderData) Descriptor() ([]byte, []int) { + return fileDescriptor_39862ff634781870, []int{7} +} +func (m *HeaderData) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *HeaderData) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_HeaderData.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *HeaderData) XXX_Merge(src proto.Message) { + xxx_messageInfo_HeaderData.Merge(m, src) +} +func (m *HeaderData) XXX_Size() int { + return m.Size() +} +func (m *HeaderData) XXX_DiscardUnknown() { + xxx_messageInfo_HeaderData.DiscardUnknown(m) +} + +var xxx_messageInfo_HeaderData proto.InternalMessageInfo + +// ClientStateData returns the SignBytes data for client state verification. 
+type ClientStateData struct { + Path []byte `protobuf:"bytes,1,opt,name=path,proto3" json:"path,omitempty"` + ClientState *types.Any `protobuf:"bytes,2,opt,name=client_state,json=clientState,proto3" json:"client_state,omitempty" yaml:"client_state"` +} + +func (m *ClientStateData) Reset() { *m = ClientStateData{} } +func (m *ClientStateData) String() string { return proto.CompactTextString(m) } +func (*ClientStateData) ProtoMessage() {} +func (*ClientStateData) Descriptor() ([]byte, []int) { + return fileDescriptor_39862ff634781870, []int{8} +} +func (m *ClientStateData) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ClientStateData) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_ClientStateData.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *ClientStateData) XXX_Merge(src proto.Message) { + xxx_messageInfo_ClientStateData.Merge(m, src) +} +func (m *ClientStateData) XXX_Size() int { + return m.Size() +} +func (m *ClientStateData) XXX_DiscardUnknown() { + xxx_messageInfo_ClientStateData.DiscardUnknown(m) +} + +var xxx_messageInfo_ClientStateData proto.InternalMessageInfo + +// ConsensusStateData returns the SignBytes data for consensus state +// verification. +type ConsensusStateData struct { + Path []byte `protobuf:"bytes,1,opt,name=path,proto3" json:"path,omitempty"` + ConsensusState *types.Any `protobuf:"bytes,2,opt,name=consensus_state,json=consensusState,proto3" json:"consensus_state,omitempty" yaml:"consensus_state"` +} + +func (m *ConsensusStateData) Reset() { *m = ConsensusStateData{} } +func (m *ConsensusStateData) String() string { return proto.CompactTextString(m) } +func (*ConsensusStateData) ProtoMessage() {} +func (*ConsensusStateData) Descriptor() ([]byte, []int) { + return fileDescriptor_39862ff634781870, []int{9} +} +func (m *ConsensusStateData) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ConsensusStateData) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_ConsensusStateData.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *ConsensusStateData) XXX_Merge(src proto.Message) { + xxx_messageInfo_ConsensusStateData.Merge(m, src) +} +func (m *ConsensusStateData) XXX_Size() int { + return m.Size() +} +func (m *ConsensusStateData) XXX_DiscardUnknown() { + xxx_messageInfo_ConsensusStateData.DiscardUnknown(m) +} + +var xxx_messageInfo_ConsensusStateData proto.InternalMessageInfo + +// ConnectionStateData returns the SignBytes data for connection state +// verification. 
+type ConnectionStateData struct { + Path []byte `protobuf:"bytes,1,opt,name=path,proto3" json:"path,omitempty"` + Connection *types1.ConnectionEnd `protobuf:"bytes,2,opt,name=connection,proto3" json:"connection,omitempty"` +} + +func (m *ConnectionStateData) Reset() { *m = ConnectionStateData{} } +func (m *ConnectionStateData) String() string { return proto.CompactTextString(m) } +func (*ConnectionStateData) ProtoMessage() {} +func (*ConnectionStateData) Descriptor() ([]byte, []int) { + return fileDescriptor_39862ff634781870, []int{10} +} +func (m *ConnectionStateData) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ConnectionStateData) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_ConnectionStateData.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *ConnectionStateData) XXX_Merge(src proto.Message) { + xxx_messageInfo_ConnectionStateData.Merge(m, src) +} +func (m *ConnectionStateData) XXX_Size() int { + return m.Size() +} +func (m *ConnectionStateData) XXX_DiscardUnknown() { + xxx_messageInfo_ConnectionStateData.DiscardUnknown(m) +} + +var xxx_messageInfo_ConnectionStateData proto.InternalMessageInfo + +// ChannelStateData returns the SignBytes data for channel state +// verification. +type ChannelStateData struct { + Path []byte `protobuf:"bytes,1,opt,name=path,proto3" json:"path,omitempty"` + Channel *types2.Channel `protobuf:"bytes,2,opt,name=channel,proto3" json:"channel,omitempty"` +} + +func (m *ChannelStateData) Reset() { *m = ChannelStateData{} } +func (m *ChannelStateData) String() string { return proto.CompactTextString(m) } +func (*ChannelStateData) ProtoMessage() {} +func (*ChannelStateData) Descriptor() ([]byte, []int) { + return fileDescriptor_39862ff634781870, []int{11} +} +func (m *ChannelStateData) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ChannelStateData) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_ChannelStateData.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *ChannelStateData) XXX_Merge(src proto.Message) { + xxx_messageInfo_ChannelStateData.Merge(m, src) +} +func (m *ChannelStateData) XXX_Size() int { + return m.Size() +} +func (m *ChannelStateData) XXX_DiscardUnknown() { + xxx_messageInfo_ChannelStateData.DiscardUnknown(m) +} + +var xxx_messageInfo_ChannelStateData proto.InternalMessageInfo + +// PacketCommitmentData returns the SignBytes data for packet commitment +// verification. 
+type PacketCommitmentData struct { + Path []byte `protobuf:"bytes,1,opt,name=path,proto3" json:"path,omitempty"` + Commitment []byte `protobuf:"bytes,2,opt,name=commitment,proto3" json:"commitment,omitempty"` +} + +func (m *PacketCommitmentData) Reset() { *m = PacketCommitmentData{} } +func (m *PacketCommitmentData) String() string { return proto.CompactTextString(m) } +func (*PacketCommitmentData) ProtoMessage() {} +func (*PacketCommitmentData) Descriptor() ([]byte, []int) { + return fileDescriptor_39862ff634781870, []int{12} +} +func (m *PacketCommitmentData) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PacketCommitmentData) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_PacketCommitmentData.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *PacketCommitmentData) XXX_Merge(src proto.Message) { + xxx_messageInfo_PacketCommitmentData.Merge(m, src) +} +func (m *PacketCommitmentData) XXX_Size() int { + return m.Size() +} +func (m *PacketCommitmentData) XXX_DiscardUnknown() { + xxx_messageInfo_PacketCommitmentData.DiscardUnknown(m) +} + +var xxx_messageInfo_PacketCommitmentData proto.InternalMessageInfo + +func (m *PacketCommitmentData) GetPath() []byte { + if m != nil { + return m.Path + } + return nil +} + +func (m *PacketCommitmentData) GetCommitment() []byte { + if m != nil { + return m.Commitment + } + return nil +} + +// PacketAcknowledgementData returns the SignBytes data for acknowledgement +// verification. +type PacketAcknowledgementData struct { + Path []byte `protobuf:"bytes,1,opt,name=path,proto3" json:"path,omitempty"` + Acknowledgement []byte `protobuf:"bytes,2,opt,name=acknowledgement,proto3" json:"acknowledgement,omitempty"` +} + +func (m *PacketAcknowledgementData) Reset() { *m = PacketAcknowledgementData{} } +func (m *PacketAcknowledgementData) String() string { return proto.CompactTextString(m) } +func (*PacketAcknowledgementData) ProtoMessage() {} +func (*PacketAcknowledgementData) Descriptor() ([]byte, []int) { + return fileDescriptor_39862ff634781870, []int{13} +} +func (m *PacketAcknowledgementData) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PacketAcknowledgementData) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_PacketAcknowledgementData.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *PacketAcknowledgementData) XXX_Merge(src proto.Message) { + xxx_messageInfo_PacketAcknowledgementData.Merge(m, src) +} +func (m *PacketAcknowledgementData) XXX_Size() int { + return m.Size() +} +func (m *PacketAcknowledgementData) XXX_DiscardUnknown() { + xxx_messageInfo_PacketAcknowledgementData.DiscardUnknown(m) +} + +var xxx_messageInfo_PacketAcknowledgementData proto.InternalMessageInfo + +func (m *PacketAcknowledgementData) GetPath() []byte { + if m != nil { + return m.Path + } + return nil +} + +func (m *PacketAcknowledgementData) GetAcknowledgement() []byte { + if m != nil { + return m.Acknowledgement + } + return nil +} + +// PacketReceiptAbsenceData returns the SignBytes data for +// packet receipt absence verification. 
+type PacketReceiptAbsenceData struct { + Path []byte `protobuf:"bytes,1,opt,name=path,proto3" json:"path,omitempty"` +} + +func (m *PacketReceiptAbsenceData) Reset() { *m = PacketReceiptAbsenceData{} } +func (m *PacketReceiptAbsenceData) String() string { return proto.CompactTextString(m) } +func (*PacketReceiptAbsenceData) ProtoMessage() {} +func (*PacketReceiptAbsenceData) Descriptor() ([]byte, []int) { + return fileDescriptor_39862ff634781870, []int{14} +} +func (m *PacketReceiptAbsenceData) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PacketReceiptAbsenceData) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_PacketReceiptAbsenceData.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *PacketReceiptAbsenceData) XXX_Merge(src proto.Message) { + xxx_messageInfo_PacketReceiptAbsenceData.Merge(m, src) +} +func (m *PacketReceiptAbsenceData) XXX_Size() int { + return m.Size() +} +func (m *PacketReceiptAbsenceData) XXX_DiscardUnknown() { + xxx_messageInfo_PacketReceiptAbsenceData.DiscardUnknown(m) +} + +var xxx_messageInfo_PacketReceiptAbsenceData proto.InternalMessageInfo + +func (m *PacketReceiptAbsenceData) GetPath() []byte { + if m != nil { + return m.Path + } + return nil +} + +// NextSequenceRecvData returns the SignBytes data for verification of the next +// sequence to be received. +type NextSequenceRecvData struct { + Path []byte `protobuf:"bytes,1,opt,name=path,proto3" json:"path,omitempty"` + NextSeqRecv uint64 `protobuf:"varint,2,opt,name=next_seq_recv,json=nextSeqRecv,proto3" json:"next_seq_recv,omitempty" yaml:"next_seq_recv"` +} + +func (m *NextSequenceRecvData) Reset() { *m = NextSequenceRecvData{} } +func (m *NextSequenceRecvData) String() string { return proto.CompactTextString(m) } +func (*NextSequenceRecvData) ProtoMessage() {} +func (*NextSequenceRecvData) Descriptor() ([]byte, []int) { + return fileDescriptor_39862ff634781870, []int{15} +} +func (m *NextSequenceRecvData) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *NextSequenceRecvData) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_NextSequenceRecvData.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *NextSequenceRecvData) XXX_Merge(src proto.Message) { + xxx_messageInfo_NextSequenceRecvData.Merge(m, src) +} +func (m *NextSequenceRecvData) XXX_Size() int { + return m.Size() +} +func (m *NextSequenceRecvData) XXX_DiscardUnknown() { + xxx_messageInfo_NextSequenceRecvData.DiscardUnknown(m) +} + +var xxx_messageInfo_NextSequenceRecvData proto.InternalMessageInfo + +func (m *NextSequenceRecvData) GetPath() []byte { + if m != nil { + return m.Path + } + return nil +} + +func (m *NextSequenceRecvData) GetNextSeqRecv() uint64 { + if m != nil { + return m.NextSeqRecv + } + return 0 +} + +func init() { + proto.RegisterEnum("ibcgo.lightclients.solomachine.v1.DataType", DataType_name, DataType_value) + proto.RegisterType((*ClientState)(nil), "ibcgo.lightclients.solomachine.v1.ClientState") + proto.RegisterType((*ConsensusState)(nil), "ibcgo.lightclients.solomachine.v1.ConsensusState") + proto.RegisterType((*Header)(nil), "ibcgo.lightclients.solomachine.v1.Header") + proto.RegisterType((*Misbehaviour)(nil), 
"ibcgo.lightclients.solomachine.v1.Misbehaviour") + proto.RegisterType((*SignatureAndData)(nil), "ibcgo.lightclients.solomachine.v1.SignatureAndData") + proto.RegisterType((*TimestampedSignatureData)(nil), "ibcgo.lightclients.solomachine.v1.TimestampedSignatureData") + proto.RegisterType((*SignBytes)(nil), "ibcgo.lightclients.solomachine.v1.SignBytes") + proto.RegisterType((*HeaderData)(nil), "ibcgo.lightclients.solomachine.v1.HeaderData") + proto.RegisterType((*ClientStateData)(nil), "ibcgo.lightclients.solomachine.v1.ClientStateData") + proto.RegisterType((*ConsensusStateData)(nil), "ibcgo.lightclients.solomachine.v1.ConsensusStateData") + proto.RegisterType((*ConnectionStateData)(nil), "ibcgo.lightclients.solomachine.v1.ConnectionStateData") + proto.RegisterType((*ChannelStateData)(nil), "ibcgo.lightclients.solomachine.v1.ChannelStateData") + proto.RegisterType((*PacketCommitmentData)(nil), "ibcgo.lightclients.solomachine.v1.PacketCommitmentData") + proto.RegisterType((*PacketAcknowledgementData)(nil), "ibcgo.lightclients.solomachine.v1.PacketAcknowledgementData") + proto.RegisterType((*PacketReceiptAbsenceData)(nil), "ibcgo.lightclients.solomachine.v1.PacketReceiptAbsenceData") + proto.RegisterType((*NextSequenceRecvData)(nil), "ibcgo.lightclients.solomachine.v1.NextSequenceRecvData") +} + +func init() { + proto.RegisterFile("ibcgo/lightclients/solomachine/v1/solomachine.proto", fileDescriptor_39862ff634781870) +} + +var fileDescriptor_39862ff634781870 = []byte{ + // 1361 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xb4, 0x57, 0xdd, 0x8e, 0xda, 0xd6, + 0x13, 0x5f, 0x13, 0xb2, 0x59, 0x86, 0x0d, 0xcb, 0xdf, 0x21, 0x09, 0xeb, 0x44, 0xe0, 0xbf, 0x23, + 0xa5, 0xdb, 0x8f, 0x40, 0x37, 0x51, 0xa3, 0x28, 0xad, 0xda, 0x1a, 0xe3, 0x26, 0x24, 0xbb, 0x5e, + 0x6a, 0x4c, 0xdb, 0xe4, 0xa2, 0x96, 0x31, 0x67, 0xc1, 0x0a, 0xd8, 0x14, 0x1b, 0x08, 0x95, 0x2a, + 0x55, 0xbd, 0x4a, 0x51, 0x2f, 0xfa, 0x02, 0x48, 0x55, 0xab, 0xbe, 0x4b, 0xa4, 0xde, 0x44, 0xea, + 0x4d, 0xaf, 0x50, 0x9b, 0xbc, 0x01, 0x4f, 0x50, 0xd9, 0xe7, 0x18, 0xdb, 0xec, 0x2e, 0x49, 0xbf, + 0xee, 0xce, 0x99, 0xf9, 0xcd, 0x6f, 0xe6, 0xcc, 0x8c, 0xe7, 0x1c, 0xc3, 0x0d, 0xa3, 0xa1, 0xb7, + 0xac, 0x62, 0xc7, 0x68, 0xb5, 0x1d, 0xbd, 0x63, 0x20, 0xd3, 0xb1, 0x8b, 0xb6, 0xd5, 0xb1, 0xba, + 0x9a, 0xde, 0x36, 0x4c, 0x54, 0x1c, 0xee, 0x86, 0xb7, 0x85, 0x5e, 0xdf, 0x72, 0x2c, 0xfa, 0xff, + 0x9e, 0x51, 0x21, 0x6c, 0x54, 0x08, 0xa3, 0x86, 0xbb, 0xcc, 0xeb, 0x98, 0x57, 0xb7, 0xfa, 0xa8, + 0xa8, 0x5b, 0xa6, 0x89, 0x74, 0xc7, 0xb0, 0x4c, 0x97, 0x2e, 0xd8, 0x61, 0x36, 0xe6, 0x4a, 0x18, + 0xda, 0xd6, 0x4c, 0x13, 0x75, 0x3c, 0x1c, 0x5e, 0x12, 0x50, 0xa6, 0x65, 0xb5, 0x2c, 0x6f, 0x59, + 0x74, 0x57, 0x44, 0xba, 0xdd, 0xb2, 0xac, 0x56, 0x07, 0x15, 0xbd, 0x5d, 0x63, 0x70, 0x58, 0xd4, + 0xcc, 0x31, 0x56, 0x71, 0xbf, 0xc6, 0x20, 0x29, 0x78, 0xb1, 0xd5, 0x1c, 0xcd, 0x41, 0x34, 0x03, + 0x1b, 0x36, 0xfa, 0x62, 0x80, 0x4c, 0x1d, 0x65, 0x29, 0x96, 0xda, 0x89, 0xcb, 0x8b, 0x3d, 0x2d, + 0xc0, 0xd6, 0x61, 0xdf, 0xfa, 0x12, 0x99, 0xea, 0x02, 0x12, 0x73, 0x21, 0x25, 0x66, 0x3e, 0xcb, + 0x5f, 0x18, 0x6b, 0xdd, 0xce, 0x6d, 0x6e, 0x09, 0xc0, 0xc9, 0x29, 0x2c, 0xa9, 0xf9, 0x24, 0x43, + 0xd8, 0xd2, 0x2d, 0xd3, 0x46, 0xa6, 0x3d, 0xb0, 0x55, 0xdb, 0xf5, 0x99, 0x3d, 0xc5, 0x52, 0x3b, + 0xc9, 0xeb, 0xbb, 0x85, 0x97, 0xa6, 0xab, 0x20, 0xf8, 0x96, 0x5e, 0xb0, 0x61, 0xbf, 0x4b, 0x9c, + 0x9c, 0x9c, 0xd2, 0x23, 0x58, 0x1a, 0xc1, 0x25, 0xad, 0xd3, 0xb1, 0x46, 0xea, 0xa0, 0xd7, 0xd4, + 0x1c, 0xa4, 0x6a, 0x87, 0x0e, 0xea, 0xab, 0xbd, 
0xbe, 0xd5, 0xb3, 0x6c, 0xad, 0x93, 0x8d, 0xb3, + 0xd4, 0xce, 0x46, 0xe9, 0xea, 0x7c, 0x96, 0xe7, 0x30, 0xe1, 0x0a, 0x30, 0x27, 0x67, 0x3d, 0x6d, + 0xdd, 0x53, 0xf2, 0xae, 0xae, 0x4a, 0x54, 0xb7, 0xe3, 0x4f, 0x7e, 0xc8, 0xaf, 0x71, 0x3f, 0x52, + 0x90, 0x8a, 0xc6, 0x4a, 0xdf, 0x03, 0xe8, 0x0d, 0x1a, 0x1d, 0x43, 0x57, 0x1f, 0xa1, 0xb1, 0x97, + 0xda, 0xe4, 0xf5, 0x4c, 0x01, 0x17, 0xa6, 0xe0, 0x17, 0xa6, 0xc0, 0x9b, 0xe3, 0xd2, 0xf9, 0xf9, + 0x2c, 0xff, 0x3f, 0x1c, 0x44, 0x60, 0xc1, 0xc9, 0x09, 0xbc, 0xb9, 0x8f, 0xc6, 0x34, 0x0b, 0xc9, + 0xa6, 0x31, 0x44, 0x7d, 0xdb, 0x38, 0x34, 0x50, 0xdf, 0x2b, 0x42, 0x42, 0x0e, 0x8b, 0xe8, 0xcb, + 0x90, 0x70, 0x8c, 0x2e, 0xb2, 0x1d, 0xad, 0xdb, 0xf3, 0xf2, 0x1b, 0x97, 0x03, 0x01, 0x09, 0xf2, + 0x9b, 0x18, 0xac, 0xdf, 0x45, 0x5a, 0x13, 0xf5, 0x57, 0x56, 0x3d, 0x42, 0x15, 0x5b, 0xa2, 0x72, + 0xb5, 0xb6, 0xd1, 0x32, 0x35, 0x67, 0xd0, 0xc7, 0x85, 0xdc, 0x94, 0x03, 0x01, 0x5d, 0x87, 0x94, + 0x89, 0x46, 0x6a, 0xe8, 0xe0, 0xf1, 0x15, 0x07, 0xdf, 0x9e, 0xcf, 0xf2, 0xe7, 0xf1, 0xc1, 0xa3, + 0x56, 0x9c, 0xbc, 0x69, 0xa2, 0x51, 0x75, 0x71, 0x7e, 0x01, 0xb6, 0x5c, 0x40, 0x38, 0x07, 0xa7, + 0xdd, 0x1c, 0x84, 0x1b, 0x62, 0x09, 0xc0, 0xc9, 0x6e, 0x24, 0xe5, 0x40, 0x40, 0x92, 0xf0, 0x4b, + 0x0c, 0x36, 0xf7, 0x0d, 0xbb, 0x81, 0xda, 0xda, 0xd0, 0xb0, 0x06, 0x7d, 0x7a, 0x17, 0x12, 0xb8, + 0xf9, 0x54, 0xa3, 0xe9, 0xe5, 0x22, 0x51, 0xca, 0xcc, 0x67, 0xf9, 0x34, 0x69, 0x33, 0x5f, 0xc5, + 0xc9, 0x1b, 0x78, 0x5d, 0x69, 0x46, 0xb2, 0x17, 0x5b, 0xca, 0x5e, 0x1f, 0xce, 0x2e, 0xd2, 0xa1, + 0x5a, 0xa6, 0xdf, 0xec, 0x37, 0x5e, 0xa1, 0xd9, 0x6b, 0xbe, 0x1d, 0x6f, 0x36, 0xcb, 0x9a, 0xa3, + 0x95, 0xb2, 0xf3, 0x59, 0x3e, 0x83, 0xe3, 0x88, 0x70, 0x72, 0xf2, 0xe6, 0x62, 0x7f, 0x60, 0x2e, + 0xf9, 0x74, 0x46, 0x16, 0x49, 0xfa, 0xbf, 0xe7, 0xd3, 0x19, 0x59, 0x61, 0x9f, 0xca, 0xc8, 0x22, + 0xd9, 0x7c, 0x4a, 0x41, 0x7a, 0x99, 0x22, 0xda, 0x22, 0xd4, 0x72, 0x8b, 0x7c, 0x0e, 0x89, 0xa6, + 0xe6, 0x68, 0xaa, 0x33, 0xee, 0xe1, 0xec, 0xa5, 0xae, 0xbf, 0xf9, 0x0a, 0x81, 0xba, 0xcc, 0xca, + 0xb8, 0x87, 0xc2, 0xc5, 0x59, 0xf0, 0x70, 0xf2, 0x46, 0x93, 0xe8, 0x69, 0x1a, 0xe2, 0xee, 0x9a, + 0xf4, 0xa6, 0xb7, 0x8e, 0xb6, 0x74, 0xfc, 0xf8, 0xaf, 0xe3, 0x6b, 0x0a, 0xb2, 0x8a, 0x2f, 0x43, + 0xcd, 0xc5, 0xa9, 0xbc, 0x23, 0x7d, 0x08, 0xa9, 0x20, 0x1b, 0x1e, 0xbd, 0x77, 0xae, 0x70, 0x07, + 0x47, 0xf5, 0x9c, 0x1c, 0x94, 0xa4, 0x7c, 0x24, 0x84, 0xd8, 0xf1, 0x21, 0xfc, 0x41, 0x41, 0xc2, + 0xf5, 0x5b, 0x1a, 0x3b, 0xc8, 0xfe, 0x07, 0xdf, 0xe8, 0xd2, 0xb8, 0x38, 0x75, 0x74, 0x5c, 0x44, + 0x8a, 0x10, 0xff, 0xef, 0x8a, 0x70, 0x3a, 0x28, 0x02, 0x39, 0xe3, 0xcf, 0x14, 0x00, 0x1e, 0x42, + 0x5e, 0x5a, 0xf6, 0x20, 0x49, 0x3e, 0xfd, 0x97, 0x8e, 0xc9, 0x0b, 0xf3, 0x59, 0x9e, 0x8e, 0x4c, + 0x0b, 0x32, 0x27, 0xf1, 0xa8, 0x38, 0x61, 0x4e, 0xc4, 0xfe, 0xe6, 0x9c, 0xf8, 0x0a, 0xb6, 0x42, + 0xd7, 0xa4, 0x17, 0x2b, 0x0d, 0xf1, 0x9e, 0xe6, 0xb4, 0x49, 0x4b, 0x7b, 0x6b, 0xba, 0x0a, 0x9b, + 0x64, 0x44, 0xe0, 0xab, 0x2d, 0xb6, 0xe2, 0x00, 0x17, 0xe7, 0xb3, 0xfc, 0xb9, 0xc8, 0x58, 0x21, + 0x57, 0x57, 0x52, 0x0f, 0x3c, 0x11, 0xf7, 0xdf, 0x52, 0x40, 0x47, 0x2f, 0x94, 0x13, 0x43, 0x78, + 0x70, 0xf4, 0x82, 0x5d, 0x15, 0xc5, 0x5f, 0xb8, 0x43, 0x49, 0x2c, 0x8f, 0xe1, 0x9c, 0xb0, 0x78, + 0x9c, 0xac, 0x8e, 0xe5, 0x0e, 0x40, 0xf0, 0x8e, 0x21, 0x61, 0xbc, 0x46, 0x1a, 0xcb, 0x7d, 0xc8, + 0x14, 0x42, 0xaf, 0x1c, 0x7c, 0xbd, 0x93, 0x9d, 0x68, 0x36, 0xe5, 0x90, 0x29, 0xf1, 0x7c, 0x08, + 0x69, 0x01, 0x3f, 0x77, 0x56, 0xbb, 0xbd, 0x05, 0x67, 0xc8, 0xb3, 0x88, 0xf8, 0xcc, 0x45, 0x7c, + 0x92, 0x17, 0x93, 0xeb, 0x10, 0x2f, 0x65, 0x1f, 0x4e, 0xfc, 0xdc, 0x83, 
0x4c, 0x55, 0xd3, 0x1f, + 0x21, 0x47, 0xb0, 0xba, 0x5d, 0xc3, 0xe9, 0x22, 0xd3, 0x39, 0xd1, 0x57, 0xce, 0x3d, 0xa2, 0x8f, + 0xf2, 0xdc, 0x6d, 0xca, 0x21, 0x09, 0xf7, 0x00, 0xb6, 0x31, 0x17, 0xaf, 0x3f, 0x32, 0xad, 0x51, + 0x07, 0x35, 0x5b, 0x68, 0x25, 0xe1, 0x0e, 0x6c, 0x69, 0x51, 0x28, 0x61, 0x5d, 0x16, 0x73, 0x05, + 0xc8, 0x62, 0x6a, 0x19, 0xe9, 0xc8, 0xe8, 0x39, 0x7c, 0xc3, 0x76, 0xa7, 0xc1, 0x49, 0xcc, 0x5c, + 0x1b, 0x32, 0x12, 0x7a, 0xec, 0xf8, 0x4f, 0x31, 0x19, 0xe9, 0xc3, 0x13, 0xa3, 0x78, 0x0f, 0xce, + 0x9a, 0xe8, 0xb1, 0xe3, 0x3e, 0xe4, 0xd4, 0x3e, 0xd2, 0x87, 0xe4, 0xa5, 0x17, 0xba, 0x0e, 0x22, + 0x6a, 0x4e, 0x4e, 0x9a, 0x98, 0xda, 0x65, 0x7d, 0xe3, 0xbb, 0x38, 0x6c, 0xf8, 0xc3, 0x81, 0xbe, + 0x05, 0x57, 0xca, 0xbc, 0xc2, 0xab, 0xca, 0x83, 0xaa, 0xa8, 0xd6, 0xa5, 0x8a, 0x54, 0x51, 0x2a, + 0xfc, 0x5e, 0xe5, 0xa1, 0x58, 0x56, 0xeb, 0x52, 0xad, 0x2a, 0x0a, 0x95, 0x8f, 0x2a, 0x62, 0x39, + 0xbd, 0xc6, 0x6c, 0x4d, 0xa6, 0x6c, 0x32, 0x24, 0xa2, 0xaf, 0xc2, 0x85, 0xc0, 0x52, 0xd8, 0xab, + 0x88, 0x92, 0xa2, 0xd6, 0x14, 0x5e, 0x11, 0xd3, 0x14, 0x03, 0x93, 0x29, 0xbb, 0x8e, 0x65, 0xf4, + 0x5b, 0xb0, 0x1d, 0xc2, 0x1d, 0x48, 0x35, 0x51, 0xaa, 0xd5, 0x6b, 0x04, 0x1a, 0x63, 0xce, 0x4e, + 0xa6, 0x6c, 0x62, 0x21, 0xa6, 0x0b, 0xc0, 0x44, 0xd0, 0x92, 0x28, 0x28, 0x95, 0x03, 0x89, 0xc0, + 0x4f, 0x31, 0xa9, 0xc9, 0x94, 0x85, 0x40, 0x4e, 0xef, 0xc0, 0xc5, 0x10, 0xfe, 0x2e, 0x2f, 0x49, + 0xe2, 0x1e, 0x01, 0xc7, 0x99, 0xe4, 0x64, 0xca, 0x9e, 0x21, 0x42, 0xfa, 0x1d, 0xb8, 0x14, 0x20, + 0xab, 0xbc, 0x70, 0x5f, 0x54, 0x54, 0xe1, 0x60, 0x7f, 0xbf, 0xa2, 0xec, 0x8b, 0x92, 0x92, 0x3e, + 0xcd, 0x64, 0x26, 0x53, 0x36, 0x8d, 0x15, 0x81, 0x9c, 0xfe, 0x00, 0xd8, 0x23, 0x66, 0xbc, 0x70, + 0x5f, 0x3a, 0xf8, 0x74, 0x4f, 0x2c, 0xdf, 0x11, 0x3d, 0xdb, 0x75, 0x66, 0x7b, 0x32, 0x65, 0xcf, + 0x63, 0xed, 0x92, 0x92, 0x7e, 0xff, 0x18, 0x02, 0x59, 0x14, 0xc4, 0x4a, 0x55, 0x51, 0xf9, 0x52, + 0x4d, 0x94, 0x04, 0x31, 0x7d, 0x86, 0xc9, 0x4e, 0xa6, 0x6c, 0x06, 0x6b, 0x89, 0x92, 0xe8, 0xe8, + 0x9b, 0x70, 0x39, 0xb0, 0x97, 0xc4, 0xcf, 0x14, 0xb5, 0x26, 0x7e, 0x5c, 0x77, 0x55, 0x2e, 0xcd, + 0x27, 0xe9, 0x0d, 0x1c, 0xb8, 0xab, 0xf1, 0x15, 0xae, 0x9c, 0x66, 0x21, 0x1d, 0xd8, 0xdd, 0x15, + 0xf9, 0xb2, 0x28, 0xa7, 0x13, 0xb8, 0x32, 0x78, 0xc7, 0xc4, 0x9f, 0xfc, 0x94, 0x5b, 0x2b, 0xd5, + 0x9f, 0x3e, 0xcf, 0x51, 0xcf, 0x9e, 0xe7, 0xa8, 0xdf, 0x9f, 0xe7, 0xa8, 0xef, 0x5f, 0xe4, 0xd6, + 0x9e, 0xbd, 0xc8, 0xad, 0xfd, 0xf6, 0x22, 0xb7, 0xf6, 0xf0, 0xdd, 0x96, 0xe1, 0xb4, 0x07, 0x8d, + 0x82, 0x6e, 0x75, 0x8b, 0xba, 0x65, 0x77, 0x2d, 0xbb, 0x68, 0x34, 0xf4, 0x6b, 0xfe, 0xaf, 0xd6, + 0x35, 0xff, 0x5f, 0xeb, 0xed, 0x9b, 0xd7, 0xc2, 0xbf, 0x5b, 0xee, 0xfd, 0x62, 0x37, 0xd6, 0xbd, + 0x41, 0x76, 0xe3, 0xcf, 0x00, 0x00, 0x00, 0xff, 0xff, 0xb9, 0xb1, 0x1d, 0x54, 0x9d, 0x0d, 0x00, + 0x00, +} + +func (m *ClientState) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ClientState) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ClientState) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.AllowUpdateAfterProposal { + i-- + if m.AllowUpdateAfterProposal { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x20 + } + if m.ConsensusState != nil { + { + size, err := m.ConsensusState.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = 
encodeVarintSolomachine(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + if m.FrozenSequence != 0 { + i = encodeVarintSolomachine(dAtA, i, uint64(m.FrozenSequence)) + i-- + dAtA[i] = 0x10 + } + if m.Sequence != 0 { + i = encodeVarintSolomachine(dAtA, i, uint64(m.Sequence)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *ConsensusState) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ConsensusState) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ConsensusState) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Timestamp != 0 { + i = encodeVarintSolomachine(dAtA, i, uint64(m.Timestamp)) + i-- + dAtA[i] = 0x18 + } + if len(m.Diversifier) > 0 { + i -= len(m.Diversifier) + copy(dAtA[i:], m.Diversifier) + i = encodeVarintSolomachine(dAtA, i, uint64(len(m.Diversifier))) + i-- + dAtA[i] = 0x12 + } + if m.PublicKey != nil { + { + size, err := m.PublicKey.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintSolomachine(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *Header) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Header) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Header) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.NewDiversifier) > 0 { + i -= len(m.NewDiversifier) + copy(dAtA[i:], m.NewDiversifier) + i = encodeVarintSolomachine(dAtA, i, uint64(len(m.NewDiversifier))) + i-- + dAtA[i] = 0x2a + } + if m.NewPublicKey != nil { + { + size, err := m.NewPublicKey.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintSolomachine(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } + if len(m.Signature) > 0 { + i -= len(m.Signature) + copy(dAtA[i:], m.Signature) + i = encodeVarintSolomachine(dAtA, i, uint64(len(m.Signature))) + i-- + dAtA[i] = 0x1a + } + if m.Timestamp != 0 { + i = encodeVarintSolomachine(dAtA, i, uint64(m.Timestamp)) + i-- + dAtA[i] = 0x10 + } + if m.Sequence != 0 { + i = encodeVarintSolomachine(dAtA, i, uint64(m.Sequence)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *Misbehaviour) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Misbehaviour) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Misbehaviour) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.SignatureTwo != nil { + { + size, err := m.SignatureTwo.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintSolomachine(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } + if m.SignatureOne != nil { + { + size, err := m.SignatureOne.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintSolomachine(dAtA, i, 
uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + if m.Sequence != 0 { + i = encodeVarintSolomachine(dAtA, i, uint64(m.Sequence)) + i-- + dAtA[i] = 0x10 + } + if len(m.ClientId) > 0 { + i -= len(m.ClientId) + copy(dAtA[i:], m.ClientId) + i = encodeVarintSolomachine(dAtA, i, uint64(len(m.ClientId))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *SignatureAndData) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SignatureAndData) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *SignatureAndData) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Timestamp != 0 { + i = encodeVarintSolomachine(dAtA, i, uint64(m.Timestamp)) + i-- + dAtA[i] = 0x20 + } + if len(m.Data) > 0 { + i -= len(m.Data) + copy(dAtA[i:], m.Data) + i = encodeVarintSolomachine(dAtA, i, uint64(len(m.Data))) + i-- + dAtA[i] = 0x1a + } + if m.DataType != 0 { + i = encodeVarintSolomachine(dAtA, i, uint64(m.DataType)) + i-- + dAtA[i] = 0x10 + } + if len(m.Signature) > 0 { + i -= len(m.Signature) + copy(dAtA[i:], m.Signature) + i = encodeVarintSolomachine(dAtA, i, uint64(len(m.Signature))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *TimestampedSignatureData) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *TimestampedSignatureData) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *TimestampedSignatureData) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Timestamp != 0 { + i = encodeVarintSolomachine(dAtA, i, uint64(m.Timestamp)) + i-- + dAtA[i] = 0x10 + } + if len(m.SignatureData) > 0 { + i -= len(m.SignatureData) + copy(dAtA[i:], m.SignatureData) + i = encodeVarintSolomachine(dAtA, i, uint64(len(m.SignatureData))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *SignBytes) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SignBytes) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *SignBytes) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Data) > 0 { + i -= len(m.Data) + copy(dAtA[i:], m.Data) + i = encodeVarintSolomachine(dAtA, i, uint64(len(m.Data))) + i-- + dAtA[i] = 0x2a + } + if m.DataType != 0 { + i = encodeVarintSolomachine(dAtA, i, uint64(m.DataType)) + i-- + dAtA[i] = 0x20 + } + if len(m.Diversifier) > 0 { + i -= len(m.Diversifier) + copy(dAtA[i:], m.Diversifier) + i = encodeVarintSolomachine(dAtA, i, uint64(len(m.Diversifier))) + i-- + dAtA[i] = 0x1a + } + if m.Timestamp != 0 { + i = encodeVarintSolomachine(dAtA, i, uint64(m.Timestamp)) + i-- + dAtA[i] = 0x10 + } + if m.Sequence != 0 { + i = encodeVarintSolomachine(dAtA, i, uint64(m.Sequence)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *HeaderData) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := 
m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *HeaderData) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *HeaderData) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.NewDiversifier) > 0 { + i -= len(m.NewDiversifier) + copy(dAtA[i:], m.NewDiversifier) + i = encodeVarintSolomachine(dAtA, i, uint64(len(m.NewDiversifier))) + i-- + dAtA[i] = 0x12 + } + if m.NewPubKey != nil { + { + size, err := m.NewPubKey.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintSolomachine(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *ClientStateData) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ClientStateData) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ClientStateData) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.ClientState != nil { + { + size, err := m.ClientState.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintSolomachine(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if len(m.Path) > 0 { + i -= len(m.Path) + copy(dAtA[i:], m.Path) + i = encodeVarintSolomachine(dAtA, i, uint64(len(m.Path))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *ConsensusStateData) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ConsensusStateData) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ConsensusStateData) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.ConsensusState != nil { + { + size, err := m.ConsensusState.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintSolomachine(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if len(m.Path) > 0 { + i -= len(m.Path) + copy(dAtA[i:], m.Path) + i = encodeVarintSolomachine(dAtA, i, uint64(len(m.Path))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *ConnectionStateData) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ConnectionStateData) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ConnectionStateData) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Connection != nil { + { + size, err := m.Connection.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintSolomachine(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if len(m.Path) > 0 { + i -= len(m.Path) + copy(dAtA[i:], m.Path) + i = encodeVarintSolomachine(dAtA, i, uint64(len(m.Path))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *ChannelStateData) Marshal() (dAtA 
[]byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ChannelStateData) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ChannelStateData) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Channel != nil { + { + size, err := m.Channel.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintSolomachine(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if len(m.Path) > 0 { + i -= len(m.Path) + copy(dAtA[i:], m.Path) + i = encodeVarintSolomachine(dAtA, i, uint64(len(m.Path))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *PacketCommitmentData) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PacketCommitmentData) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PacketCommitmentData) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Commitment) > 0 { + i -= len(m.Commitment) + copy(dAtA[i:], m.Commitment) + i = encodeVarintSolomachine(dAtA, i, uint64(len(m.Commitment))) + i-- + dAtA[i] = 0x12 + } + if len(m.Path) > 0 { + i -= len(m.Path) + copy(dAtA[i:], m.Path) + i = encodeVarintSolomachine(dAtA, i, uint64(len(m.Path))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *PacketAcknowledgementData) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PacketAcknowledgementData) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PacketAcknowledgementData) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Acknowledgement) > 0 { + i -= len(m.Acknowledgement) + copy(dAtA[i:], m.Acknowledgement) + i = encodeVarintSolomachine(dAtA, i, uint64(len(m.Acknowledgement))) + i-- + dAtA[i] = 0x12 + } + if len(m.Path) > 0 { + i -= len(m.Path) + copy(dAtA[i:], m.Path) + i = encodeVarintSolomachine(dAtA, i, uint64(len(m.Path))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *PacketReceiptAbsenceData) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PacketReceiptAbsenceData) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PacketReceiptAbsenceData) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Path) > 0 { + i -= len(m.Path) + copy(dAtA[i:], m.Path) + i = encodeVarintSolomachine(dAtA, i, uint64(len(m.Path))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *NextSequenceRecvData) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m 
*NextSequenceRecvData) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *NextSequenceRecvData) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.NextSeqRecv != 0 { + i = encodeVarintSolomachine(dAtA, i, uint64(m.NextSeqRecv)) + i-- + dAtA[i] = 0x10 + } + if len(m.Path) > 0 { + i -= len(m.Path) + copy(dAtA[i:], m.Path) + i = encodeVarintSolomachine(dAtA, i, uint64(len(m.Path))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func encodeVarintSolomachine(dAtA []byte, offset int, v uint64) int { + offset -= sovSolomachine(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *ClientState) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Sequence != 0 { + n += 1 + sovSolomachine(uint64(m.Sequence)) + } + if m.FrozenSequence != 0 { + n += 1 + sovSolomachine(uint64(m.FrozenSequence)) + } + if m.ConsensusState != nil { + l = m.ConsensusState.Size() + n += 1 + l + sovSolomachine(uint64(l)) + } + if m.AllowUpdateAfterProposal { + n += 2 + } + return n +} + +func (m *ConsensusState) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.PublicKey != nil { + l = m.PublicKey.Size() + n += 1 + l + sovSolomachine(uint64(l)) + } + l = len(m.Diversifier) + if l > 0 { + n += 1 + l + sovSolomachine(uint64(l)) + } + if m.Timestamp != 0 { + n += 1 + sovSolomachine(uint64(m.Timestamp)) + } + return n +} + +func (m *Header) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Sequence != 0 { + n += 1 + sovSolomachine(uint64(m.Sequence)) + } + if m.Timestamp != 0 { + n += 1 + sovSolomachine(uint64(m.Timestamp)) + } + l = len(m.Signature) + if l > 0 { + n += 1 + l + sovSolomachine(uint64(l)) + } + if m.NewPublicKey != nil { + l = m.NewPublicKey.Size() + n += 1 + l + sovSolomachine(uint64(l)) + } + l = len(m.NewDiversifier) + if l > 0 { + n += 1 + l + sovSolomachine(uint64(l)) + } + return n +} + +func (m *Misbehaviour) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.ClientId) + if l > 0 { + n += 1 + l + sovSolomachine(uint64(l)) + } + if m.Sequence != 0 { + n += 1 + sovSolomachine(uint64(m.Sequence)) + } + if m.SignatureOne != nil { + l = m.SignatureOne.Size() + n += 1 + l + sovSolomachine(uint64(l)) + } + if m.SignatureTwo != nil { + l = m.SignatureTwo.Size() + n += 1 + l + sovSolomachine(uint64(l)) + } + return n +} + +func (m *SignatureAndData) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Signature) + if l > 0 { + n += 1 + l + sovSolomachine(uint64(l)) + } + if m.DataType != 0 { + n += 1 + sovSolomachine(uint64(m.DataType)) + } + l = len(m.Data) + if l > 0 { + n += 1 + l + sovSolomachine(uint64(l)) + } + if m.Timestamp != 0 { + n += 1 + sovSolomachine(uint64(m.Timestamp)) + } + return n +} + +func (m *TimestampedSignatureData) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.SignatureData) + if l > 0 { + n += 1 + l + sovSolomachine(uint64(l)) + } + if m.Timestamp != 0 { + n += 1 + sovSolomachine(uint64(m.Timestamp)) + } + return n +} + +func (m *SignBytes) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Sequence != 0 { + n += 1 + sovSolomachine(uint64(m.Sequence)) + } + if m.Timestamp != 0 { + n += 1 + sovSolomachine(uint64(m.Timestamp)) + } + l = len(m.Diversifier) + if l > 0 { + n += 1 + l + 
sovSolomachine(uint64(l)) + } + if m.DataType != 0 { + n += 1 + sovSolomachine(uint64(m.DataType)) + } + l = len(m.Data) + if l > 0 { + n += 1 + l + sovSolomachine(uint64(l)) + } + return n +} + +func (m *HeaderData) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.NewPubKey != nil { + l = m.NewPubKey.Size() + n += 1 + l + sovSolomachine(uint64(l)) + } + l = len(m.NewDiversifier) + if l > 0 { + n += 1 + l + sovSolomachine(uint64(l)) + } + return n +} + +func (m *ClientStateData) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Path) + if l > 0 { + n += 1 + l + sovSolomachine(uint64(l)) + } + if m.ClientState != nil { + l = m.ClientState.Size() + n += 1 + l + sovSolomachine(uint64(l)) + } + return n +} + +func (m *ConsensusStateData) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Path) + if l > 0 { + n += 1 + l + sovSolomachine(uint64(l)) + } + if m.ConsensusState != nil { + l = m.ConsensusState.Size() + n += 1 + l + sovSolomachine(uint64(l)) + } + return n +} + +func (m *ConnectionStateData) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Path) + if l > 0 { + n += 1 + l + sovSolomachine(uint64(l)) + } + if m.Connection != nil { + l = m.Connection.Size() + n += 1 + l + sovSolomachine(uint64(l)) + } + return n +} + +func (m *ChannelStateData) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Path) + if l > 0 { + n += 1 + l + sovSolomachine(uint64(l)) + } + if m.Channel != nil { + l = m.Channel.Size() + n += 1 + l + sovSolomachine(uint64(l)) + } + return n +} + +func (m *PacketCommitmentData) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Path) + if l > 0 { + n += 1 + l + sovSolomachine(uint64(l)) + } + l = len(m.Commitment) + if l > 0 { + n += 1 + l + sovSolomachine(uint64(l)) + } + return n +} + +func (m *PacketAcknowledgementData) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Path) + if l > 0 { + n += 1 + l + sovSolomachine(uint64(l)) + } + l = len(m.Acknowledgement) + if l > 0 { + n += 1 + l + sovSolomachine(uint64(l)) + } + return n +} + +func (m *PacketReceiptAbsenceData) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Path) + if l > 0 { + n += 1 + l + sovSolomachine(uint64(l)) + } + return n +} + +func (m *NextSequenceRecvData) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Path) + if l > 0 { + n += 1 + l + sovSolomachine(uint64(l)) + } + if m.NextSeqRecv != 0 { + n += 1 + sovSolomachine(uint64(m.NextSeqRecv)) + } + return n +} + +func sovSolomachine(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozSolomachine(x uint64) (n int) { + return sovSolomachine(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *ClientState) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSolomachine + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ClientState: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ClientState: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if 
wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Sequence", wireType) + } + m.Sequence = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSolomachine + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Sequence |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field FrozenSequence", wireType) + } + m.FrozenSequence = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSolomachine + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.FrozenSequence |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ConsensusState", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSolomachine + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthSolomachine + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthSolomachine + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ConsensusState == nil { + m.ConsensusState = &ConsensusState{} + } + if err := m.ConsensusState.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field AllowUpdateAfterProposal", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSolomachine + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.AllowUpdateAfterProposal = bool(v != 0) + default: + iNdEx = preIndex + skippy, err := skipSolomachine(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthSolomachine + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ConsensusState) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSolomachine + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ConsensusState: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ConsensusState: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PublicKey", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSolomachine + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthSolomachine + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthSolomachine + } + if postIndex > l { + return 
io.ErrUnexpectedEOF + } + if m.PublicKey == nil { + m.PublicKey = &types.Any{} + } + if err := m.PublicKey.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Diversifier", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSolomachine + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthSolomachine + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthSolomachine + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Diversifier = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Timestamp", wireType) + } + m.Timestamp = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSolomachine + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Timestamp |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipSolomachine(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthSolomachine + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Header) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSolomachine + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Header: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Header: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Sequence", wireType) + } + m.Sequence = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSolomachine + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Sequence |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Timestamp", wireType) + } + m.Timestamp = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSolomachine + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Timestamp |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Signature", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSolomachine + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthSolomachine + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return 
ErrInvalidLengthSolomachine + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Signature = append(m.Signature[:0], dAtA[iNdEx:postIndex]...) + if m.Signature == nil { + m.Signature = []byte{} + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field NewPublicKey", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSolomachine + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthSolomachine + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthSolomachine + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.NewPublicKey == nil { + m.NewPublicKey = &types.Any{} + } + if err := m.NewPublicKey.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field NewDiversifier", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSolomachine + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthSolomachine + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthSolomachine + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.NewDiversifier = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipSolomachine(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthSolomachine + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Misbehaviour) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSolomachine + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Misbehaviour: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Misbehaviour: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ClientId", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSolomachine + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthSolomachine + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthSolomachine + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ClientId = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Sequence", wireType) + } + 
m.Sequence = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSolomachine + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Sequence |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SignatureOne", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSolomachine + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthSolomachine + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthSolomachine + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.SignatureOne == nil { + m.SignatureOne = &SignatureAndData{} + } + if err := m.SignatureOne.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SignatureTwo", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSolomachine + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthSolomachine + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthSolomachine + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.SignatureTwo == nil { + m.SignatureTwo = &SignatureAndData{} + } + if err := m.SignatureTwo.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipSolomachine(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthSolomachine + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *SignatureAndData) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSolomachine + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SignatureAndData: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SignatureAndData: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Signature", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSolomachine + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthSolomachine + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthSolomachine + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Signature = append(m.Signature[:0], dAtA[iNdEx:postIndex]...) 
+ if m.Signature == nil { + m.Signature = []byte{} + } + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field DataType", wireType) + } + m.DataType = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSolomachine + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.DataType |= DataType(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Data", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSolomachine + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthSolomachine + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthSolomachine + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Data = append(m.Data[:0], dAtA[iNdEx:postIndex]...) + if m.Data == nil { + m.Data = []byte{} + } + iNdEx = postIndex + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Timestamp", wireType) + } + m.Timestamp = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSolomachine + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Timestamp |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipSolomachine(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthSolomachine + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *TimestampedSignatureData) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSolomachine + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: TimestampedSignatureData: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: TimestampedSignatureData: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SignatureData", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSolomachine + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthSolomachine + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthSolomachine + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.SignatureData = append(m.SignatureData[:0], dAtA[iNdEx:postIndex]...) 
+ if m.SignatureData == nil { + m.SignatureData = []byte{} + } + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Timestamp", wireType) + } + m.Timestamp = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSolomachine + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Timestamp |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipSolomachine(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthSolomachine + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *SignBytes) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSolomachine + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SignBytes: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SignBytes: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Sequence", wireType) + } + m.Sequence = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSolomachine + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Sequence |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Timestamp", wireType) + } + m.Timestamp = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSolomachine + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Timestamp |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Diversifier", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSolomachine + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthSolomachine + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthSolomachine + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Diversifier = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field DataType", wireType) + } + m.DataType = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSolomachine + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.DataType |= DataType(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Data", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 
ErrIntOverflowSolomachine + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthSolomachine + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthSolomachine + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Data = append(m.Data[:0], dAtA[iNdEx:postIndex]...) + if m.Data == nil { + m.Data = []byte{} + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipSolomachine(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthSolomachine + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *HeaderData) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSolomachine + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: HeaderData: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: HeaderData: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field NewPubKey", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSolomachine + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthSolomachine + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthSolomachine + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.NewPubKey == nil { + m.NewPubKey = &types.Any{} + } + if err := m.NewPubKey.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field NewDiversifier", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSolomachine + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthSolomachine + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthSolomachine + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.NewDiversifier = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipSolomachine(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthSolomachine + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ClientStateData) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 
ErrIntOverflowSolomachine + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ClientStateData: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ClientStateData: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Path", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSolomachine + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthSolomachine + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthSolomachine + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Path = append(m.Path[:0], dAtA[iNdEx:postIndex]...) + if m.Path == nil { + m.Path = []byte{} + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ClientState", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSolomachine + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthSolomachine + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthSolomachine + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ClientState == nil { + m.ClientState = &types.Any{} + } + if err := m.ClientState.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipSolomachine(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthSolomachine + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ConsensusStateData) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSolomachine + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ConsensusStateData: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ConsensusStateData: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Path", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSolomachine + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthSolomachine + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthSolomachine + } + if postIndex > l { + return 
io.ErrUnexpectedEOF + } + m.Path = append(m.Path[:0], dAtA[iNdEx:postIndex]...) + if m.Path == nil { + m.Path = []byte{} + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ConsensusState", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSolomachine + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthSolomachine + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthSolomachine + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ConsensusState == nil { + m.ConsensusState = &types.Any{} + } + if err := m.ConsensusState.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipSolomachine(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthSolomachine + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ConnectionStateData) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSolomachine + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ConnectionStateData: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ConnectionStateData: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Path", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSolomachine + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthSolomachine + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthSolomachine + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Path = append(m.Path[:0], dAtA[iNdEx:postIndex]...) 
+ if m.Path == nil { + m.Path = []byte{} + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Connection", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSolomachine + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthSolomachine + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthSolomachine + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Connection == nil { + m.Connection = &types1.ConnectionEnd{} + } + if err := m.Connection.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipSolomachine(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthSolomachine + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ChannelStateData) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSolomachine + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ChannelStateData: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ChannelStateData: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Path", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSolomachine + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthSolomachine + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthSolomachine + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Path = append(m.Path[:0], dAtA[iNdEx:postIndex]...) 
+ if m.Path == nil { + m.Path = []byte{} + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Channel", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSolomachine + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthSolomachine + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthSolomachine + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Channel == nil { + m.Channel = &types2.Channel{} + } + if err := m.Channel.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipSolomachine(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthSolomachine + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PacketCommitmentData) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSolomachine + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PacketCommitmentData: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PacketCommitmentData: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Path", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSolomachine + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthSolomachine + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthSolomachine + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Path = append(m.Path[:0], dAtA[iNdEx:postIndex]...) + if m.Path == nil { + m.Path = []byte{} + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Commitment", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSolomachine + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthSolomachine + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthSolomachine + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Commitment = append(m.Commitment[:0], dAtA[iNdEx:postIndex]...) 
+ if m.Commitment == nil { + m.Commitment = []byte{} + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipSolomachine(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthSolomachine + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PacketAcknowledgementData) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSolomachine + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PacketAcknowledgementData: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PacketAcknowledgementData: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Path", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSolomachine + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthSolomachine + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthSolomachine + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Path = append(m.Path[:0], dAtA[iNdEx:postIndex]...) + if m.Path == nil { + m.Path = []byte{} + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Acknowledgement", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSolomachine + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthSolomachine + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthSolomachine + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Acknowledgement = append(m.Acknowledgement[:0], dAtA[iNdEx:postIndex]...) 
+ if m.Acknowledgement == nil { + m.Acknowledgement = []byte{} + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipSolomachine(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthSolomachine + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PacketReceiptAbsenceData) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSolomachine + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PacketReceiptAbsenceData: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PacketReceiptAbsenceData: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Path", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSolomachine + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthSolomachine + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthSolomachine + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Path = append(m.Path[:0], dAtA[iNdEx:postIndex]...) + if m.Path == nil { + m.Path = []byte{} + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipSolomachine(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthSolomachine + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *NextSequenceRecvData) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSolomachine + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: NextSequenceRecvData: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: NextSequenceRecvData: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Path", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSolomachine + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthSolomachine + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthSolomachine + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Path = append(m.Path[:0], dAtA[iNdEx:postIndex]...) 
+ if m.Path == nil { + m.Path = []byte{} + } + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field NextSeqRecv", wireType) + } + m.NextSeqRecv = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSolomachine + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.NextSeqRecv |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipSolomachine(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthSolomachine + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipSolomachine(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowSolomachine + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowSolomachine + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowSolomachine + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthSolomachine + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupSolomachine + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthSolomachine + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthSolomachine = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowSolomachine = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupSolomachine = fmt.Errorf("proto: unexpected end of group") +) diff --git a/light-clients/06-solomachine/types/solomachine_test.go b/light-clients/06-solomachine/types/solomachine_test.go new file mode 100644 index 0000000000..50555e4514 --- /dev/null +++ b/light-clients/06-solomachine/types/solomachine_test.go @@ -0,0 +1,113 @@ +package types_test + +import ( + "testing" + + "github.com/stretchr/testify/require" + "github.com/stretchr/testify/suite" + + codectypes "github.com/cosmos/cosmos-sdk/codec/types" + cryptocodec "github.com/cosmos/cosmos-sdk/crypto/codec" + "github.com/cosmos/cosmos-sdk/crypto/keys/secp256k1" + cryptotypes "github.com/cosmos/cosmos-sdk/crypto/types" + "github.com/cosmos/cosmos-sdk/testutil/testdata" + sdk "github.com/cosmos/cosmos-sdk/types" + host "github.com/cosmos/cosmos-sdk/x/ibc/core/24-host" + "github.com/cosmos/cosmos-sdk/x/ibc/core/exported" + "github.com/cosmos/cosmos-sdk/x/ibc/light-clients/06-solomachine/types" + ibctesting "github.com/cosmos/cosmos-sdk/x/ibc/testing" +) + +type SoloMachineTestSuite struct { + suite.Suite + + solomachine *ibctesting.Solomachine // singlesig public key + solomachineMulti 
*ibctesting.Solomachine // multisig public key + coordinator *ibctesting.Coordinator + + // testing chain used for convenience and readability + chainA *ibctesting.TestChain + chainB *ibctesting.TestChain + + store sdk.KVStore +} + +func (suite *SoloMachineTestSuite) SetupTest() { + suite.coordinator = ibctesting.NewCoordinator(suite.T(), 2) + suite.chainA = suite.coordinator.GetChain(ibctesting.GetChainID(0)) + suite.chainB = suite.coordinator.GetChain(ibctesting.GetChainID(1)) + + suite.solomachine = ibctesting.NewSolomachine(suite.T(), suite.chainA.Codec, "solomachinesingle", "testing", 1) + suite.solomachineMulti = ibctesting.NewSolomachine(suite.T(), suite.chainA.Codec, "solomachinemulti", "testing", 4) + + suite.store = suite.chainA.App.IBCKeeper.ClientKeeper.ClientStore(suite.chainA.GetContext(), exported.Solomachine) +} + +func TestSoloMachineTestSuite(t *testing.T) { + suite.Run(t, new(SoloMachineTestSuite)) +} + +func (suite *SoloMachineTestSuite) GetSequenceFromStore() uint64 { + bz := suite.store.Get(host.ClientStateKey()) + suite.Require().NotNil(bz) + + var clientState exported.ClientState + err := suite.chainA.Codec.UnmarshalInterface(bz, &clientState) + suite.Require().NoError(err) + return clientState.GetLatestHeight().GetRevisionHeight() +} + +func (suite *SoloMachineTestSuite) GetInvalidProof() []byte { + invalidProof, err := suite.chainA.Codec.MarshalBinaryBare(&types.TimestampedSignatureData{Timestamp: suite.solomachine.Time}) + suite.Require().NoError(err) + + return invalidProof +} + +func TestUnpackInterfaces_Header(t *testing.T) { + registry := testdata.NewTestInterfaceRegistry() + cryptocodec.RegisterInterfaces(registry) + + pk := secp256k1.GenPrivKey().PubKey().(cryptotypes.PubKey) + any, err := codectypes.NewAnyWithValue(pk) + require.NoError(t, err) + + header := types.Header{ + NewPublicKey: any, + } + bz, err := header.Marshal() + require.NoError(t, err) + + var header2 types.Header + err = header2.Unmarshal(bz) + require.NoError(t, err) + + err = codectypes.UnpackInterfaces(header2, registry) + require.NoError(t, err) + + require.Equal(t, pk, header2.NewPublicKey.GetCachedValue()) +} + +func TestUnpackInterfaces_HeaderData(t *testing.T) { + registry := testdata.NewTestInterfaceRegistry() + cryptocodec.RegisterInterfaces(registry) + + pk := secp256k1.GenPrivKey().PubKey().(cryptotypes.PubKey) + any, err := codectypes.NewAnyWithValue(pk) + require.NoError(t, err) + + hd := types.HeaderData{ + NewPubKey: any, + } + bz, err := hd.Marshal() + require.NoError(t, err) + + var hd2 types.HeaderData + err = hd2.Unmarshal(bz) + require.NoError(t, err) + + err = codectypes.UnpackInterfaces(hd2, registry) + require.NoError(t, err) + + require.Equal(t, pk, hd2.NewPubKey.GetCachedValue()) +} diff --git a/light-clients/06-solomachine/types/update.go b/light-clients/06-solomachine/types/update.go new file mode 100644 index 0000000000..4cf31fd988 --- /dev/null +++ b/light-clients/06-solomachine/types/update.go @@ -0,0 +1,89 @@ +package types + +import ( + "github.com/cosmos/cosmos-sdk/codec" + sdk "github.com/cosmos/cosmos-sdk/types" + sdkerrors "github.com/cosmos/cosmos-sdk/types/errors" + clienttypes "github.com/cosmos/cosmos-sdk/x/ibc/core/02-client/types" + "github.com/cosmos/cosmos-sdk/x/ibc/core/exported" +) + +// CheckHeaderAndUpdateState checks if the provided header is valid and updates +// the consensus state if appropriate. 
It returns an error if: +// - the header provided is not parseable to a solo machine header +// - the header sequence does not match the current sequence +// - the header timestamp is less than the consensus state timestamp +// - the currently registered public key did not provide the update signature +func (cs ClientState) CheckHeaderAndUpdateState( + ctx sdk.Context, cdc codec.BinaryMarshaler, clientStore sdk.KVStore, + header exported.Header, +) (exported.ClientState, exported.ConsensusState, error) { + smHeader, ok := header.(*Header) + if !ok { + return nil, nil, sdkerrors.Wrapf( + clienttypes.ErrInvalidHeader, "header type %T, expected %T", header, &Header{}, + ) + } + + if err := checkHeader(cdc, &cs, smHeader); err != nil { + return nil, nil, err + } + + clientState, consensusState := update(&cs, smHeader) + return clientState, consensusState, nil +} + +// checkHeader checks if the Solo Machine update signature is valid. +func checkHeader(cdc codec.BinaryMarshaler, clientState *ClientState, header *Header) error { + // assert update sequence is current sequence + if header.Sequence != clientState.Sequence { + return sdkerrors.Wrapf( + clienttypes.ErrInvalidHeader, + "header sequence does not match the client state sequence (%d != %d)", header.Sequence, clientState.Sequence, + ) + } + + // assert update timestamp is not less than current consensus state timestamp + if header.Timestamp < clientState.ConsensusState.Timestamp { + return sdkerrors.Wrapf( + clienttypes.ErrInvalidHeader, + "header timestamp is less than the consensus state timestamp (%d < %d)", header.Timestamp, clientState.ConsensusState.Timestamp, + ) + } + + // assert currently registered public key signed over the new public key with correct sequence + data, err := HeaderSignBytes(cdc, header) + if err != nil { + return err + } + + sigData, err := UnmarshalSignatureData(cdc, header.Signature) + if err != nil { + return err + } + + publicKey, err := clientState.ConsensusState.GetPubKey() + if err != nil { + return err + } + + if err := VerifySignature(publicKey, data, sigData); err != nil { + return sdkerrors.Wrap(ErrInvalidHeader, err.Error()) + } + + return nil +} + +// update the consensus state to the new public key and an incremented sequence +func update(clientState *ClientState, header *Header) (*ClientState, *ConsensusState) { + consensusState := &ConsensusState{ + PublicKey: header.NewPublicKey, + Diversifier: header.NewDiversifier, + Timestamp: header.Timestamp, + } + + // increment sequence number + clientState.Sequence++ + clientState.ConsensusState = consensusState + return clientState, consensusState +} diff --git a/light-clients/06-solomachine/types/update_test.go b/light-clients/06-solomachine/types/update_test.go new file mode 100644 index 0000000000..e49992cbb5 --- /dev/null +++ b/light-clients/06-solomachine/types/update_test.go @@ -0,0 +1,181 @@ +package types_test + +import ( + codectypes "github.com/cosmos/cosmos-sdk/codec/types" + sdk "github.com/cosmos/cosmos-sdk/types" + "github.com/cosmos/cosmos-sdk/x/ibc/core/exported" + "github.com/cosmos/cosmos-sdk/x/ibc/light-clients/06-solomachine/types" + ibctmtypes "github.com/cosmos/cosmos-sdk/x/ibc/light-clients/07-tendermint/types" + ibctesting "github.com/cosmos/cosmos-sdk/x/ibc/testing" +) + +func (suite *SoloMachineTestSuite) TestCheckHeaderAndUpdateState() { + var ( + clientState exported.ClientState + header exported.Header + ) + + // test singlesig and multisig public keys + for _, solomachine := range
[]*ibctesting.Solomachine{suite.solomachine, suite.solomachineMulti} { + + testCases := []struct { + name string + setup func() + expPass bool + }{ + { + "successful update", + func() { + clientState = solomachine.ClientState() + header = solomachine.CreateHeader() + }, + true, + }, + { + "wrong client state type", + func() { + clientState = &ibctmtypes.ClientState{} + header = solomachine.CreateHeader() + }, + false, + }, + { + "invalid header type", + func() { + clientState = solomachine.ClientState() + header = &ibctmtypes.Header{} + }, + false, + }, + { + "wrong sequence in header", + func() { + clientState = solomachine.ClientState() + // store in temp before assigning to interface type + h := solomachine.CreateHeader() + h.Sequence++ + header = h + }, + false, + }, + { + "invalid header Signature", + func() { + clientState = solomachine.ClientState() + h := solomachine.CreateHeader() + h.Signature = suite.GetInvalidProof() + header = h + }, false, + }, + { + "invalid timestamp in header", + func() { + clientState = solomachine.ClientState() + h := solomachine.CreateHeader() + h.Timestamp-- + header = h + }, false, + }, + { + "signature uses wrong sequence", + func() { + clientState = solomachine.ClientState() + solomachine.Sequence++ + header = solomachine.CreateHeader() + }, + false, + }, + { + "signature uses new pubkey to sign", + func() { + // store in temp before assigning to interface type + cs := solomachine.ClientState() + h := solomachine.CreateHeader() + + publicKey, err := codectypes.NewAnyWithValue(solomachine.PublicKey) + suite.NoError(err) + + data := &types.HeaderData{ + NewPubKey: publicKey, + NewDiversifier: h.NewDiversifier, + } + + dataBz, err := suite.chainA.Codec.MarshalBinaryBare(data) + suite.Require().NoError(err) + + // generate invalid signature + signBytes := &types.SignBytes{ + Sequence: cs.Sequence, + Timestamp: solomachine.Time, + Diversifier: solomachine.Diversifier, + DataType: types.CLIENT, + Data: dataBz, + } + + signBz, err := suite.chainA.Codec.MarshalBinaryBare(signBytes) + suite.Require().NoError(err) + + sig := solomachine.GenerateSignature(signBz) + suite.Require().NoError(err) + h.Signature = sig + + clientState = cs + header = h + + }, + false, + }, + { + "signature signs over old pubkey", + func() { + // store in temp before assigning to interface type + cs := solomachine.ClientState() + oldPubKey := solomachine.PublicKey + h := solomachine.CreateHeader() + + // generate invalid signature + data := append(sdk.Uint64ToBigEndian(cs.Sequence), oldPubKey.Bytes()...)
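+ // the bytes signed here are raw sequence||old pubkey data rather than the HeaderSignBytes the client verifies against, so the update must be rejected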
+ sig := solomachine.GenerateSignature(data) + h.Signature = sig + + clientState = cs + header = h + }, + false, + }, + { + "consensus state public key is nil", + func() { + cs := solomachine.ClientState() + cs.ConsensusState.PublicKey = nil + clientState = cs + header = solomachine.CreateHeader() + }, + false, + }, + } + + for _, tc := range testCases { + tc := tc + + suite.Run(tc.name, func() { + // setup test + tc.setup() + + clientState, consensusState, err := clientState.CheckHeaderAndUpdateState(suite.chainA.GetContext(), suite.chainA.Codec, suite.store, header) + + if tc.expPass { + suite.Require().NoError(err) + suite.Require().Equal(header.(*types.Header).NewPublicKey, clientState.(*types.ClientState).ConsensusState.PublicKey) + suite.Require().Equal(uint64(0), clientState.(*types.ClientState).FrozenSequence) + suite.Require().Equal(header.(*types.Header).Sequence+1, clientState.(*types.ClientState).Sequence) + suite.Require().Equal(consensusState, clientState.(*types.ClientState).ConsensusState) + } else { + suite.Require().Error(err) + suite.Require().Nil(clientState) + suite.Require().Nil(consensusState) + } + }) + } + } +} diff --git a/light-clients/07-tendermint/doc.go b/light-clients/07-tendermint/doc.go new file mode 100644 index 0000000000..26aa430a82 --- /dev/null +++ b/light-clients/07-tendermint/doc.go @@ -0,0 +1,5 @@ +/* +Package tendermint implements concrete `ConsensusState`, `Header`, +`Misbehaviour` and `Equivocation` types for the Tendermint consensus light client. +*/ +package tendermint diff --git a/light-clients/07-tendermint/module.go b/light-clients/07-tendermint/module.go new file mode 100644 index 0000000000..4c5cc2f947 --- /dev/null +++ b/light-clients/07-tendermint/module.go @@ -0,0 +1,10 @@ +package tendermint + +import ( + "github.com/cosmos/cosmos-sdk/x/ibc/light-clients/07-tendermint/types" +) + +// Name returns the IBC client name +func Name() string { + return types.SubModuleName +} diff --git a/light-clients/07-tendermint/types/client_state.go b/light-clients/07-tendermint/types/client_state.go new file mode 100644 index 0000000000..c2bb5239f5 --- /dev/null +++ b/light-clients/07-tendermint/types/client_state.go @@ -0,0 +1,532 @@ +package types + +import ( + "strings" + "time" + + ics23 "github.com/confio/ics23/go" + "github.com/tendermint/tendermint/light" + + "github.com/cosmos/cosmos-sdk/codec" + sdk "github.com/cosmos/cosmos-sdk/types" + sdkerrors "github.com/cosmos/cosmos-sdk/types/errors" + clienttypes "github.com/cosmos/cosmos-sdk/x/ibc/core/02-client/types" + connectiontypes "github.com/cosmos/cosmos-sdk/x/ibc/core/03-connection/types" + channeltypes "github.com/cosmos/cosmos-sdk/x/ibc/core/04-channel/types" + commitmenttypes "github.com/cosmos/cosmos-sdk/x/ibc/core/23-commitment/types" + host "github.com/cosmos/cosmos-sdk/x/ibc/core/24-host" + "github.com/cosmos/cosmos-sdk/x/ibc/core/exported" +) + +var _ exported.ClientState = (*ClientState)(nil) + +// NewClientState creates a new ClientState instance +func NewClientState( + chainID string, trustLevel Fraction, + trustingPeriod, ubdPeriod, maxClockDrift time.Duration, + latestHeight clienttypes.Height, specs []*ics23.ProofSpec, + upgradePath []string, allowUpdateAfterExpiry, allowUpdateAfterMisbehaviour bool, +) *ClientState { + return &ClientState{ + ChainId: chainID, + TrustLevel: trustLevel, + TrustingPeriod: trustingPeriod, + UnbondingPeriod: ubdPeriod, + MaxClockDrift: maxClockDrift, + LatestHeight: latestHeight, + FrozenHeight: clienttypes.ZeroHeight(), + ProofSpecs: specs, +
UpgradePath: upgradePath, + AllowUpdateAfterExpiry: allowUpdateAfterExpiry, + AllowUpdateAfterMisbehaviour: allowUpdateAfterMisbehaviour, + } +} + +// GetChainID returns the chain-id +func (cs ClientState) GetChainID() string { + return cs.ChainId +} + +// ClientType is tendermint. +func (cs ClientState) ClientType() string { + return exported.Tendermint +} + +// GetLatestHeight returns latest block height. +func (cs ClientState) GetLatestHeight() exported.Height { + return cs.LatestHeight +} + +// IsFrozen returns true if the frozen height has been set. +func (cs ClientState) IsFrozen() bool { + return !cs.FrozenHeight.IsZero() +} + +// GetFrozenHeight returns the height at which client is frozen +// NOTE: FrozenHeight is zero if client is unfrozen +func (cs ClientState) GetFrozenHeight() exported.Height { + return cs.FrozenHeight +} + +// IsExpired returns whether or not the client has passed the trusting period since the last +// update (in which case no headers are considered valid). +func (cs ClientState) IsExpired(latestTimestamp, now time.Time) bool { + expirationTime := latestTimestamp.Add(cs.TrustingPeriod) + return !expirationTime.After(now) +} + +// Validate performs a basic validation of the client state fields. +func (cs ClientState) Validate() error { + if strings.TrimSpace(cs.ChainId) == "" { + return sdkerrors.Wrap(ErrInvalidChainID, "chain id cannot be empty string") + } + if err := light.ValidateTrustLevel(cs.TrustLevel.ToTendermint()); err != nil { + return err + } + if cs.TrustingPeriod == 0 { + return sdkerrors.Wrap(ErrInvalidTrustingPeriod, "trusting period cannot be zero") + } + if cs.UnbondingPeriod == 0 { + return sdkerrors.Wrap(ErrInvalidUnbondingPeriod, "unbonding period cannot be zero") + } + if cs.MaxClockDrift == 0 { + return sdkerrors.Wrap(ErrInvalidMaxClockDrift, "max clock drift cannot be zero") + } + if cs.LatestHeight.RevisionHeight == 0 { + return sdkerrors.Wrapf(ErrInvalidHeaderHeight, "tendermint revision height cannot be zero") + } + if cs.TrustingPeriod >= cs.UnbondingPeriod { + return sdkerrors.Wrapf( + ErrInvalidTrustingPeriod, + "trusting period (%s) should be < unbonding period (%s)", cs.TrustingPeriod, cs.UnbondingPeriod, + ) + } + + if cs.ProofSpecs == nil { + return sdkerrors.Wrap(ErrInvalidProofSpecs, "proof specs cannot be nil for tm client") + } + for i, spec := range cs.ProofSpecs { + if spec == nil { + return sdkerrors.Wrapf(ErrInvalidProofSpecs, "proof spec cannot be nil at index: %d", i) + } + } + // UpgradePath may be empty, but if it isn't, each key must be non-empty + for i, k := range cs.UpgradePath { + if strings.TrimSpace(k) == "" { + return sdkerrors.Wrapf(clienttypes.ErrInvalidClient, "key in upgrade path at index %d cannot be empty", i) + } + } + + return nil +} + +// GetProofSpecs returns the format the client expects for proof verification +// as a string array specifying the proof type for each position in chained proof +func (cs ClientState) GetProofSpecs() []*ics23.ProofSpec { + return cs.ProofSpecs +} + +// ZeroCustomFields returns a ClientState that is a copy of the current ClientState +// with all client customizable fields zeroed out +func (cs ClientState) ZeroCustomFields() exported.ClientState { + // copy over all chain-specified fields + // and leave custom fields empty + return &ClientState{ + ChainId: cs.ChainId, + UnbondingPeriod: cs.UnbondingPeriod, + LatestHeight: cs.LatestHeight, + ProofSpecs: cs.ProofSpecs, + UpgradePath: cs.UpgradePath, + } +} + +// Initialize will check that initial consensus state is a 
Tendermint consensus state +// and will store ProcessedTime for initial consensus state as ctx.BlockTime() +func (cs ClientState) Initialize(ctx sdk.Context, _ codec.BinaryMarshaler, clientStore sdk.KVStore, consState exported.ConsensusState) error { + if _, ok := consState.(*ConsensusState); !ok { + return sdkerrors.Wrapf(clienttypes.ErrInvalidConsensus, "invalid initial consensus state. expected type: %T, got: %T", + &ConsensusState{}, consState) + } + // set processed time with initial consensus state height equal to initial client state's latest height + SetProcessedTime(clientStore, cs.GetLatestHeight(), uint64(ctx.BlockTime().UnixNano())) + return nil +} + +// VerifyClientState verifies a proof of the client state of the running chain +// stored on the target machine +func (cs ClientState) VerifyClientState( + store sdk.KVStore, + cdc codec.BinaryMarshaler, + height exported.Height, + prefix exported.Prefix, + counterpartyClientIdentifier string, + proof []byte, + clientState exported.ClientState, +) error { + merkleProof, provingConsensusState, err := produceVerificationArgs(store, cdc, cs, height, prefix, proof) + if err != nil { + return err + } + + clientPrefixedPath := commitmenttypes.NewMerklePath(host.FullClientStatePath(counterpartyClientIdentifier)) + path, err := commitmenttypes.ApplyPrefix(prefix, clientPrefixedPath) + if err != nil { + return err + } + + if clientState == nil { + return sdkerrors.Wrap(clienttypes.ErrInvalidClient, "client state cannot be empty") + } + + _, ok := clientState.(*ClientState) + if !ok { + return sdkerrors.Wrapf(clienttypes.ErrInvalidClient, "invalid client type %T, expected %T", clientState, &ClientState{}) + } + + bz, err := cdc.MarshalInterface(clientState) + if err != nil { + return err + } + + return merkleProof.VerifyMembership(cs.ProofSpecs, provingConsensusState.GetRoot(), path, bz) +} + +// VerifyClientConsensusState verifies a proof of the consensus state of the +// Tendermint client stored on the target machine. +func (cs ClientState) VerifyClientConsensusState( + store sdk.KVStore, + cdc codec.BinaryMarshaler, + height exported.Height, + counterpartyClientIdentifier string, + consensusHeight exported.Height, + prefix exported.Prefix, + proof []byte, + consensusState exported.ConsensusState, +) error { + merkleProof, provingConsensusState, err := produceVerificationArgs(store, cdc, cs, height, prefix, proof) + if err != nil { + return err + } + + clientPrefixedPath := commitmenttypes.NewMerklePath(host.FullConsensusStatePath(counterpartyClientIdentifier, consensusHeight)) + path, err := commitmenttypes.ApplyPrefix(prefix, clientPrefixedPath) + if err != nil { + return err + } + + if consensusState == nil { + return sdkerrors.Wrap(clienttypes.ErrInvalidConsensus, "consensus state cannot be empty") + } + + _, ok := consensusState.(*ConsensusState) + if !ok { + return sdkerrors.Wrapf(clienttypes.ErrInvalidConsensus, "invalid consensus type %T, expected %T", consensusState, &ConsensusState{}) + } + + bz, err := cdc.MarshalInterface(consensusState) + if err != nil { + return err + } + + if err := merkleProof.VerifyMembership(cs.ProofSpecs, provingConsensusState.GetRoot(), path, bz); err != nil { + return err + } + + return nil +} + +// VerifyConnectionState verifies a proof of the connection state of the +// specified connection end stored on the target machine. 
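+// The connection end is expected to be stored under the counterparty's commitment prefix at +// the standard connection path (host.ConnectionPath), and the proof is checked as a membership +// proof against the consensus state root at the given proof height.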
+func (cs ClientState) VerifyConnectionState( + store sdk.KVStore, + cdc codec.BinaryMarshaler, + height exported.Height, + prefix exported.Prefix, + proof []byte, + connectionID string, + connectionEnd exported.ConnectionI, +) error { + merkleProof, consensusState, err := produceVerificationArgs(store, cdc, cs, height, prefix, proof) + if err != nil { + return err + } + + connectionPath := commitmenttypes.NewMerklePath(host.ConnectionPath(connectionID)) + path, err := commitmenttypes.ApplyPrefix(prefix, connectionPath) + if err != nil { + return err + } + + connection, ok := connectionEnd.(connectiontypes.ConnectionEnd) + if !ok { + return sdkerrors.Wrapf(sdkerrors.ErrInvalidType, "invalid connection type %T", connectionEnd) + } + + bz, err := cdc.MarshalBinaryBare(&connection) + if err != nil { + return err + } + + if err := merkleProof.VerifyMembership(cs.ProofSpecs, consensusState.GetRoot(), path, bz); err != nil { + return err + } + + return nil +} + +// VerifyChannelState verifies a proof of the channel state of the specified +// channel end, under the specified port, stored on the target machine. +func (cs ClientState) VerifyChannelState( + store sdk.KVStore, + cdc codec.BinaryMarshaler, + height exported.Height, + prefix exported.Prefix, + proof []byte, + portID, + channelID string, + channel exported.ChannelI, +) error { + merkleProof, consensusState, err := produceVerificationArgs(store, cdc, cs, height, prefix, proof) + if err != nil { + return err + } + + channelPath := commitmenttypes.NewMerklePath(host.ChannelPath(portID, channelID)) + path, err := commitmenttypes.ApplyPrefix(prefix, channelPath) + if err != nil { + return err + } + + channelEnd, ok := channel.(channeltypes.Channel) + if !ok { + return sdkerrors.Wrapf(sdkerrors.ErrInvalidType, "invalid channel type %T", channel) + } + + bz, err := cdc.MarshalBinaryBare(&channelEnd) + if err != nil { + return err + } + + if err := merkleProof.VerifyMembership(cs.ProofSpecs, consensusState.GetRoot(), path, bz); err != nil { + return err + } + + return nil +} + +// VerifyPacketCommitment verifies a proof of an outgoing packet commitment at +// the specified port, specified channel, and specified sequence. +func (cs ClientState) VerifyPacketCommitment( + store sdk.KVStore, + cdc codec.BinaryMarshaler, + height exported.Height, + currentTimestamp uint64, + delayPeriod uint64, + prefix exported.Prefix, + proof []byte, + portID, + channelID string, + sequence uint64, + commitmentBytes []byte, +) error { + merkleProof, consensusState, err := produceVerificationArgs(store, cdc, cs, height, prefix, proof) + if err != nil { + return err + } + + // check delay period has passed + if err := verifyDelayPeriodPassed(store, height, currentTimestamp, delayPeriod); err != nil { + return err + } + + commitmentPath := commitmenttypes.NewMerklePath(host.PacketCommitmentPath(portID, channelID, sequence)) + path, err := commitmenttypes.ApplyPrefix(prefix, commitmentPath) + if err != nil { + return err + } + + if err := merkleProof.VerifyMembership(cs.ProofSpecs, consensusState.GetRoot(), path, commitmentBytes); err != nil { + return err + } + + return nil +} + +// VerifyPacketAcknowledgement verifies a proof of an incoming packet +// acknowledgement at the specified port, specified channel, and specified sequence. 
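+// The value verified is the acknowledgement commitment produced by +// channeltypes.CommitAcknowledgement, not the raw acknowledgement bytes.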
+func (cs ClientState) VerifyPacketAcknowledgement( + store sdk.KVStore, + cdc codec.BinaryMarshaler, + height exported.Height, + currentTimestamp uint64, + delayPeriod uint64, + prefix exported.Prefix, + proof []byte, + portID, + channelID string, + sequence uint64, + acknowledgement []byte, +) error { + merkleProof, consensusState, err := produceVerificationArgs(store, cdc, cs, height, prefix, proof) + if err != nil { + return err + } + + // check delay period has passed + if err := verifyDelayPeriodPassed(store, height, currentTimestamp, delayPeriod); err != nil { + return err + } + + ackPath := commitmenttypes.NewMerklePath(host.PacketAcknowledgementPath(portID, channelID, sequence)) + path, err := commitmenttypes.ApplyPrefix(prefix, ackPath) + if err != nil { + return err + } + + if err := merkleProof.VerifyMembership(cs.ProofSpecs, consensusState.GetRoot(), path, channeltypes.CommitAcknowledgement(acknowledgement)); err != nil { + return err + } + + return nil +} + +// VerifyPacketReceiptAbsence verifies a proof of the absence of an +// incoming packet receipt at the specified port, specified channel, and +// specified sequence. +func (cs ClientState) VerifyPacketReceiptAbsence( + store sdk.KVStore, + cdc codec.BinaryMarshaler, + height exported.Height, + currentTimestamp uint64, + delayPeriod uint64, + prefix exported.Prefix, + proof []byte, + portID, + channelID string, + sequence uint64, +) error { + merkleProof, consensusState, err := produceVerificationArgs(store, cdc, cs, height, prefix, proof) + if err != nil { + return err + } + + // check delay period has passed + if err := verifyDelayPeriodPassed(store, height, currentTimestamp, delayPeriod); err != nil { + return err + } + + receiptPath := commitmenttypes.NewMerklePath(host.PacketReceiptPath(portID, channelID, sequence)) + path, err := commitmenttypes.ApplyPrefix(prefix, receiptPath) + if err != nil { + return err + } + + if err := merkleProof.VerifyNonMembership(cs.ProofSpecs, consensusState.GetRoot(), path); err != nil { + return err + } + + return nil +} + +// VerifyNextSequenceRecv verifies a proof of the next sequence number to be +// received of the specified channel at the specified port. +func (cs ClientState) VerifyNextSequenceRecv( + store sdk.KVStore, + cdc codec.BinaryMarshaler, + height exported.Height, + currentTimestamp uint64, + delayPeriod uint64, + prefix exported.Prefix, + proof []byte, + portID, + channelID string, + nextSequenceRecv uint64, +) error { + merkleProof, consensusState, err := produceVerificationArgs(store, cdc, cs, height, prefix, proof) + if err != nil { + return err + } + + // check delay period has passed + if err := verifyDelayPeriodPassed(store, height, currentTimestamp, delayPeriod); err != nil { + return err + } + + nextSequenceRecvPath := commitmenttypes.NewMerklePath(host.NextSequenceRecvPath(portID, channelID)) + path, err := commitmenttypes.ApplyPrefix(prefix, nextSequenceRecvPath) + if err != nil { + return err + } + + bz := sdk.Uint64ToBigEndian(nextSequenceRecv) + + if err := merkleProof.VerifyMembership(cs.ProofSpecs, consensusState.GetRoot(), path, bz); err != nil { + return err + } + + return nil +} + +// verifyDelayPeriodPassed will ensure that at least delayPeriod amount of time has passed since consensus state was submitted +// before allowing verification to continue. 
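+// For example, if the consensus state at proofHeight was stored at processed time T and the
+// delay period is 10 seconds, verification succeeds only once the executing chain's timestamp
+// reaches T + 10s; the comparison is inclusive, so a timestamp exactly equal to T + 10s passes.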
+func verifyDelayPeriodPassed(store sdk.KVStore, proofHeight exported.Height, currentTimestamp, delayPeriod uint64) error {
+    // check that executing chain's timestamp has passed consensusState's processed time + delay period
+    processedTime, ok := GetProcessedTime(store, proofHeight)
+    if !ok {
+        return sdkerrors.Wrapf(ErrProcessedTimeNotFound, "processed time not found for height: %s", proofHeight)
+    }
+    validTime := processedTime + delayPeriod
+    // NOTE: delay period is inclusive, so if currentTimestamp is validTime, then we return no error
+    if validTime > currentTimestamp {
+        return sdkerrors.Wrapf(ErrDelayPeriodNotPassed, "cannot verify packet until time: %d, current time: %d",
+            validTime, currentTimestamp)
+    }
+    return nil
+}
+
+// produceVerificationArgs performs the basic checks on the arguments that are
+// shared between the verification functions and returns the unmarshalled
+// merkle proof, the consensus state and an error if one occurred.
+func produceVerificationArgs(
+    store sdk.KVStore,
+    cdc codec.BinaryMarshaler,
+    cs ClientState,
+    height exported.Height,
+    prefix exported.Prefix,
+    proof []byte,
+) (merkleProof commitmenttypes.MerkleProof, consensusState *ConsensusState, err error) {
+    if cs.GetLatestHeight().LT(height) {
+        return commitmenttypes.MerkleProof{}, nil, sdkerrors.Wrapf(
+            sdkerrors.ErrInvalidHeight,
+            "client state height < proof height (%d < %d)", cs.GetLatestHeight(), height,
+        )
+    }
+
+    if cs.IsFrozen() && !cs.FrozenHeight.GT(height) {
+        return commitmenttypes.MerkleProof{}, nil, clienttypes.ErrClientFrozen
+    }
+
+    if prefix == nil {
+        return commitmenttypes.MerkleProof{}, nil, sdkerrors.Wrap(commitmenttypes.ErrInvalidPrefix, "prefix cannot be empty")
+    }
+
+    _, ok := prefix.(*commitmenttypes.MerklePrefix)
+    if !ok {
+        return commitmenttypes.MerkleProof{}, nil, sdkerrors.Wrapf(commitmenttypes.ErrInvalidPrefix, "invalid prefix type %T, expected *MerklePrefix", prefix)
+    }
+
+    if proof == nil {
+        return commitmenttypes.MerkleProof{}, nil, sdkerrors.Wrap(commitmenttypes.ErrInvalidProof, "proof cannot be empty")
+    }
+
+    if err = cdc.UnmarshalBinaryBare(proof, &merkleProof); err != nil {
+        return commitmenttypes.MerkleProof{}, nil, sdkerrors.Wrap(commitmenttypes.ErrInvalidProof, "failed to unmarshal proof into commitment merkle proof")
+    }
+
+    consensusState, err = GetConsensusState(store, cdc, height)
+    if err != nil {
+        return commitmenttypes.MerkleProof{}, nil, err
+    }
+
+    return merkleProof, consensusState, nil
+}
diff --git a/light-clients/07-tendermint/types/client_state_test.go b/light-clients/07-tendermint/types/client_state_test.go
new file mode 100644
index 0000000000..744b4729f6
--- /dev/null
+++ b/light-clients/07-tendermint/types/client_state_test.go
@@ -0,0 +1,779 @@
+package types_test
+
+import (
+    "time"
+
+    ics23 "github.com/confio/ics23/go"
+
+    clienttypes "github.com/cosmos/cosmos-sdk/x/ibc/core/02-client/types"
+    channeltypes "github.com/cosmos/cosmos-sdk/x/ibc/core/04-channel/types"
+    commitmenttypes "github.com/cosmos/cosmos-sdk/x/ibc/core/23-commitment/types"
+    host "github.com/cosmos/cosmos-sdk/x/ibc/core/24-host"
+    "github.com/cosmos/cosmos-sdk/x/ibc/core/exported"
+    "github.com/cosmos/cosmos-sdk/x/ibc/light-clients/07-tendermint/types"
+    ibctesting "github.com/cosmos/cosmos-sdk/x/ibc/testing"
+    ibcmock "github.com/cosmos/cosmos-sdk/x/ibc/testing/mock"
+)
+
+const (
+    testClientID     = "clientidone"
+    testConnectionID = "connectionid"
+    testPortID       = "testportid"
+    testChannelID    = "testchannelid"
+    testSequence     = 1
+)
+
+var (
invalidProof = []byte("invalid proof") +) + +func (suite *TendermintTestSuite) TestValidate() { + testCases := []struct { + name string + clientState *types.ClientState + expPass bool + }{ + { + name: "valid client", + clientState: types.NewClientState(chainID, types.DefaultTrustLevel, trustingPeriod, ubdPeriod, maxClockDrift, height, commitmenttypes.GetSDKSpecs(), upgradePath, false, false), + expPass: true, + }, + { + name: "valid client with nil upgrade path", + clientState: types.NewClientState(chainID, types.DefaultTrustLevel, trustingPeriod, ubdPeriod, maxClockDrift, height, commitmenttypes.GetSDKSpecs(), nil, false, false), + expPass: true, + }, + { + name: "invalid chainID", + clientState: types.NewClientState(" ", types.DefaultTrustLevel, trustingPeriod, ubdPeriod, maxClockDrift, height, commitmenttypes.GetSDKSpecs(), upgradePath, false, false), + expPass: false, + }, + { + name: "invalid trust level", + clientState: types.NewClientState(chainID, types.Fraction{Numerator: 0, Denominator: 1}, trustingPeriod, ubdPeriod, maxClockDrift, height, commitmenttypes.GetSDKSpecs(), upgradePath, false, false), + expPass: false, + }, + { + name: "invalid trusting period", + clientState: types.NewClientState(chainID, types.DefaultTrustLevel, 0, ubdPeriod, maxClockDrift, height, commitmenttypes.GetSDKSpecs(), upgradePath, false, false), + expPass: false, + }, + { + name: "invalid unbonding period", + clientState: types.NewClientState(chainID, types.DefaultTrustLevel, trustingPeriod, 0, maxClockDrift, height, commitmenttypes.GetSDKSpecs(), upgradePath, false, false), + expPass: false, + }, + { + name: "invalid max clock drift", + clientState: types.NewClientState(chainID, types.DefaultTrustLevel, trustingPeriod, ubdPeriod, 0, height, commitmenttypes.GetSDKSpecs(), upgradePath, false, false), + expPass: false, + }, + { + name: "invalid height", + clientState: types.NewClientState(chainID, types.DefaultTrustLevel, trustingPeriod, ubdPeriod, maxClockDrift, clienttypes.ZeroHeight(), commitmenttypes.GetSDKSpecs(), upgradePath, false, false), + expPass: false, + }, + { + name: "trusting period not less than unbonding period", + clientState: types.NewClientState(chainID, types.DefaultTrustLevel, ubdPeriod, ubdPeriod, maxClockDrift, height, commitmenttypes.GetSDKSpecs(), upgradePath, false, false), + expPass: false, + }, + { + name: "proof specs is nil", + clientState: types.NewClientState(chainID, types.DefaultTrustLevel, ubdPeriod, ubdPeriod, maxClockDrift, height, nil, upgradePath, false, false), + expPass: false, + }, + { + name: "proof specs contains nil", + clientState: types.NewClientState(chainID, types.DefaultTrustLevel, ubdPeriod, ubdPeriod, maxClockDrift, height, []*ics23.ProofSpec{ics23.TendermintSpec, nil}, upgradePath, false, false), + expPass: false, + }, + } + + for _, tc := range testCases { + err := tc.clientState.Validate() + if tc.expPass { + suite.Require().NoError(err, tc.name) + } else { + suite.Require().Error(err, tc.name) + } + } +} + +func (suite *TendermintTestSuite) TestInitialize() { + + testCases := []struct { + name string + consensusState exported.ConsensusState + expPass bool + }{ + { + name: "valid consensus", + consensusState: &types.ConsensusState{}, + expPass: true, + }, + { + name: "invalid consensus: consensus state is solomachine consensus", + consensusState: ibctesting.NewSolomachine(suite.T(), suite.chainA.Codec, "solomachine", "", 2).ConsensusState(), + expPass: false, + }, + } + + clientA, err := suite.coordinator.CreateClient(suite.chainA, suite.chainB, 
exported.Tendermint) + suite.Require().NoError(err) + + clientState := suite.chainA.GetClientState(clientA) + store := suite.chainA.App.IBCKeeper.ClientKeeper.ClientStore(suite.chainA.GetContext(), clientA) + + for _, tc := range testCases { + err := clientState.Initialize(suite.chainA.GetContext(), suite.chainA.Codec, store, tc.consensusState) + if tc.expPass { + suite.Require().NoError(err, "valid case returned an error") + } else { + suite.Require().Error(err, "invalid case didn't return an error") + } + } +} + +func (suite *TendermintTestSuite) TestVerifyClientConsensusState() { + testCases := []struct { + name string + clientState *types.ClientState + consensusState *types.ConsensusState + prefix commitmenttypes.MerklePrefix + proof []byte + expPass bool + }{ + // FIXME: uncomment + // { + // name: "successful verification", + // clientState: types.NewClientState(chainID, types.DefaultTrustLevel, trustingPeriod, ubdPeriod, maxClockDrift, height, commitmenttypes.GetSDKSpecs()), + // consensusState: types.ConsensusState{ + // Root: commitmenttypes.NewMerkleRoot(suite.header.Header.GetAppHash()), + // }, + // prefix: commitmenttypes.NewMerklePrefix([]byte("ibc")), + // expPass: true, + // }, + { + name: "ApplyPrefix failed", + clientState: types.NewClientState(chainID, types.DefaultTrustLevel, trustingPeriod, ubdPeriod, maxClockDrift, height, commitmenttypes.GetSDKSpecs(), upgradePath, false, false), + consensusState: &types.ConsensusState{ + Root: commitmenttypes.NewMerkleRoot(suite.header.Header.GetAppHash()), + }, + prefix: commitmenttypes.MerklePrefix{}, + expPass: false, + }, + { + name: "latest client height < height", + clientState: types.NewClientState(chainID, types.DefaultTrustLevel, trustingPeriod, ubdPeriod, maxClockDrift, height, commitmenttypes.GetSDKSpecs(), upgradePath, false, false), + consensusState: &types.ConsensusState{ + Root: commitmenttypes.NewMerkleRoot(suite.header.Header.GetAppHash()), + }, + prefix: commitmenttypes.NewMerklePrefix([]byte("ibc")), + expPass: false, + }, + { + name: "client is frozen", + clientState: &types.ClientState{LatestHeight: height, FrozenHeight: clienttypes.NewHeight(height.RevisionNumber, height.RevisionHeight-1)}, + consensusState: &types.ConsensusState{ + Root: commitmenttypes.NewMerkleRoot(suite.header.Header.GetAppHash()), + }, + prefix: commitmenttypes.NewMerklePrefix([]byte("ibc")), + expPass: false, + }, + { + name: "proof verification failed", + clientState: types.NewClientState(chainID, types.DefaultTrustLevel, trustingPeriod, ubdPeriod, maxClockDrift, height, commitmenttypes.GetSDKSpecs(), upgradePath, false, false), + consensusState: &types.ConsensusState{ + Root: commitmenttypes.NewMerkleRoot(suite.header.Header.GetAppHash()), + NextValidatorsHash: suite.valsHash, + }, + prefix: commitmenttypes.NewMerklePrefix([]byte("ibc")), + proof: []byte{}, + expPass: false, + }, + } + + for i, tc := range testCases { + tc := tc + + err := tc.clientState.VerifyClientConsensusState( + nil, suite.cdc, height, "chainA", tc.clientState.LatestHeight, tc.prefix, tc.proof, tc.consensusState, + ) + + if tc.expPass { + suite.Require().NoError(err, "valid test case %d failed: %s", i, tc.name) + } else { + suite.Require().Error(err, "invalid test case %d passed: %s", i, tc.name) + } + } +} + +// test verification of the connection on chainB being represented in the +// light client on chainA +func (suite *TendermintTestSuite) TestVerifyConnectionState() { + var ( + clientState *types.ClientState + proof []byte + proofHeight exported.Height + 
prefix commitmenttypes.MerklePrefix + ) + + testCases := []struct { + name string + malleate func() + expPass bool + }{ + { + "successful verification", func() {}, true, + }, + { + "ApplyPrefix failed", func() { + prefix = commitmenttypes.MerklePrefix{} + }, false, + }, + { + "latest client height < height", func() { + proofHeight = clientState.LatestHeight.Increment() + }, false, + }, + { + "client is frozen", func() { + clientState.FrozenHeight = clienttypes.NewHeight(0, 1) + }, false, + }, + { + "proof verification failed", func() { + proof = invalidProof + }, false, + }, + } + + for _, tc := range testCases { + tc := tc + + suite.Run(tc.name, func() { + suite.SetupTest() // reset + + // setup testing conditions + clientA, _, _, connB, _, _ := suite.coordinator.Setup(suite.chainA, suite.chainB, channeltypes.UNORDERED) + connection := suite.chainB.GetConnection(connB) + + var ok bool + clientStateI := suite.chainA.GetClientState(clientA) + clientState, ok = clientStateI.(*types.ClientState) + suite.Require().True(ok) + + prefix = suite.chainB.GetPrefix() + + // make connection proof + connectionKey := host.ConnectionKey(connB.ID) + proof, proofHeight = suite.chainB.QueryProof(connectionKey) + + tc.malleate() // make changes as necessary + + store := suite.chainA.App.IBCKeeper.ClientKeeper.ClientStore(suite.chainA.GetContext(), clientA) + + err := clientState.VerifyConnectionState( + store, suite.chainA.Codec, proofHeight, &prefix, proof, connB.ID, connection, + ) + + if tc.expPass { + suite.Require().NoError(err) + } else { + suite.Require().Error(err) + } + }) + } +} + +// test verification of the channel on chainB being represented in the light +// client on chainA +func (suite *TendermintTestSuite) TestVerifyChannelState() { + var ( + clientState *types.ClientState + proof []byte + proofHeight exported.Height + prefix commitmenttypes.MerklePrefix + ) + + testCases := []struct { + name string + malleate func() + expPass bool + }{ + { + "successful verification", func() {}, true, + }, + { + "ApplyPrefix failed", func() { + prefix = commitmenttypes.MerklePrefix{} + }, false, + }, + { + "latest client height < height", func() { + proofHeight = clientState.LatestHeight.Increment() + }, false, + }, + { + "client is frozen", func() { + clientState.FrozenHeight = clienttypes.NewHeight(0, 1) + }, false, + }, + { + "proof verification failed", func() { + proof = invalidProof + }, false, + }, + } + + for _, tc := range testCases { + tc := tc + + suite.Run(tc.name, func() { + suite.SetupTest() // reset + + // setup testing conditions + clientA, _, _, _, _, channelB := suite.coordinator.Setup(suite.chainA, suite.chainB, channeltypes.UNORDERED) + channel := suite.chainB.GetChannel(channelB) + + var ok bool + clientStateI := suite.chainA.GetClientState(clientA) + clientState, ok = clientStateI.(*types.ClientState) + suite.Require().True(ok) + + prefix = suite.chainB.GetPrefix() + + // make channel proof + channelKey := host.ChannelKey(channelB.PortID, channelB.ID) + proof, proofHeight = suite.chainB.QueryProof(channelKey) + + tc.malleate() // make changes as necessary + + store := suite.chainA.App.IBCKeeper.ClientKeeper.ClientStore(suite.chainA.GetContext(), clientA) + + err := clientState.VerifyChannelState( + store, suite.chainA.Codec, proofHeight, &prefix, proof, + channelB.PortID, channelB.ID, channel, + ) + + if tc.expPass { + suite.Require().NoError(err) + } else { + suite.Require().Error(err) + } + }) + } +} + +// test verification of the packet commitment on chainB being represented +// in 
the light client on chainA. A send from chainB to chainA is simulated. +func (suite *TendermintTestSuite) TestVerifyPacketCommitment() { + var ( + clientState *types.ClientState + proof []byte + delayPeriod uint64 + proofHeight exported.Height + prefix commitmenttypes.MerklePrefix + ) + + testCases := []struct { + name string + malleate func() + expPass bool + }{ + { + "successful verification", func() {}, true, + }, + { + name: "delay period has passed", + malleate: func() { + delayPeriod = uint64(time.Second.Nanoseconds()) + }, + expPass: true, + }, + { + name: "delay period has not passed", + malleate: func() { + delayPeriod = uint64(time.Hour.Nanoseconds()) + }, + expPass: false, + }, + { + "ApplyPrefix failed", func() { + prefix = commitmenttypes.MerklePrefix{} + }, false, + }, + { + "latest client height < height", func() { + proofHeight = clientState.LatestHeight.Increment() + }, false, + }, + { + "client is frozen", func() { + clientState.FrozenHeight = clienttypes.NewHeight(0, 1) + }, false, + }, + { + "proof verification failed", func() { + proof = invalidProof + }, false, + }, + } + + for _, tc := range testCases { + tc := tc + + suite.Run(tc.name, func() { + suite.SetupTest() // reset + + // setup testing conditions + clientA, _, _, _, channelA, channelB := suite.coordinator.Setup(suite.chainA, suite.chainB, channeltypes.UNORDERED) + packet := channeltypes.NewPacket(ibctesting.TestHash, 1, channelB.PortID, channelB.ID, channelA.PortID, channelA.ID, clienttypes.NewHeight(0, 100), 0) + err := suite.coordinator.SendPacket(suite.chainB, suite.chainA, packet, clientA) + suite.Require().NoError(err) + + var ok bool + clientStateI := suite.chainA.GetClientState(clientA) + clientState, ok = clientStateI.(*types.ClientState) + suite.Require().True(ok) + + prefix = suite.chainB.GetPrefix() + + // make packet commitment proof + packetKey := host.PacketCommitmentKey(packet.GetSourcePort(), packet.GetSourceChannel(), packet.GetSequence()) + proof, proofHeight = suite.chainB.QueryProof(packetKey) + + tc.malleate() // make changes as necessary + + store := suite.chainA.App.IBCKeeper.ClientKeeper.ClientStore(suite.chainA.GetContext(), clientA) + + currentTime := uint64(suite.chainA.GetContext().BlockTime().UnixNano()) + commitment := channeltypes.CommitPacket(suite.chainA.App.IBCKeeper.Codec(), packet) + err = clientState.VerifyPacketCommitment( + store, suite.chainA.Codec, proofHeight, currentTime, delayPeriod, &prefix, proof, + packet.GetSourcePort(), packet.GetSourceChannel(), packet.GetSequence(), commitment, + ) + + if tc.expPass { + suite.Require().NoError(err) + } else { + suite.Require().Error(err) + } + }) + } +} + +// test verification of the acknowledgement on chainB being represented +// in the light client on chainA. A send and ack from chainA to chainB +// is simulated. 
+func (suite *TendermintTestSuite) TestVerifyPacketAcknowledgement() {
+    var (
+        clientState *types.ClientState
+        proof       []byte
+        delayPeriod uint64
+        proofHeight exported.Height
+        prefix      commitmenttypes.MerklePrefix
+    )
+
+    testCases := []struct {
+        name     string
+        malleate func()
+        expPass  bool
+    }{
+        {
+            "successful verification", func() {}, true,
+        },
+        {
+            name: "delay period has passed",
+            malleate: func() {
+                delayPeriod = uint64(time.Second.Nanoseconds())
+            },
+            expPass: true,
+        },
+        {
+            name: "delay period has not passed",
+            malleate: func() {
+                delayPeriod = uint64(time.Hour.Nanoseconds())
+            },
+            expPass: false,
+        },
+        {
+            "ApplyPrefix failed", func() {
+                prefix = commitmenttypes.MerklePrefix{}
+            }, false,
+        },
+        {
+            "latest client height < height", func() {
+                proofHeight = clientState.LatestHeight.Increment()
+            }, false,
+        },
+        {
+            "client is frozen", func() {
+                clientState.FrozenHeight = clienttypes.NewHeight(0, 1)
+            }, false,
+        },
+        {
+            "proof verification failed", func() {
+                proof = invalidProof
+            }, false,
+        },
+    }
+
+    for _, tc := range testCases {
+        tc := tc
+
+        suite.Run(tc.name, func() {
+            suite.SetupTest() // reset
+
+            // setup testing conditions
+            clientA, clientB, _, _, channelA, channelB := suite.coordinator.Setup(suite.chainA, suite.chainB, channeltypes.UNORDERED)
+            packet := channeltypes.NewPacket(ibctesting.TestHash, 1, channelA.PortID, channelA.ID, channelB.PortID, channelB.ID, clienttypes.NewHeight(0, 100), 0)
+
+            // send packet
+            err := suite.coordinator.SendPacket(suite.chainA, suite.chainB, packet, clientB)
+            suite.Require().NoError(err)
+
+            // write receipt and ack
+            err = suite.coordinator.RecvPacket(suite.chainA, suite.chainB, clientA, packet)
+            suite.Require().NoError(err)
+
+            var ok bool
+            clientStateI := suite.chainA.GetClientState(clientA)
+            clientState, ok = clientStateI.(*types.ClientState)
+            suite.Require().True(ok)
+
+            prefix = suite.chainB.GetPrefix()
+
+            // make packet acknowledgement proof
+            acknowledgementKey := host.PacketAcknowledgementKey(packet.GetDestPort(), packet.GetDestChannel(), packet.GetSequence())
+            proof, proofHeight = suite.chainB.QueryProof(acknowledgementKey)
+
+            tc.malleate() // make changes as necessary
+
+            store := suite.chainA.App.IBCKeeper.ClientKeeper.ClientStore(suite.chainA.GetContext(), clientA)
+
+            currentTime := uint64(suite.chainA.GetContext().BlockTime().UnixNano())
+            err = clientState.VerifyPacketAcknowledgement(
+                store, suite.chainA.Codec, proofHeight, currentTime, delayPeriod, &prefix, proof,
+                packet.GetDestPort(), packet.GetDestChannel(), packet.GetSequence(), ibcmock.MockAcknowledgement,
+            )
+
+            if tc.expPass {
+                suite.Require().NoError(err)
+            } else {
+                suite.Require().Error(err)
+            }
+        })
+    }
+}
+
+// test verification of the absence of a packet receipt on chainB being represented
+// in the light client on chainA. A send from chainA to chainB is simulated, but
+// the packet is not received.
+func (suite *TendermintTestSuite) TestVerifyPacketReceiptAbsence() {
+    var (
+        clientState *types.ClientState
+        proof       []byte
+        delayPeriod uint64
+        proofHeight exported.Height
+        prefix      commitmenttypes.MerklePrefix
+    )
+
+    testCases := []struct {
+        name     string
+        malleate func()
+        expPass  bool
+    }{
+        {
+            "successful verification", func() {}, true,
+        },
+        {
+            name: "delay period has passed",
+            malleate: func() {
+                delayPeriod = uint64(time.Second.Nanoseconds())
+            },
+            expPass: true,
+        },
+        {
+            name: "delay period has not passed",
+            malleate: func() {
+                delayPeriod = uint64(time.Hour.Nanoseconds())
+            },
+            expPass: false,
+        },
+        {
+            "ApplyPrefix failed", func() {
+                prefix = commitmenttypes.MerklePrefix{}
+            }, false,
+        },
+        {
+            "latest client height < height", func() {
+                proofHeight = clientState.LatestHeight.Increment()
+            }, false,
+        },
+        {
+            "client is frozen", func() {
+                clientState.FrozenHeight = clienttypes.NewHeight(0, 1)
+            }, false,
+        },
+        {
+            "proof verification failed", func() {
+                proof = invalidProof
+            }, false,
+        },
+    }
+
+    for _, tc := range testCases {
+        tc := tc
+
+        suite.Run(tc.name, func() {
+            suite.SetupTest() // reset
+
+            // setup testing conditions
+            clientA, clientB, _, _, channelA, channelB := suite.coordinator.Setup(suite.chainA, suite.chainB, channeltypes.UNORDERED)
+            packet := channeltypes.NewPacket(ibctesting.TestHash, 1, channelA.PortID, channelA.ID, channelB.PortID, channelB.ID, clienttypes.NewHeight(0, 100), 0)
+
+            // send packet, but no recv
+            err := suite.coordinator.SendPacket(suite.chainA, suite.chainB, packet, clientB)
+            suite.Require().NoError(err)
+
+            // need to update chainA's client representing chainB to prove the missing receipt
+            suite.coordinator.UpdateClient(suite.chainA, suite.chainB, clientA, exported.Tendermint)
+
+            var ok bool
+            clientStateI := suite.chainA.GetClientState(clientA)
+            clientState, ok = clientStateI.(*types.ClientState)
+            suite.Require().True(ok)
+
+            prefix = suite.chainB.GetPrefix()
+
+            // make packet receipt absence proof
+            receiptKey := host.PacketReceiptKey(packet.GetDestPort(), packet.GetDestChannel(), packet.GetSequence())
+            proof, proofHeight = suite.chainB.QueryProof(receiptKey)
+
+            tc.malleate() // make changes as necessary
+
+            store := suite.chainA.App.IBCKeeper.ClientKeeper.ClientStore(suite.chainA.GetContext(), clientA)
+
+            currentTime := uint64(suite.chainA.GetContext().BlockTime().UnixNano())
+            err = clientState.VerifyPacketReceiptAbsence(
+                store, suite.chainA.Codec, proofHeight, currentTime, delayPeriod, &prefix, proof,
+                packet.GetDestPort(), packet.GetDestChannel(), packet.GetSequence(),
+            )
+
+            if tc.expPass {
+                suite.Require().NoError(err)
+            } else {
+                suite.Require().Error(err)
+            }
+        })
+    }
+}
+
+// test verification of the next receive sequence on chainB being represented
+// in the light client on chainA. A send from chainA to chainB followed by a
+// receive on chainB is simulated.
+func (suite *TendermintTestSuite) TestVerifyNextSeqRecv() { + var ( + clientState *types.ClientState + proof []byte + delayPeriod uint64 + proofHeight exported.Height + prefix commitmenttypes.MerklePrefix + ) + + testCases := []struct { + name string + malleate func() + expPass bool + }{ + { + "successful verification", func() {}, true, + }, + { + name: "delay period has passed", + malleate: func() { + delayPeriod = uint64(time.Second.Nanoseconds()) + }, + expPass: true, + }, + { + name: "delay period has not passed", + malleate: func() { + delayPeriod = uint64(time.Hour.Nanoseconds()) + }, + expPass: false, + }, + { + "ApplyPrefix failed", func() { + prefix = commitmenttypes.MerklePrefix{} + }, false, + }, + { + "latest client height < height", func() { + proofHeight = clientState.LatestHeight.Increment() + }, false, + }, + { + "client is frozen", func() { + clientState.FrozenHeight = clienttypes.NewHeight(0, 1) + }, false, + }, + { + "proof verification failed", func() { + proof = invalidProof + }, false, + }, + } + + for _, tc := range testCases { + tc := tc + + suite.Run(tc.name, func() { + suite.SetupTest() // reset + + // setup testing conditions + clientA, clientB, _, _, channelA, channelB := suite.coordinator.Setup(suite.chainA, suite.chainB, channeltypes.ORDERED) + packet := channeltypes.NewPacket(ibctesting.TestHash, 1, channelA.PortID, channelA.ID, channelB.PortID, channelB.ID, clienttypes.NewHeight(0, 100), 0) + + // send packet + err := suite.coordinator.SendPacket(suite.chainA, suite.chainB, packet, clientB) + suite.Require().NoError(err) + + // next seq recv incremented + err = suite.coordinator.RecvPacket(suite.chainA, suite.chainB, clientA, packet) + suite.Require().NoError(err) + + // need to update chainA's client representing chainB + suite.coordinator.UpdateClient(suite.chainA, suite.chainB, clientA, exported.Tendermint) + + var ok bool + clientStateI := suite.chainA.GetClientState(clientA) + clientState, ok = clientStateI.(*types.ClientState) + suite.Require().True(ok) + + prefix = suite.chainB.GetPrefix() + + // make next seq recv proof + nextSeqRecvKey := host.NextSequenceRecvKey(packet.GetDestPort(), packet.GetDestChannel()) + proof, proofHeight = suite.chainB.QueryProof(nextSeqRecvKey) + + tc.malleate() // make changes as necessary + + store := suite.chainA.App.IBCKeeper.ClientKeeper.ClientStore(suite.chainA.GetContext(), clientA) + + currentTime := uint64(suite.chainA.GetContext().BlockTime().UnixNano()) + err = clientState.VerifyNextSequenceRecv( + store, suite.chainA.Codec, proofHeight, currentTime, delayPeriod, &prefix, proof, + packet.GetDestPort(), packet.GetDestChannel(), packet.GetSequence()+1, + ) + + if tc.expPass { + suite.Require().NoError(err) + } else { + suite.Require().Error(err) + } + }) + } +} diff --git a/light-clients/07-tendermint/types/codec.go b/light-clients/07-tendermint/types/codec.go new file mode 100644 index 0000000000..5d876c8fe0 --- /dev/null +++ b/light-clients/07-tendermint/types/codec.go @@ -0,0 +1,27 @@ +package types + +import ( + codectypes "github.com/cosmos/cosmos-sdk/codec/types" + "github.com/cosmos/cosmos-sdk/x/ibc/core/exported" +) + +// RegisterInterfaces registers the tendermint concrete client-related +// implementations and interfaces. 
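+// Registration allows these concrete types to be packed into and unpacked from protobuf Any
+// values when client states, consensus states, headers and misbehaviour are encoded.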
+func RegisterInterfaces(registry codectypes.InterfaceRegistry) {
+    registry.RegisterImplementations(
+        (*exported.ClientState)(nil),
+        &ClientState{},
+    )
+    registry.RegisterImplementations(
+        (*exported.ConsensusState)(nil),
+        &ConsensusState{},
+    )
+    registry.RegisterImplementations(
+        (*exported.Header)(nil),
+        &Header{},
+    )
+    registry.RegisterImplementations(
+        (*exported.Misbehaviour)(nil),
+        &Misbehaviour{},
+    )
+}
diff --git a/light-clients/07-tendermint/types/consensus_state.go b/light-clients/07-tendermint/types/consensus_state.go
new file mode 100644
index 0000000000..adb469a3d1
--- /dev/null
+++ b/light-clients/07-tendermint/types/consensus_state.go
@@ -0,0 +1,55 @@
+package types
+
+import (
+    "time"
+
+    tmbytes "github.com/tendermint/tendermint/libs/bytes"
+    tmtypes "github.com/tendermint/tendermint/types"
+
+    sdkerrors "github.com/cosmos/cosmos-sdk/types/errors"
+    clienttypes "github.com/cosmos/cosmos-sdk/x/ibc/core/02-client/types"
+    commitmenttypes "github.com/cosmos/cosmos-sdk/x/ibc/core/23-commitment/types"
+    "github.com/cosmos/cosmos-sdk/x/ibc/core/exported"
+)
+
+// NewConsensusState creates a new ConsensusState instance.
+func NewConsensusState(
+    timestamp time.Time, root commitmenttypes.MerkleRoot, nextValsHash tmbytes.HexBytes,
+) *ConsensusState {
+    return &ConsensusState{
+        Timestamp:          timestamp,
+        Root:               root,
+        NextValidatorsHash: nextValsHash,
+    }
+}
+
+// ClientType returns Tendermint
+func (ConsensusState) ClientType() string {
+    return exported.Tendermint
+}
+
+// GetRoot returns the commitment Root for the specific consensus state.
+func (cs ConsensusState) GetRoot() exported.Root {
+    return cs.Root
+}
+
+// GetTimestamp returns the block time in nanoseconds of the header that created the consensus state
+func (cs ConsensusState) GetTimestamp() uint64 {
+    return uint64(cs.Timestamp.UnixNano())
+}
+
+// ValidateBasic defines a basic validation for the tendermint consensus state.
+// NOTE: ProcessedTimestamp may be zero if this is an initial consensus state passed in by the relayer
+// as opposed to a consensus state constructed by the chain.
+func (cs ConsensusState) ValidateBasic() error { + if cs.Root.Empty() { + return sdkerrors.Wrap(clienttypes.ErrInvalidConsensus, "root cannot be empty") + } + if err := tmtypes.ValidateHash(cs.NextValidatorsHash); err != nil { + return sdkerrors.Wrap(err, "next validators hash is invalid") + } + if cs.Timestamp.Unix() <= 0 { + return sdkerrors.Wrap(clienttypes.ErrInvalidConsensus, "timestamp must be a positive Unix time") + } + return nil +} diff --git a/light-clients/07-tendermint/types/consensus_state_test.go b/light-clients/07-tendermint/types/consensus_state_test.go new file mode 100644 index 0000000000..313815d0c7 --- /dev/null +++ b/light-clients/07-tendermint/types/consensus_state_test.go @@ -0,0 +1,69 @@ +package types_test + +import ( + "time" + + commitmenttypes "github.com/cosmos/cosmos-sdk/x/ibc/core/23-commitment/types" + "github.com/cosmos/cosmos-sdk/x/ibc/core/exported" + "github.com/cosmos/cosmos-sdk/x/ibc/light-clients/07-tendermint/types" +) + +func (suite *TendermintTestSuite) TestConsensusStateValidateBasic() { + testCases := []struct { + msg string + consensusState *types.ConsensusState + expectPass bool + }{ + {"success", + &types.ConsensusState{ + Timestamp: suite.now, + Root: commitmenttypes.NewMerkleRoot([]byte("app_hash")), + NextValidatorsHash: suite.valsHash, + }, + true}, + {"root is nil", + &types.ConsensusState{ + Timestamp: suite.now, + Root: commitmenttypes.MerkleRoot{}, + NextValidatorsHash: suite.valsHash, + }, + false}, + {"root is empty", + &types.ConsensusState{ + Timestamp: suite.now, + Root: commitmenttypes.MerkleRoot{}, + NextValidatorsHash: suite.valsHash, + }, + false}, + {"nextvalshash is invalid", + &types.ConsensusState{ + Timestamp: suite.now, + Root: commitmenttypes.NewMerkleRoot([]byte("app_hash")), + NextValidatorsHash: []byte("hi"), + }, + false}, + + {"timestamp is zero", + &types.ConsensusState{ + Timestamp: time.Time{}, + Root: commitmenttypes.NewMerkleRoot([]byte("app_hash")), + NextValidatorsHash: suite.valsHash, + }, + false}, + } + + for i, tc := range testCases { + tc := tc + + // check just to increase coverage + suite.Require().Equal(exported.Tendermint, tc.consensusState.ClientType()) + suite.Require().Equal(tc.consensusState.GetRoot(), tc.consensusState.Root) + + err := tc.consensusState.ValidateBasic() + if tc.expectPass { + suite.Require().NoError(err, "valid test case %d failed: %s", i, tc.msg) + } else { + suite.Require().Error(err, "invalid test case %d passed: %s", i, tc.msg) + } + } +} diff --git a/light-clients/07-tendermint/types/errors.go b/light-clients/07-tendermint/types/errors.go new file mode 100644 index 0000000000..276c225b73 --- /dev/null +++ b/light-clients/07-tendermint/types/errors.go @@ -0,0 +1,25 @@ +package types + +import ( + sdkerrors "github.com/cosmos/cosmos-sdk/types/errors" +) + +const ( + SubModuleName = "tendermint-client" +) + +// IBC tendermint client sentinel errors +var ( + ErrInvalidChainID = sdkerrors.Register(SubModuleName, 2, "invalid chain-id") + ErrInvalidTrustingPeriod = sdkerrors.Register(SubModuleName, 3, "invalid trusting period") + ErrInvalidUnbondingPeriod = sdkerrors.Register(SubModuleName, 4, "invalid unbonding period") + ErrInvalidHeaderHeight = sdkerrors.Register(SubModuleName, 5, "invalid header height") + ErrInvalidHeader = sdkerrors.Register(SubModuleName, 6, "invalid header") + ErrInvalidMaxClockDrift = sdkerrors.Register(SubModuleName, 7, "invalid max clock drift") + ErrProcessedTimeNotFound = sdkerrors.Register(SubModuleName, 8, "processed time not found") + 
ErrDelayPeriodNotPassed = sdkerrors.Register(SubModuleName, 9, "packet-specified delay period has not been reached") + ErrTrustingPeriodExpired = sdkerrors.Register(SubModuleName, 10, "time since latest trusted state has passed the trusting period") + ErrUnbondingPeriodExpired = sdkerrors.Register(SubModuleName, 11, "time since latest trusted state has passed the unbonding period") + ErrInvalidProofSpecs = sdkerrors.Register(SubModuleName, 12, "invalid proof specs") + ErrInvalidValidatorSet = sdkerrors.Register(SubModuleName, 13, "invalid validator set") +) diff --git a/light-clients/07-tendermint/types/fraction.go b/light-clients/07-tendermint/types/fraction.go new file mode 100644 index 0000000000..e445f19ba6 --- /dev/null +++ b/light-clients/07-tendermint/types/fraction.go @@ -0,0 +1,25 @@ +package types + +import ( + tmmath "github.com/tendermint/tendermint/libs/math" + "github.com/tendermint/tendermint/light" +) + +// DefaultTrustLevel is the tendermint light client default trust level +var DefaultTrustLevel = NewFractionFromTm(light.DefaultTrustLevel) + +// NewFractionFromTm returns a new Fraction instance from a tmmath.Fraction +func NewFractionFromTm(f tmmath.Fraction) Fraction { + return Fraction{ + Numerator: f.Numerator, + Denominator: f.Denominator, + } +} + +// ToTendermint converts Fraction to tmmath.Fraction +func (f Fraction) ToTendermint() tmmath.Fraction { + return tmmath.Fraction{ + Numerator: f.Numerator, + Denominator: f.Denominator, + } +} diff --git a/light-clients/07-tendermint/types/genesis.go b/light-clients/07-tendermint/types/genesis.go new file mode 100644 index 0000000000..7124643b55 --- /dev/null +++ b/light-clients/07-tendermint/types/genesis.go @@ -0,0 +1,21 @@ +package types + +import ( + sdk "github.com/cosmos/cosmos-sdk/types" + clienttypes "github.com/cosmos/cosmos-sdk/x/ibc/core/02-client/types" + "github.com/cosmos/cosmos-sdk/x/ibc/core/exported" +) + +// ExportMetadata exports all the processed times in the client store so they can be included in clients genesis +// and imported by a ClientKeeper +func (cs ClientState) ExportMetadata(store sdk.KVStore) []exported.GenesisMetadata { + gm := make([]exported.GenesisMetadata, 0) + IterateProcessedTime(store, func(key, val []byte) bool { + gm = append(gm, clienttypes.NewGenesisMetadata(key, val)) + return false + }) + if len(gm) == 0 { + return nil + } + return gm +} diff --git a/light-clients/07-tendermint/types/genesis_test.go b/light-clients/07-tendermint/types/genesis_test.go new file mode 100644 index 0000000000..5732151e63 --- /dev/null +++ b/light-clients/07-tendermint/types/genesis_test.go @@ -0,0 +1,38 @@ +package types_test + +import ( + "time" + + sdk "github.com/cosmos/cosmos-sdk/types" + clienttypes "github.com/cosmos/cosmos-sdk/x/ibc/core/02-client/types" + commitmenttypes "github.com/cosmos/cosmos-sdk/x/ibc/core/23-commitment/types" + "github.com/cosmos/cosmos-sdk/x/ibc/light-clients/07-tendermint/types" +) + +func (suite *TendermintTestSuite) TestExportMetadata() { + clientState := types.NewClientState(chainID, types.DefaultTrustLevel, trustingPeriod, ubdPeriod, maxClockDrift, height, commitmenttypes.GetSDKSpecs(), upgradePath, false, false) + suite.chainA.App.IBCKeeper.ClientKeeper.SetClientState(suite.chainA.GetContext(), "clientA", clientState) + + gm := clientState.ExportMetadata(suite.chainA.App.IBCKeeper.ClientKeeper.ClientStore(suite.chainA.GetContext(), "clientA")) + suite.Require().Nil(gm, "client with no metadata returned non-nil exported metadata") + + clientStore := 
suite.chainA.App.IBCKeeper.ClientKeeper.ClientStore(suite.chainA.GetContext(), "clientA") + + // set some processed times + timestamp1 := uint64(time.Now().UnixNano()) + timestamp2 := uint64(time.Now().Add(time.Minute).UnixNano()) + timestampBz1 := sdk.Uint64ToBigEndian(timestamp1) + timestampBz2 := sdk.Uint64ToBigEndian(timestamp2) + types.SetProcessedTime(clientStore, clienttypes.NewHeight(0, 1), timestamp1) + types.SetProcessedTime(clientStore, clienttypes.NewHeight(0, 2), timestamp2) + + gm = clientState.ExportMetadata(suite.chainA.App.IBCKeeper.ClientKeeper.ClientStore(suite.chainA.GetContext(), "clientA")) + suite.Require().NotNil(gm, "client with metadata returned nil exported metadata") + suite.Require().Len(gm, 2, "exported metadata has unexpected length") + + suite.Require().Equal(types.ProcessedTimeKey(clienttypes.NewHeight(0, 1)), gm[0].GetKey(), "metadata has unexpected key") + suite.Require().Equal(timestampBz1, gm[0].GetValue(), "metadata has unexpected value") + + suite.Require().Equal(types.ProcessedTimeKey(clienttypes.NewHeight(0, 2)), gm[1].GetKey(), "metadata has unexpected key") + suite.Require().Equal(timestampBz2, gm[1].GetValue(), "metadata has unexpected value") +} diff --git a/light-clients/07-tendermint/types/header.go b/light-clients/07-tendermint/types/header.go new file mode 100644 index 0000000000..0b9cfa1db1 --- /dev/null +++ b/light-clients/07-tendermint/types/header.go @@ -0,0 +1,83 @@ +package types + +import ( + "bytes" + "time" + + tmtypes "github.com/tendermint/tendermint/types" + + sdkerrors "github.com/cosmos/cosmos-sdk/types/errors" + clienttypes "github.com/cosmos/cosmos-sdk/x/ibc/core/02-client/types" + commitmenttypes "github.com/cosmos/cosmos-sdk/x/ibc/core/23-commitment/types" + "github.com/cosmos/cosmos-sdk/x/ibc/core/exported" +) + +var _ exported.Header = &Header{} + +// ConsensusState returns the updated consensus state associated with the header +func (h Header) ConsensusState() *ConsensusState { + return &ConsensusState{ + Timestamp: h.GetTime(), + Root: commitmenttypes.NewMerkleRoot(h.Header.GetAppHash()), + NextValidatorsHash: h.Header.NextValidatorsHash, + } +} + +// ClientType defines that the Header is a Tendermint consensus algorithm +func (h Header) ClientType() string { + return exported.Tendermint +} + +// GetHeight returns the current height. It returns 0 if the tendermint +// header is nil. +// NOTE: the header.Header is checked to be non nil in ValidateBasic. +func (h Header) GetHeight() exported.Height { + revision := clienttypes.ParseChainID(h.Header.ChainID) + return clienttypes.NewHeight(revision, uint64(h.Header.Height)) +} + +// GetTime returns the current block timestamp. It returns a zero time if +// the tendermint header is nil. +// NOTE: the header.Header is checked to be non nil in ValidateBasic. +func (h Header) GetTime() time.Time { + return h.Header.Time +} + +// ValidateBasic calls the SignedHeader ValidateBasic function and checks +// that validatorsets are not nil. 
+// NOTE: TrustedHeight and TrustedValidators may be empty when creating client +// with MsgCreateClient +func (h Header) ValidateBasic() error { + if h.SignedHeader == nil { + return sdkerrors.Wrap(clienttypes.ErrInvalidHeader, "tendermint signed header cannot be nil") + } + if h.Header == nil { + return sdkerrors.Wrap(clienttypes.ErrInvalidHeader, "tendermint header cannot be nil") + } + tmSignedHeader, err := tmtypes.SignedHeaderFromProto(h.SignedHeader) + if err != nil { + return sdkerrors.Wrap(err, "header is not a tendermint header") + } + if err := tmSignedHeader.ValidateBasic(h.Header.GetChainID()); err != nil { + return sdkerrors.Wrap(err, "header failed basic validation") + } + + // TrustedHeight is less than Header for updates + // and less than or equal to Header for misbehaviour + if h.TrustedHeight.GT(h.GetHeight()) { + return sdkerrors.Wrapf(ErrInvalidHeaderHeight, "TrustedHeight %d must be less than or equal to header height %d", + h.TrustedHeight, h.GetHeight()) + } + + if h.ValidatorSet == nil { + return sdkerrors.Wrap(clienttypes.ErrInvalidHeader, "validator set is nil") + } + tmValset, err := tmtypes.ValidatorSetFromProto(h.ValidatorSet) + if err != nil { + return sdkerrors.Wrap(err, "validator set is not tendermint validator set") + } + if !bytes.Equal(h.Header.ValidatorsHash, tmValset.Hash()) { + return sdkerrors.Wrap(clienttypes.ErrInvalidHeader, "validator set does not match hash") + } + return nil +} diff --git a/light-clients/07-tendermint/types/header_test.go b/light-clients/07-tendermint/types/header_test.go new file mode 100644 index 0000000000..97647f8614 --- /dev/null +++ b/light-clients/07-tendermint/types/header_test.go @@ -0,0 +1,82 @@ +package types_test + +import ( + "time" + + tmprotocrypto "github.com/tendermint/tendermint/proto/tendermint/crypto" + + clienttypes "github.com/cosmos/cosmos-sdk/x/ibc/core/02-client/types" + "github.com/cosmos/cosmos-sdk/x/ibc/core/exported" + "github.com/cosmos/cosmos-sdk/x/ibc/light-clients/07-tendermint/types" +) + +func (suite *TendermintTestSuite) TestGetHeight() { + header := suite.chainA.LastHeader + suite.Require().NotEqual(uint64(0), header.GetHeight()) +} + +func (suite *TendermintTestSuite) TestGetTime() { + header := suite.chainA.LastHeader + suite.Require().NotEqual(time.Time{}, header.GetTime()) +} + +func (suite *TendermintTestSuite) TestHeaderValidateBasic() { + var ( + header *types.Header + ) + testCases := []struct { + name string + malleate func() + expPass bool + }{ + {"valid header", func() {}, true}, + {"header is nil", func() { + header.Header = nil + }, false}, + {"signed header is nil", func() { + header.SignedHeader = nil + }, false}, + {"SignedHeaderFromProto failed", func() { + header.SignedHeader.Commit.Height = -1 + }, false}, + {"signed header failed tendermint ValidateBasic", func() { + header = suite.chainA.LastHeader + header.SignedHeader.Commit = nil + }, false}, + {"trusted height is greater than header height", func() { + header.TrustedHeight = header.GetHeight().(clienttypes.Height).Increment().(clienttypes.Height) + }, false}, + {"validator set nil", func() { + header.ValidatorSet = nil + }, false}, + {"ValidatorSetFromProto failed", func() { + header.ValidatorSet.Validators[0].PubKey = tmprotocrypto.PublicKey{} + }, false}, + {"header validator hash does not equal hash of validator set", func() { + // use chainB's randomly generated validator set + header.ValidatorSet = suite.chainB.LastHeader.ValidatorSet + }, false}, + } + + suite.Require().Equal(exported.Tendermint, 
suite.header.ClientType()) + + for _, tc := range testCases { + tc := tc + + suite.Run(tc.name, func() { + suite.SetupTest() + + header = suite.chainA.LastHeader // must be explicitly changed in malleate + + tc.malleate() + + err := header.ValidateBasic() + + if tc.expPass { + suite.Require().NoError(err) + } else { + suite.Require().Error(err) + } + }) + } +} diff --git a/light-clients/07-tendermint/types/misbehaviour.go b/light-clients/07-tendermint/types/misbehaviour.go new file mode 100644 index 0000000000..340130d29f --- /dev/null +++ b/light-clients/07-tendermint/types/misbehaviour.go @@ -0,0 +1,141 @@ +package types + +import ( + "bytes" + "time" + + tmproto "github.com/tendermint/tendermint/proto/tendermint/types" + tmtypes "github.com/tendermint/tendermint/types" + + sdkerrors "github.com/cosmos/cosmos-sdk/types/errors" + clienttypes "github.com/cosmos/cosmos-sdk/x/ibc/core/02-client/types" + host "github.com/cosmos/cosmos-sdk/x/ibc/core/24-host" + "github.com/cosmos/cosmos-sdk/x/ibc/core/exported" +) + +var _ exported.Misbehaviour = &Misbehaviour{} + +// NewMisbehaviour creates a new Misbehaviour instance. +func NewMisbehaviour(clientID string, header1, header2 *Header) *Misbehaviour { + return &Misbehaviour{ + ClientId: clientID, + Header1: header1, + Header2: header2, + } +} + +// ClientType is Tendermint light client +func (misbehaviour Misbehaviour) ClientType() string { + return exported.Tendermint +} + +// GetClientID returns the ID of the client that committed a misbehaviour. +func (misbehaviour Misbehaviour) GetClientID() string { + return misbehaviour.ClientId +} + +// GetHeight returns the height at which misbehaviour occurred +// +// NOTE: assumes that misbehaviour headers have the same height +func (misbehaviour Misbehaviour) GetHeight() exported.Height { + return misbehaviour.Header1.GetHeight() +} + +// GetTime returns the timestamp at which misbehaviour occurred. It uses the +// maximum value from both headers to prevent producing an invalid header outside +// of the misbehaviour age range. 
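+// For example, if Header1 was created at 10:00:00 and Header2 at 10:00:30, GetTime returns 10:00:30.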
+func (misbehaviour Misbehaviour) GetTime() time.Time { + t1, t2 := misbehaviour.Header1.GetTime(), misbehaviour.Header2.GetTime() + if t1.After(t2) { + return t1 + } + return t2 +} + +// ValidateBasic implements Misbehaviour interface +func (misbehaviour Misbehaviour) ValidateBasic() error { + if misbehaviour.Header1 == nil { + return sdkerrors.Wrap(ErrInvalidHeader, "misbehaviour Header1 cannot be nil") + } + if misbehaviour.Header2 == nil { + return sdkerrors.Wrap(ErrInvalidHeader, "misbehaviour Header2 cannot be nil") + } + if misbehaviour.Header1.TrustedHeight.RevisionHeight == 0 { + return sdkerrors.Wrapf(ErrInvalidHeaderHeight, "misbehaviour Header1 cannot have zero revision height") + } + if misbehaviour.Header2.TrustedHeight.RevisionHeight == 0 { + return sdkerrors.Wrapf(ErrInvalidHeaderHeight, "misbehaviour Header2 cannot have zero revision height") + } + if misbehaviour.Header1.TrustedValidators == nil { + return sdkerrors.Wrap(ErrInvalidValidatorSet, "trusted validator set in Header1 cannot be empty") + } + if misbehaviour.Header2.TrustedValidators == nil { + return sdkerrors.Wrap(ErrInvalidValidatorSet, "trusted validator set in Header2 cannot be empty") + } + if misbehaviour.Header1.Header.ChainID != misbehaviour.Header2.Header.ChainID { + return sdkerrors.Wrap(clienttypes.ErrInvalidMisbehaviour, "headers must have identical chainIDs") + } + + if err := host.ClientIdentifierValidator(misbehaviour.ClientId); err != nil { + return sdkerrors.Wrap(err, "misbehaviour client ID is invalid") + } + + // ValidateBasic on both validators + if err := misbehaviour.Header1.ValidateBasic(); err != nil { + return sdkerrors.Wrap( + clienttypes.ErrInvalidMisbehaviour, + sdkerrors.Wrap(err, "header 1 failed validation").Error(), + ) + } + if err := misbehaviour.Header2.ValidateBasic(); err != nil { + return sdkerrors.Wrap( + clienttypes.ErrInvalidMisbehaviour, + sdkerrors.Wrap(err, "header 2 failed validation").Error(), + ) + } + // Ensure that Heights are the same + if misbehaviour.Header1.GetHeight() != misbehaviour.Header2.GetHeight() { + return sdkerrors.Wrapf(clienttypes.ErrInvalidMisbehaviour, "headers in misbehaviour are on different heights (%d ≠ %d)", misbehaviour.Header1.GetHeight(), misbehaviour.Header2.GetHeight()) + } + + blockID1, err := tmtypes.BlockIDFromProto(&misbehaviour.Header1.SignedHeader.Commit.BlockID) + if err != nil { + return sdkerrors.Wrap(err, "invalid block ID from header 1 in misbehaviour") + } + blockID2, err := tmtypes.BlockIDFromProto(&misbehaviour.Header2.SignedHeader.Commit.BlockID) + if err != nil { + return sdkerrors.Wrap(err, "invalid block ID from header 2 in misbehaviour") + } + + // Ensure that Commit Hashes are different + if bytes.Equal(blockID1.Hash, blockID2.Hash) { + return sdkerrors.Wrap(clienttypes.ErrInvalidMisbehaviour, "headers block hashes are equal") + } + if err := validCommit(misbehaviour.Header1.Header.ChainID, *blockID1, + misbehaviour.Header1.Commit, misbehaviour.Header1.ValidatorSet); err != nil { + return err + } + if err := validCommit(misbehaviour.Header2.Header.ChainID, *blockID2, + misbehaviour.Header2.Commit, misbehaviour.Header2.ValidatorSet); err != nil { + return err + } + return nil +} + +// validCommit checks if the given commit is a valid commit from the passed-in validatorset +func validCommit(chainID string, blockID tmtypes.BlockID, commit *tmproto.Commit, valSet *tmproto.ValidatorSet) (err error) { + tmCommit, err := tmtypes.CommitFromProto(commit) + if err != nil { + return sdkerrors.Wrap(err, "commit is not tendermint 
commit type") + } + tmValset, err := tmtypes.ValidatorSetFromProto(valSet) + if err != nil { + return sdkerrors.Wrap(err, "validator set is not tendermint validator set type") + } + + if err := tmValset.VerifyCommitLight(chainID, blockID, tmCommit.Height, tmCommit); err != nil { + return sdkerrors.Wrap(clienttypes.ErrInvalidMisbehaviour, "validator set did not commit to header") + } + + return nil +} diff --git a/light-clients/07-tendermint/types/misbehaviour_handle.go b/light-clients/07-tendermint/types/misbehaviour_handle.go new file mode 100644 index 0000000000..4c55552d30 --- /dev/null +++ b/light-clients/07-tendermint/types/misbehaviour_handle.go @@ -0,0 +1,119 @@ +package types + +import ( + "time" + + tmtypes "github.com/tendermint/tendermint/types" + + "github.com/cosmos/cosmos-sdk/codec" + sdk "github.com/cosmos/cosmos-sdk/types" + sdkerrors "github.com/cosmos/cosmos-sdk/types/errors" + clienttypes "github.com/cosmos/cosmos-sdk/x/ibc/core/02-client/types" + "github.com/cosmos/cosmos-sdk/x/ibc/core/exported" +) + +// CheckMisbehaviourAndUpdateState determines whether or not two conflicting +// headers at the same height would have convinced the light client. +// +// NOTE: consensusState1 is the trusted consensus state that corresponds to the TrustedHeight +// of misbehaviour.Header1 +// Similarly, consensusState2 is the trusted consensus state that corresponds +// to misbehaviour.Header2 +func (cs ClientState) CheckMisbehaviourAndUpdateState( + ctx sdk.Context, + cdc codec.BinaryMarshaler, + clientStore sdk.KVStore, + misbehaviour exported.Misbehaviour, +) (exported.ClientState, error) { + tmMisbehaviour, ok := misbehaviour.(*Misbehaviour) + if !ok { + return nil, sdkerrors.Wrapf(clienttypes.ErrInvalidClientType, "expected type %T, got %T", misbehaviour, &Misbehaviour{}) + } + + // If client is already frozen at earlier height than misbehaviour, return with error + if cs.IsFrozen() && cs.FrozenHeight.LTE(misbehaviour.GetHeight()) { + return nil, sdkerrors.Wrapf(clienttypes.ErrInvalidMisbehaviour, + "client is already frozen at earlier height %s than misbehaviour height %s", cs.FrozenHeight, misbehaviour.GetHeight()) + } + + // Retrieve trusted consensus states for each Header in misbehaviour + // and unmarshal from clientStore + + // Get consensus bytes from clientStore + tmConsensusState1, err := GetConsensusState(clientStore, cdc, tmMisbehaviour.Header1.TrustedHeight) + if err != nil { + return nil, sdkerrors.Wrapf(err, "could not get trusted consensus state from clientStore for Header1 at TrustedHeight: %s", tmMisbehaviour.Header1) + } + + // Get consensus bytes from clientStore + tmConsensusState2, err := GetConsensusState(clientStore, cdc, tmMisbehaviour.Header2.TrustedHeight) + if err != nil { + return nil, sdkerrors.Wrapf(err, "could not get trusted consensus state from clientStore for Header2 at TrustedHeight: %s", tmMisbehaviour.Header2) + } + + // Check the validity of the two conflicting headers against their respective + // trusted consensus states + // NOTE: header height and commitment root assertions are checked in + // misbehaviour.ValidateBasic by the client keeper and msg.ValidateBasic + // by the base application. 
+ if err := checkMisbehaviourHeader( + &cs, tmConsensusState1, tmMisbehaviour.Header1, ctx.BlockTime(), + ); err != nil { + return nil, sdkerrors.Wrap(err, "verifying Header1 in Misbehaviour failed") + } + if err := checkMisbehaviourHeader( + &cs, tmConsensusState2, tmMisbehaviour.Header2, ctx.BlockTime(), + ); err != nil { + return nil, sdkerrors.Wrap(err, "verifying Header2 in Misbehaviour failed") + } + + cs.FrozenHeight = tmMisbehaviour.GetHeight().(clienttypes.Height) + return &cs, nil +} + +// checkMisbehaviourHeader checks that a Header in Misbehaviour is valid misbehaviour given +// a trusted ConsensusState +func checkMisbehaviourHeader( + clientState *ClientState, consState *ConsensusState, header *Header, currentTimestamp time.Time, +) error { + + tmTrustedValset, err := tmtypes.ValidatorSetFromProto(header.TrustedValidators) + if err != nil { + return sdkerrors.Wrap(err, "trusted validator set is not tendermint validator set type") + } + + tmCommit, err := tmtypes.CommitFromProto(header.Commit) + if err != nil { + return sdkerrors.Wrap(err, "commit is not tendermint commit type") + } + + // check the trusted fields for the header against ConsensusState + if err := checkTrustedHeader(header, consState); err != nil { + return err + } + + // assert that the age of the trusted consensus state is not older than the trusting period + if currentTimestamp.Sub(consState.Timestamp) >= clientState.TrustingPeriod { + return sdkerrors.Wrapf( + ErrTrustingPeriodExpired, + "current timestamp minus the latest consensus state timestamp is greater than or equal to the trusting period (%d >= %d)", + currentTimestamp.Sub(consState.Timestamp), clientState.TrustingPeriod, + ) + } + + chainID := clientState.GetChainID() + // If chainID is in revision format, then set revision number of chainID with the revision number + // of the misbehaviour header + if clienttypes.IsRevisionFormat(chainID) { + chainID, _ = clienttypes.SetRevisionNumber(chainID, header.GetHeight().GetRevisionNumber()) + } + + // - ValidatorSet must have TrustLevel similarity with trusted FromValidatorSet + // - ValidatorSets on both headers are valid given the last trusted ValidatorSet + if err := tmTrustedValset.VerifyCommitLightTrusting( + chainID, tmCommit, clientState.TrustLevel.ToTendermint(), + ); err != nil { + return sdkerrors.Wrapf(clienttypes.ErrInvalidMisbehaviour, "validator set in header has too much change from trusted validator set: %v", err) + } + return nil +} diff --git a/light-clients/07-tendermint/types/misbehaviour_handle_test.go b/light-clients/07-tendermint/types/misbehaviour_handle_test.go new file mode 100644 index 0000000000..3ca2e4dc11 --- /dev/null +++ b/light-clients/07-tendermint/types/misbehaviour_handle_test.go @@ -0,0 +1,372 @@ +package types_test + +import ( + "fmt" + "time" + + "github.com/tendermint/tendermint/crypto/tmhash" + tmtypes "github.com/tendermint/tendermint/types" + + clienttypes "github.com/cosmos/cosmos-sdk/x/ibc/core/02-client/types" + commitmenttypes "github.com/cosmos/cosmos-sdk/x/ibc/core/23-commitment/types" + "github.com/cosmos/cosmos-sdk/x/ibc/core/exported" + "github.com/cosmos/cosmos-sdk/x/ibc/light-clients/07-tendermint/types" + ibctesting "github.com/cosmos/cosmos-sdk/x/ibc/testing" + ibctestingmock "github.com/cosmos/cosmos-sdk/x/ibc/testing/mock" +) + +func (suite *TendermintTestSuite) TestCheckMisbehaviourAndUpdateState() { + altPrivVal := ibctestingmock.NewPV() + altPubKey, err := altPrivVal.GetPubKey() + suite.Require().NoError(err) + + altVal := 
tmtypes.NewValidator(altPubKey, 4) + + // Create bothValSet with both suite validator and altVal + bothValSet := tmtypes.NewValidatorSet(append(suite.valSet.Validators, altVal)) + bothValsHash := bothValSet.Hash() + // Create alternative validator set with only altVal + altValSet := tmtypes.NewValidatorSet([]*tmtypes.Validator{altVal}) + + _, suiteVal := suite.valSet.GetByIndex(0) + + // Create signer array and ensure it is in same order as bothValSet + bothSigners := ibctesting.CreateSortedSignerArray(altPrivVal, suite.privVal, altVal, suiteVal) + + altSigners := []tmtypes.PrivValidator{altPrivVal} + + heightMinus1 := clienttypes.NewHeight(height.RevisionNumber, height.RevisionHeight-1) + heightMinus3 := clienttypes.NewHeight(height.RevisionNumber, height.RevisionHeight-3) + + testCases := []struct { + name string + clientState exported.ClientState + consensusState1 exported.ConsensusState + height1 clienttypes.Height + consensusState2 exported.ConsensusState + height2 clienttypes.Height + misbehaviour exported.Misbehaviour + timestamp time.Time + expPass bool + }{ + { + "valid misbehavior misbehaviour", + types.NewClientState(chainID, types.DefaultTrustLevel, trustingPeriod, ubdPeriod, maxClockDrift, height, commitmenttypes.GetSDKSpecs(), upgradePath, false, false), + types.NewConsensusState(suite.now, commitmenttypes.NewMerkleRoot(tmhash.Sum([]byte("app_hash"))), bothValsHash), + height, + types.NewConsensusState(suite.now, commitmenttypes.NewMerkleRoot(tmhash.Sum([]byte("app_hash"))), bothValsHash), + height, + &types.Misbehaviour{ + Header1: suite.chainA.CreateTMClientHeader(chainID, int64(height.RevisionHeight), height, suite.now, bothValSet, bothValSet, bothSigners), + Header2: suite.chainA.CreateTMClientHeader(chainID, int64(height.RevisionHeight), height, suite.now.Add(time.Minute), bothValSet, bothValSet, bothSigners), + ClientId: chainID, + }, + suite.now, + true, + }, + { + "valid misbehavior at height greater than last consensusState", + types.NewClientState(chainID, types.DefaultTrustLevel, trustingPeriod, ubdPeriod, maxClockDrift, height, commitmenttypes.GetSDKSpecs(), upgradePath, false, false), + types.NewConsensusState(suite.now, commitmenttypes.NewMerkleRoot(tmhash.Sum([]byte("app_hash"))), bothValsHash), + heightMinus1, + types.NewConsensusState(suite.now, commitmenttypes.NewMerkleRoot(tmhash.Sum([]byte("app_hash"))), bothValsHash), + heightMinus1, + &types.Misbehaviour{ + Header1: suite.chainA.CreateTMClientHeader(chainID, int64(height.RevisionHeight), heightMinus1, suite.now, bothValSet, bothValSet, bothSigners), + Header2: suite.chainA.CreateTMClientHeader(chainID, int64(height.RevisionHeight), heightMinus1, suite.now.Add(time.Minute), bothValSet, bothValSet, bothSigners), + ClientId: chainID, + }, + suite.now, + true, + }, + { + "valid misbehaviour with different trusted heights", + types.NewClientState(chainID, types.DefaultTrustLevel, trustingPeriod, ubdPeriod, maxClockDrift, height, commitmenttypes.GetSDKSpecs(), upgradePath, false, false), + types.NewConsensusState(suite.now, commitmenttypes.NewMerkleRoot(tmhash.Sum([]byte("app_hash"))), bothValsHash), + heightMinus1, + types.NewConsensusState(suite.now, commitmenttypes.NewMerkleRoot(tmhash.Sum([]byte("app_hash"))), suite.valsHash), + heightMinus3, + &types.Misbehaviour{ + Header1: suite.chainA.CreateTMClientHeader(chainID, int64(height.RevisionHeight), heightMinus1, suite.now, bothValSet, bothValSet, bothSigners), + Header2: suite.chainA.CreateTMClientHeader(chainID, int64(height.RevisionHeight), heightMinus3, 
suite.now.Add(time.Minute), bothValSet, suite.valSet, bothSigners), + ClientId: chainID, + }, + suite.now, + true, + }, + { + "valid misbehaviour at a previous revision", + types.NewClientState(chainIDRevision1, types.DefaultTrustLevel, trustingPeriod, ubdPeriod, maxClockDrift, clienttypes.NewHeight(1, 1), commitmenttypes.GetSDKSpecs(), upgradePath, false, false), + types.NewConsensusState(suite.now, commitmenttypes.NewMerkleRoot(tmhash.Sum([]byte("app_hash"))), bothValsHash), + heightMinus1, + types.NewConsensusState(suite.now, commitmenttypes.NewMerkleRoot(tmhash.Sum([]byte("app_hash"))), suite.valsHash), + heightMinus3, + &types.Misbehaviour{ + Header1: suite.chainA.CreateTMClientHeader(chainIDRevision0, int64(height.RevisionHeight), heightMinus1, suite.now, bothValSet, bothValSet, bothSigners), + Header2: suite.chainA.CreateTMClientHeader(chainIDRevision0, int64(height.RevisionHeight), heightMinus3, suite.now.Add(time.Minute), bothValSet, suite.valSet, bothSigners), + ClientId: chainID, + }, + suite.now, + true, + }, + { + "valid misbehaviour at a future revision", + types.NewClientState(chainIDRevision0, types.DefaultTrustLevel, trustingPeriod, ubdPeriod, maxClockDrift, height, commitmenttypes.GetSDKSpecs(), upgradePath, false, false), + types.NewConsensusState(suite.now, commitmenttypes.NewMerkleRoot(tmhash.Sum([]byte("app_hash"))), bothValsHash), + heightMinus1, + types.NewConsensusState(suite.now, commitmenttypes.NewMerkleRoot(tmhash.Sum([]byte("app_hash"))), suite.valsHash), + heightMinus3, + &types.Misbehaviour{ + Header1: suite.chainA.CreateTMClientHeader(chainIDRevision0, 3, heightMinus1, suite.now, bothValSet, bothValSet, bothSigners), + Header2: suite.chainA.CreateTMClientHeader(chainIDRevision0, 3, heightMinus3, suite.now.Add(time.Minute), bothValSet, suite.valSet, bothSigners), + ClientId: chainID, + }, + suite.now, + true, + }, + { + "valid misbehaviour with trusted heights at a previous revision", + types.NewClientState(chainIDRevision1, types.DefaultTrustLevel, trustingPeriod, ubdPeriod, maxClockDrift, clienttypes.NewHeight(1, 1), commitmenttypes.GetSDKSpecs(), upgradePath, false, false), + types.NewConsensusState(suite.now, commitmenttypes.NewMerkleRoot(tmhash.Sum([]byte("app_hash"))), bothValsHash), + heightMinus1, + types.NewConsensusState(suite.now, commitmenttypes.NewMerkleRoot(tmhash.Sum([]byte("app_hash"))), suite.valsHash), + heightMinus3, + &types.Misbehaviour{ + Header1: suite.chainA.CreateTMClientHeader(chainIDRevision1, 1, heightMinus1, suite.now, bothValSet, bothValSet, bothSigners), + Header2: suite.chainA.CreateTMClientHeader(chainIDRevision1, 1, heightMinus3, suite.now.Add(time.Minute), bothValSet, suite.valSet, bothSigners), + ClientId: chainID, + }, + suite.now, + true, + }, + { + "consensus state's valset hash different from misbehaviour should still pass", + types.NewClientState(chainID, types.DefaultTrustLevel, trustingPeriod, ubdPeriod, maxClockDrift, height, commitmenttypes.GetSDKSpecs(), upgradePath, false, false), + types.NewConsensusState(suite.now, commitmenttypes.NewMerkleRoot(tmhash.Sum([]byte("app_hash"))), suite.valsHash), + height, + types.NewConsensusState(suite.now, commitmenttypes.NewMerkleRoot(tmhash.Sum([]byte("app_hash"))), suite.valsHash), + height, + &types.Misbehaviour{ + Header1: suite.chainA.CreateTMClientHeader(chainID, int64(height.RevisionHeight), height, suite.now, bothValSet, suite.valSet, bothSigners), + Header2: suite.chainA.CreateTMClientHeader(chainID, int64(height.RevisionHeight), height, suite.now.Add(time.Minute), 
bothValSet, suite.valSet, bothSigners), + ClientId: chainID, + }, + suite.now, + true, + }, + { + "invalid misbehavior misbehaviour from different chain", + types.NewClientState(chainID, types.DefaultTrustLevel, trustingPeriod, ubdPeriod, maxClockDrift, height, commitmenttypes.GetSDKSpecs(), upgradePath, false, false), + types.NewConsensusState(suite.now, commitmenttypes.NewMerkleRoot(tmhash.Sum([]byte("app_hash"))), bothValsHash), + height, + types.NewConsensusState(suite.now, commitmenttypes.NewMerkleRoot(tmhash.Sum([]byte("app_hash"))), bothValsHash), + height, + &types.Misbehaviour{ + Header1: suite.chainA.CreateTMClientHeader("ethermint", int64(height.RevisionHeight), height, suite.now, bothValSet, bothValSet, bothSigners), + Header2: suite.chainA.CreateTMClientHeader("ethermint", int64(height.RevisionHeight), height, suite.now.Add(time.Minute), bothValSet, bothValSet, bothSigners), + ClientId: chainID, + }, + suite.now, + false, + }, + { + "invalid misbehavior misbehaviour with trusted height different from trusted consensus state", + types.NewClientState(chainID, types.DefaultTrustLevel, trustingPeriod, ubdPeriod, maxClockDrift, height, commitmenttypes.GetSDKSpecs(), upgradePath, false, false), + types.NewConsensusState(suite.now, commitmenttypes.NewMerkleRoot(tmhash.Sum([]byte("app_hash"))), bothValsHash), + heightMinus1, + types.NewConsensusState(suite.now, commitmenttypes.NewMerkleRoot(tmhash.Sum([]byte("app_hash"))), suite.valsHash), + heightMinus3, + &types.Misbehaviour{ + Header1: suite.chainA.CreateTMClientHeader(chainID, int64(height.RevisionHeight), heightMinus1, suite.now, bothValSet, bothValSet, bothSigners), + Header2: suite.chainA.CreateTMClientHeader(chainID, int64(height.RevisionHeight), height, suite.now.Add(time.Minute), bothValSet, suite.valSet, bothSigners), + ClientId: chainID, + }, + suite.now, + false, + }, + { + "invalid misbehavior misbehaviour with trusted validators different from trusted consensus state", + types.NewClientState(chainID, types.DefaultTrustLevel, trustingPeriod, ubdPeriod, maxClockDrift, height, commitmenttypes.GetSDKSpecs(), upgradePath, false, false), + types.NewConsensusState(suite.now, commitmenttypes.NewMerkleRoot(tmhash.Sum([]byte("app_hash"))), bothValsHash), + heightMinus1, + types.NewConsensusState(suite.now, commitmenttypes.NewMerkleRoot(tmhash.Sum([]byte("app_hash"))), suite.valsHash), + heightMinus3, + &types.Misbehaviour{ + Header1: suite.chainA.CreateTMClientHeader(chainID, int64(height.RevisionHeight), heightMinus1, suite.now, bothValSet, bothValSet, bothSigners), + Header2: suite.chainA.CreateTMClientHeader(chainID, int64(height.RevisionHeight), heightMinus3, suite.now.Add(time.Minute), bothValSet, bothValSet, bothSigners), + ClientId: chainID, + }, + suite.now, + false, + }, + { + "already frozen client state", + &types.ClientState{FrozenHeight: clienttypes.NewHeight(0, 1)}, + types.NewConsensusState(suite.now, commitmenttypes.NewMerkleRoot(tmhash.Sum([]byte("app_hash"))), bothValsHash), + height, + types.NewConsensusState(suite.now, commitmenttypes.NewMerkleRoot(tmhash.Sum([]byte("app_hash"))), bothValsHash), + height, + &types.Misbehaviour{ + Header1: suite.chainA.CreateTMClientHeader(chainID, int64(height.RevisionHeight), height, suite.now, bothValSet, bothValSet, bothSigners), + Header2: suite.chainA.CreateTMClientHeader(chainID, int64(height.RevisionHeight), height, suite.now.Add(time.Minute), bothValSet, bothValSet, bothSigners), + ClientId: chainID, + }, + suite.now, + false, + }, + { + "trusted consensus state does 
not exist", + types.NewClientState(chainID, types.DefaultTrustLevel, trustingPeriod, ubdPeriod, maxClockDrift, height, commitmenttypes.GetSDKSpecs(), upgradePath, false, false), + nil, // consensus state for trusted height - 1 does not exist in store + clienttypes.Height{}, + types.NewConsensusState(suite.now, commitmenttypes.NewMerkleRoot(tmhash.Sum([]byte("app_hash"))), bothValsHash), + height, + &types.Misbehaviour{ + Header1: suite.chainA.CreateTMClientHeader(chainID, int64(height.RevisionHeight), heightMinus1, suite.now, bothValSet, bothValSet, bothSigners), + Header2: suite.chainA.CreateTMClientHeader(chainID, int64(height.RevisionHeight), height, suite.now.Add(time.Minute), bothValSet, bothValSet, bothSigners), + ClientId: chainID, + }, + suite.now, + false, + }, + { + "invalid tendermint misbehaviour", + types.NewClientState(chainID, types.DefaultTrustLevel, trustingPeriod, ubdPeriod, maxClockDrift, height, commitmenttypes.GetSDKSpecs(), upgradePath, false, false), + types.NewConsensusState(suite.now, commitmenttypes.NewMerkleRoot(tmhash.Sum([]byte("app_hash"))), bothValsHash), + height, + types.NewConsensusState(suite.now, commitmenttypes.NewMerkleRoot(tmhash.Sum([]byte("app_hash"))), bothValsHash), + height, + nil, + suite.now, + false, + }, + { + "provided height > header height", + types.NewClientState(chainID, types.DefaultTrustLevel, trustingPeriod, ubdPeriod, maxClockDrift, height, commitmenttypes.GetSDKSpecs(), upgradePath, false, false), + types.NewConsensusState(suite.now, commitmenttypes.NewMerkleRoot(tmhash.Sum([]byte("app_hash"))), bothValsHash), + height, + types.NewConsensusState(suite.now, commitmenttypes.NewMerkleRoot(tmhash.Sum([]byte("app_hash"))), bothValsHash), + height, + &types.Misbehaviour{ + Header1: suite.chainA.CreateTMClientHeader(chainID, int64(height.RevisionHeight), heightMinus1, suite.now, bothValSet, bothValSet, bothSigners), + Header2: suite.chainA.CreateTMClientHeader(chainID, int64(height.RevisionHeight), heightMinus1, suite.now.Add(time.Minute), bothValSet, bothValSet, bothSigners), + ClientId: chainID, + }, + suite.now, + false, + }, + { + "trusting period expired", + types.NewClientState(chainID, types.DefaultTrustLevel, trustingPeriod, ubdPeriod, maxClockDrift, height, commitmenttypes.GetSDKSpecs(), upgradePath, false, false), + types.NewConsensusState(time.Time{}, commitmenttypes.NewMerkleRoot(tmhash.Sum([]byte("app_hash"))), bothValsHash), + heightMinus1, + types.NewConsensusState(suite.now, commitmenttypes.NewMerkleRoot(tmhash.Sum([]byte("app_hash"))), bothValsHash), + height, + &types.Misbehaviour{ + Header1: suite.chainA.CreateTMClientHeader(chainID, int64(height.RevisionHeight), heightMinus1, suite.now, bothValSet, bothValSet, bothSigners), + Header2: suite.chainA.CreateTMClientHeader(chainID, int64(height.RevisionHeight), height, suite.now.Add(time.Minute), bothValSet, bothValSet, bothSigners), + ClientId: chainID, + }, + suite.now.Add(trustingPeriod), + false, + }, + { + "trusted validators is incorrect for given consensus state", + types.NewClientState(chainID, types.DefaultTrustLevel, trustingPeriod, ubdPeriod, maxClockDrift, height, commitmenttypes.GetSDKSpecs(), upgradePath, false, false), + types.NewConsensusState(suite.now, commitmenttypes.NewMerkleRoot(tmhash.Sum([]byte("app_hash"))), bothValsHash), + height, + types.NewConsensusState(suite.now, commitmenttypes.NewMerkleRoot(tmhash.Sum([]byte("app_hash"))), bothValsHash), + height, + &types.Misbehaviour{ + Header1: suite.chainA.CreateTMClientHeader(chainID, 
int64(height.RevisionHeight), height, suite.now, bothValSet, suite.valSet, bothSigners), + Header2: suite.chainA.CreateTMClientHeader(chainID, int64(height.RevisionHeight), height, suite.now.Add(time.Minute), bothValSet, suite.valSet, bothSigners), + ClientId: chainID, + }, + suite.now, + false, + }, + { + "first valset has too much change", + types.NewClientState(chainID, types.DefaultTrustLevel, trustingPeriod, ubdPeriod, maxClockDrift, height, commitmenttypes.GetSDKSpecs(), upgradePath, false, false), + types.NewConsensusState(suite.now, commitmenttypes.NewMerkleRoot(tmhash.Sum([]byte("app_hash"))), bothValsHash), + height, + types.NewConsensusState(suite.now, commitmenttypes.NewMerkleRoot(tmhash.Sum([]byte("app_hash"))), bothValsHash), + height, + &types.Misbehaviour{ + Header1: suite.chainA.CreateTMClientHeader(chainID, int64(height.RevisionHeight), height, suite.now, altValSet, bothValSet, altSigners), + Header2: suite.chainA.CreateTMClientHeader(chainID, int64(height.RevisionHeight), height, suite.now.Add(time.Minute), bothValSet, bothValSet, bothSigners), + ClientId: chainID, + }, + suite.now, + false, + }, + { + "second valset has too much change", + types.NewClientState(chainID, types.DefaultTrustLevel, trustingPeriod, ubdPeriod, maxClockDrift, height, commitmenttypes.GetSDKSpecs(), upgradePath, false, false), + types.NewConsensusState(suite.now, commitmenttypes.NewMerkleRoot(tmhash.Sum([]byte("app_hash"))), bothValsHash), + height, + types.NewConsensusState(suite.now, commitmenttypes.NewMerkleRoot(tmhash.Sum([]byte("app_hash"))), bothValsHash), + height, + &types.Misbehaviour{ + Header1: suite.chainA.CreateTMClientHeader(chainID, int64(height.RevisionHeight), height, suite.now, bothValSet, bothValSet, bothSigners), + Header2: suite.chainA.CreateTMClientHeader(chainID, int64(height.RevisionHeight), height, suite.now.Add(time.Minute), altValSet, bothValSet, altSigners), + ClientId: chainID, + }, + suite.now, + false, + }, + { + "both valsets have too much change", + types.NewClientState(chainID, types.DefaultTrustLevel, trustingPeriod, ubdPeriod, maxClockDrift, height, commitmenttypes.GetSDKSpecs(), upgradePath, false, false), + types.NewConsensusState(suite.now, commitmenttypes.NewMerkleRoot(tmhash.Sum([]byte("app_hash"))), bothValsHash), + height, + types.NewConsensusState(suite.now, commitmenttypes.NewMerkleRoot(tmhash.Sum([]byte("app_hash"))), bothValsHash), + height, + &types.Misbehaviour{ + Header1: suite.chainA.CreateTMClientHeader(chainID, int64(height.RevisionHeight), height, suite.now, altValSet, bothValSet, altSigners), + Header2: suite.chainA.CreateTMClientHeader(chainID, int64(height.RevisionHeight), height, suite.now.Add(time.Minute), altValSet, bothValSet, altSigners), + ClientId: chainID, + }, + suite.now, + false, + }, + } + + for i, tc := range testCases { + tc := tc + suite.Run(fmt.Sprintf("Case: %s", tc.name), func() { + // reset suite to create fresh application state + suite.SetupTest() + + // Set current timestamp in context + ctx := suite.chainA.GetContext().WithBlockTime(tc.timestamp) + + // Set trusted consensus states in client store + + if tc.consensusState1 != nil { + suite.chainA.App.IBCKeeper.ClientKeeper.SetClientConsensusState(ctx, clientID, tc.height1, tc.consensusState1) + } + if tc.consensusState2 != nil { + suite.chainA.App.IBCKeeper.ClientKeeper.SetClientConsensusState(ctx, clientID, tc.height2, tc.consensusState2) + } + + clientState, err := tc.clientState.CheckMisbehaviourAndUpdateState( + ctx, + suite.cdc, + 
suite.chainA.App.IBCKeeper.ClientKeeper.ClientStore(ctx, clientID), // pass in clientID prefixed clientStore + tc.misbehaviour, + ) + + if tc.expPass { + suite.Require().NoError(err, "valid test case %d failed: %s", i, tc.name) + suite.Require().NotNil(clientState, "valid test case %d failed: %s", i, tc.name) + suite.Require().True(clientState.IsFrozen(), "valid test case %d failed: %s", i, tc.name) + suite.Require().Equal(tc.misbehaviour.GetHeight(), clientState.GetFrozenHeight(), + "valid test case %d failed: %s. Expected FrozenHeight %s got %s", tc.misbehaviour.GetHeight(), clientState.GetFrozenHeight()) + } else { + suite.Require().Error(err, "invalid test case %d passed: %s", i, tc.name) + suite.Require().Nil(clientState, "invalid test case %d passed: %s", i, tc.name) + } + }) + } +} diff --git a/light-clients/07-tendermint/types/misbehaviour_test.go b/light-clients/07-tendermint/types/misbehaviour_test.go new file mode 100644 index 0000000000..dede4e6021 --- /dev/null +++ b/light-clients/07-tendermint/types/misbehaviour_test.go @@ -0,0 +1,244 @@ +package types_test + +import ( + "time" + + "github.com/tendermint/tendermint/crypto/tmhash" + tmproto "github.com/tendermint/tendermint/proto/tendermint/types" + tmtypes "github.com/tendermint/tendermint/types" + + clienttypes "github.com/cosmos/cosmos-sdk/x/ibc/core/02-client/types" + "github.com/cosmos/cosmos-sdk/x/ibc/core/exported" + "github.com/cosmos/cosmos-sdk/x/ibc/light-clients/07-tendermint/types" + ibctesting "github.com/cosmos/cosmos-sdk/x/ibc/testing" + ibctestingmock "github.com/cosmos/cosmos-sdk/x/ibc/testing/mock" +) + +func (suite *TendermintTestSuite) TestMisbehaviour() { + signers := []tmtypes.PrivValidator{suite.privVal} + heightMinus1 := clienttypes.NewHeight(0, height.RevisionHeight-1) + + misbehaviour := &types.Misbehaviour{ + Header1: suite.header, + Header2: suite.chainA.CreateTMClientHeader(chainID, int64(height.RevisionHeight), heightMinus1, suite.now, suite.valSet, suite.valSet, signers), + ClientId: clientID, + } + + suite.Require().Equal(exported.Tendermint, misbehaviour.ClientType()) + suite.Require().Equal(clientID, misbehaviour.GetClientID()) + suite.Require().Equal(height, misbehaviour.GetHeight()) +} + +func (suite *TendermintTestSuite) TestMisbehaviourValidateBasic() { + altPrivVal := ibctestingmock.NewPV() + altPubKey, err := altPrivVal.GetPubKey() + suite.Require().NoError(err) + + revisionHeight := int64(height.RevisionHeight) + + altVal := tmtypes.NewValidator(altPubKey, revisionHeight) + + // Create bothValSet with both suite validator and altVal + bothValSet := tmtypes.NewValidatorSet(append(suite.valSet.Validators, altVal)) + // Create alternative validator set with only altVal + altValSet := tmtypes.NewValidatorSet([]*tmtypes.Validator{altVal}) + + signers := []tmtypes.PrivValidator{suite.privVal} + + // Create signer array and ensure it is in same order as bothValSet + _, suiteVal := suite.valSet.GetByIndex(0) + bothSigners := ibctesting.CreateSortedSignerArray(altPrivVal, suite.privVal, altVal, suiteVal) + + altSigners := []tmtypes.PrivValidator{altPrivVal} + + heightMinus1 := clienttypes.NewHeight(0, height.RevisionHeight-1) + + testCases := []struct { + name string + misbehaviour *types.Misbehaviour + malleateMisbehaviour func(misbehaviour *types.Misbehaviour) error + expPass bool + }{ + { + "valid misbehaviour", + &types.Misbehaviour{ + Header1: suite.header, + Header2: suite.chainA.CreateTMClientHeader(chainID, int64(height.RevisionHeight), heightMinus1, suite.now.Add(time.Minute), 
suite.valSet, suite.valSet, signers), + ClientId: clientID, + }, + func(misbehaviour *types.Misbehaviour) error { return nil }, + true, + }, + { + "misbehaviour Header1 is nil", + types.NewMisbehaviour(clientID, nil, suite.header), + func(m *types.Misbehaviour) error { return nil }, + false, + }, + { + "misbehaviour Header2 is nil", + types.NewMisbehaviour(clientID, suite.header, nil), + func(m *types.Misbehaviour) error { return nil }, + false, + }, + { + "valid misbehaviour with different trusted headers", + &types.Misbehaviour{ + Header1: suite.header, + Header2: suite.chainA.CreateTMClientHeader(chainID, int64(height.RevisionHeight), clienttypes.NewHeight(0, height.RevisionHeight-3), suite.now.Add(time.Minute), suite.valSet, bothValSet, signers), + ClientId: clientID, + }, + func(misbehaviour *types.Misbehaviour) error { return nil }, + true, + }, + { + "trusted height is 0 in Header1", + &types.Misbehaviour{ + Header1: suite.chainA.CreateTMClientHeader(chainID, int64(height.RevisionHeight), clienttypes.ZeroHeight(), suite.now.Add(time.Minute), suite.valSet, suite.valSet, signers), + Header2: suite.header, + ClientId: clientID, + }, + func(misbehaviour *types.Misbehaviour) error { return nil }, + false, + }, + { + "trusted height is 0 in Header2", + &types.Misbehaviour{ + Header1: suite.header, + Header2: suite.chainA.CreateTMClientHeader(chainID, int64(height.RevisionHeight), clienttypes.ZeroHeight(), suite.now.Add(time.Minute), suite.valSet, suite.valSet, signers), + ClientId: clientID, + }, + func(misbehaviour *types.Misbehaviour) error { return nil }, + false, + }, + { + "trusted valset is nil in Header1", + &types.Misbehaviour{ + Header1: suite.chainA.CreateTMClientHeader(chainID, int64(height.RevisionHeight), heightMinus1, suite.now.Add(time.Minute), suite.valSet, nil, signers), + Header2: suite.header, + ClientId: clientID, + }, + func(misbehaviour *types.Misbehaviour) error { return nil }, + false, + }, + { + "trusted valset is nil in Header2", + &types.Misbehaviour{ + Header1: suite.header, + Header2: suite.chainA.CreateTMClientHeader(chainID, int64(height.RevisionHeight), heightMinus1, suite.now.Add(time.Minute), suite.valSet, nil, signers), + ClientId: clientID, + }, + func(misbehaviour *types.Misbehaviour) error { return nil }, + false, + }, + { + "invalid client ID ", + &types.Misbehaviour{ + Header1: suite.header, + Header2: suite.chainA.CreateTMClientHeader(chainID, int64(height.RevisionHeight), heightMinus1, suite.now, suite.valSet, suite.valSet, signers), + ClientId: "GAIA", + }, + func(misbehaviour *types.Misbehaviour) error { return nil }, + false, + }, + { + "chainIDs do not match", + &types.Misbehaviour{ + Header1: suite.header, + Header2: suite.chainA.CreateTMClientHeader("ethermint", int64(height.RevisionHeight), heightMinus1, suite.now, suite.valSet, suite.valSet, signers), + ClientId: clientID, + }, + func(misbehaviour *types.Misbehaviour) error { return nil }, + false, + }, + { + "mismatched heights", + &types.Misbehaviour{ + Header1: suite.header, + Header2: suite.chainA.CreateTMClientHeader(chainID, 6, clienttypes.NewHeight(0, 4), suite.now, suite.valSet, suite.valSet, signers), + ClientId: clientID, + }, + func(misbehaviour *types.Misbehaviour) error { return nil }, + false, + }, + { + "same block id", + &types.Misbehaviour{ + Header1: suite.header, + Header2: suite.header, + ClientId: clientID, + }, + func(misbehaviour *types.Misbehaviour) error { return nil }, + false, + }, + { + "header 1 doesn't have 2/3 majority", + &types.Misbehaviour{ + Header1: 
suite.chainA.CreateTMClientHeader(chainID, int64(height.RevisionHeight), heightMinus1, suite.now, bothValSet, suite.valSet, bothSigners), + Header2: suite.header, + ClientId: clientID, + }, + func(misbehaviour *types.Misbehaviour) error { + // voteSet contains only altVal which is less than 2/3 of total power (height/1height) + wrongVoteSet := tmtypes.NewVoteSet(chainID, int64(misbehaviour.Header1.GetHeight().GetRevisionHeight()), 1, tmproto.PrecommitType, altValSet) + blockID, err := tmtypes.BlockIDFromProto(&misbehaviour.Header1.Commit.BlockID) + if err != nil { + return err + } + + tmCommit, err := tmtypes.MakeCommit(*blockID, int64(misbehaviour.Header2.GetHeight().GetRevisionHeight()), misbehaviour.Header1.Commit.Round, wrongVoteSet, altSigners, suite.now) + misbehaviour.Header1.Commit = tmCommit.ToProto() + return err + }, + false, + }, + { + "header 2 doesn't have 2/3 majority", + &types.Misbehaviour{ + Header1: suite.header, + Header2: suite.chainA.CreateTMClientHeader(chainID, int64(height.RevisionHeight), heightMinus1, suite.now, bothValSet, suite.valSet, bothSigners), + ClientId: clientID, + }, + func(misbehaviour *types.Misbehaviour) error { + // voteSet contains only altVal which is less than 2/3 of total power (height/1height) + wrongVoteSet := tmtypes.NewVoteSet(chainID, int64(misbehaviour.Header2.GetHeight().GetRevisionHeight()), 1, tmproto.PrecommitType, altValSet) + blockID, err := tmtypes.BlockIDFromProto(&misbehaviour.Header2.Commit.BlockID) + if err != nil { + return err + } + + tmCommit, err := tmtypes.MakeCommit(*blockID, int64(misbehaviour.Header2.GetHeight().GetRevisionHeight()), misbehaviour.Header2.Commit.Round, wrongVoteSet, altSigners, suite.now) + misbehaviour.Header2.Commit = tmCommit.ToProto() + return err + }, + false, + }, + { + "validators sign off on wrong commit", + &types.Misbehaviour{ + Header1: suite.header, + Header2: suite.chainA.CreateTMClientHeader(chainID, int64(height.RevisionHeight), heightMinus1, suite.now, bothValSet, suite.valSet, bothSigners), + ClientId: clientID, + }, + func(misbehaviour *types.Misbehaviour) error { + tmBlockID := ibctesting.MakeBlockID(tmhash.Sum([]byte("other_hash")), 3, tmhash.Sum([]byte("other_partset"))) + misbehaviour.Header2.Commit.BlockID = tmBlockID.ToProto() + return nil + }, + false, + }, + } + + for i, tc := range testCases { + tc := tc + + err := tc.malleateMisbehaviour(tc.misbehaviour) + suite.Require().NoError(err) + + if tc.expPass { + suite.Require().NoError(tc.misbehaviour.ValidateBasic(), "valid test case %d failed: %s", i, tc.name) + } else { + suite.Require().Error(tc.misbehaviour.ValidateBasic(), "invalid test case %d passed: %s", i, tc.name) + } + } +} diff --git a/light-clients/07-tendermint/types/proposal_handle.go b/light-clients/07-tendermint/types/proposal_handle.go new file mode 100644 index 0000000000..c64c52b3f8 --- /dev/null +++ b/light-clients/07-tendermint/types/proposal_handle.go @@ -0,0 +1,134 @@ +package types + +import ( + "reflect" + + "github.com/cosmos/cosmos-sdk/codec" + sdk "github.com/cosmos/cosmos-sdk/types" + sdkerrors "github.com/cosmos/cosmos-sdk/types/errors" + clienttypes "github.com/cosmos/cosmos-sdk/x/ibc/core/02-client/types" + "github.com/cosmos/cosmos-sdk/x/ibc/core/exported" +) + +// CheckSubstituteAndUpdateState will try to update the client with the state of the +// substitute if and only if the proposal passes and one of the following conditions are +// satisfied: +// 1) AllowUpdateAfterMisbehaviour and IsFrozen() = true +// 2) AllowUpdateAfterExpiry=true and 
Expire(ctx.BlockTime) = true +// +// The following must always be true: +// - The substitute client is the same type as the subject client +// - The subject and substitute client states match in all parameters (expect frozen height, latest height, and chain-id) +// +// In case 1) before updating the client, the client will be unfrozen by resetting +// the FrozenHeight to the zero Height. If a client is frozen and AllowUpdateAfterMisbehaviour +// is set to true, the client will be unexpired even if AllowUpdateAfterExpiry is set to false. +// Note, that even if the subject is updated to the state of the substitute, an error may be +// returned if the updated client state is invalid or the client is expired. +func (cs ClientState) CheckSubstituteAndUpdateState( + ctx sdk.Context, cdc codec.BinaryMarshaler, subjectClientStore, + substituteClientStore sdk.KVStore, substituteClient exported.ClientState, + initialHeight exported.Height, +) (exported.ClientState, error) { + substituteClientState, ok := substituteClient.(*ClientState) + if !ok { + return nil, sdkerrors.Wrapf( + clienttypes.ErrInvalidClient, "expected type %T, got %T", &ClientState{}, substituteClient, + ) + } + + // substitute clients are not allowed to be upgraded during the voting period + // If an upgrade passes before the subject client has been updated, a new proposal must be created + // with an initial height that contains the new revision number. + if substituteClientState.GetLatestHeight().GetRevisionNumber() != initialHeight.GetRevisionNumber() { + return nil, sdkerrors.Wrapf( + clienttypes.ErrInvalidHeight, "substitute client revision number must equal initial height revision number (%d != %d)", + substituteClientState.GetLatestHeight().GetRevisionNumber(), initialHeight.GetRevisionNumber(), + ) + } + + if !IsMatchingClientState(cs, *substituteClientState) { + return nil, sdkerrors.Wrap(clienttypes.ErrInvalidSubstitute, "subject client state does not match substitute client state") + } + + // get consensus state corresponding to client state to check if the client is expired + consensusState, err := GetConsensusState(subjectClientStore, cdc, cs.GetLatestHeight()) + if err != nil { + return nil, sdkerrors.Wrapf( + err, "unexpected error: could not get consensus state from clientstore at height: %d", cs.GetLatestHeight(), + ) + } + + switch { + + case cs.IsFrozen(): + if !cs.AllowUpdateAfterMisbehaviour { + return nil, sdkerrors.Wrap(clienttypes.ErrUpdateClientFailed, "client is not allowed to be unfrozen") + } + + // unfreeze the client + cs.FrozenHeight = clienttypes.ZeroHeight() + + case cs.IsExpired(consensusState.Timestamp, ctx.BlockTime()): + if !cs.AllowUpdateAfterExpiry { + return nil, sdkerrors.Wrap(clienttypes.ErrUpdateClientFailed, "client is not allowed to be unexpired") + } + + default: + return nil, sdkerrors.Wrap(clienttypes.ErrUpdateClientFailed, "client cannot be updated with proposal") + } + + // copy consensus states and processed time from substitute to subject + // starting from initial height and ending on the latest height (inclusive) + for i := initialHeight.GetRevisionHeight(); i <= substituteClientState.GetLatestHeight().GetRevisionHeight(); i++ { + height := clienttypes.NewHeight(substituteClientState.GetLatestHeight().GetRevisionNumber(), i) + + consensusState, err := GetConsensusState(substituteClientStore, cdc, height) + if err != nil { + // not all consensus states will be filled in + continue + } + SetConsensusState(subjectClientStore, cdc, consensusState, height) + + processedTime, found := 
GetProcessedTime(substituteClientStore, height) + if !found { + continue + } + SetProcessedTime(subjectClientStore, height, processedTime) + + } + + cs.LatestHeight = substituteClientState.LatestHeight + + // validate the updated client and ensure it isn't expired + if err := cs.Validate(); err != nil { + return nil, sdkerrors.Wrap(err, "unexpected error: updated subject client state is invalid") + } + + latestConsensusState, err := GetConsensusState(subjectClientStore, cdc, cs.GetLatestHeight()) + if err != nil { + return nil, sdkerrors.Wrapf( + err, "unexpected error: could not get consensus state for updated subject client from clientstore at height: %d", cs.GetLatestHeight(), + ) + } + + if cs.IsExpired(latestConsensusState.Timestamp, ctx.BlockTime()) { + return nil, sdkerrors.Wrap(clienttypes.ErrInvalidClient, "updated subject client is expired") + } + + return &cs, nil +} + +// IsMatchingClientState returns true if all the client state parameters match +// except for frozen height, latest height, and chain-id. +func IsMatchingClientState(subject, substitute ClientState) bool { + // zero out parameters which do not need to match + subject.LatestHeight = clienttypes.ZeroHeight() + subject.FrozenHeight = clienttypes.ZeroHeight() + substitute.LatestHeight = clienttypes.ZeroHeight() + substitute.FrozenHeight = clienttypes.ZeroHeight() + subject.ChainId = "" + substitute.ChainId = "" + + return reflect.DeepEqual(subject, substitute) +} diff --git a/light-clients/07-tendermint/types/proposal_handle_test.go b/light-clients/07-tendermint/types/proposal_handle_test.go new file mode 100644 index 0000000000..66a5120309 --- /dev/null +++ b/light-clients/07-tendermint/types/proposal_handle_test.go @@ -0,0 +1,387 @@ +package types_test + +import ( + "time" + + clienttypes "github.com/cosmos/cosmos-sdk/x/ibc/core/02-client/types" + "github.com/cosmos/cosmos-sdk/x/ibc/core/exported" + "github.com/cosmos/cosmos-sdk/x/ibc/light-clients/07-tendermint/types" + ibctesting "github.com/cosmos/cosmos-sdk/x/ibc/testing" +) + +var ( + frozenHeight = clienttypes.NewHeight(0, 1) +) + +func (suite *TendermintTestSuite) TestCheckSubstituteUpdateStateBasic() { + var ( + substitute string + substituteClientState exported.ClientState + initialHeight clienttypes.Height + ) + testCases := []struct { + name string + malleate func() + }{ + { + "solo machine used for substitute", func() { + substituteClientState = ibctesting.NewSolomachine(suite.T(), suite.cdc, "solo machine", "", 1).ClientState() + }, + }, + { + "initial height and substitute revision numbers do not match", func() { + initialHeight = clienttypes.NewHeight(substituteClientState.GetLatestHeight().GetRevisionNumber()+1, 1) + }, + }, + { + "non-matching substitute", func() { + substitute, _ := suite.coordinator.SetupClients(suite.chainA, suite.chainB, exported.Tendermint) + substituteClientState = suite.chainA.GetClientState(substitute).(*types.ClientState) + tmClientState, ok := substituteClientState.(*types.ClientState) + suite.Require().True(ok) + + tmClientState.ChainId = tmClientState.ChainId + "different chain" + }, + }, + { + "updated client is invalid - revision height is zero", func() { + substitute, _ := suite.coordinator.SetupClients(suite.chainA, suite.chainB, exported.Tendermint) + substituteClientState = suite.chainA.GetClientState(substitute).(*types.ClientState) + tmClientState, ok := substituteClientState.(*types.ClientState) + suite.Require().True(ok) + // match subject + tmClientState.AllowUpdateAfterMisbehaviour = true + 
tmClientState.AllowUpdateAfterExpiry = true + + // will occur. This case should never occur (caught by upstream checks) + initialHeight = clienttypes.NewHeight(5, 0) + tmClientState.LatestHeight = clienttypes.NewHeight(5, 0) + }, + }, + { + "updated client is expired", func() { + substitute, _ = suite.coordinator.SetupClients(suite.chainA, suite.chainB, exported.Tendermint) + substituteClientState = suite.chainA.GetClientState(substitute).(*types.ClientState) + tmClientState, ok := substituteClientState.(*types.ClientState) + suite.Require().True(ok) + initialHeight = tmClientState.LatestHeight + + // match subject + tmClientState.AllowUpdateAfterMisbehaviour = true + tmClientState.AllowUpdateAfterExpiry = true + suite.chainA.App.IBCKeeper.ClientKeeper.SetClientState(suite.chainA.GetContext(), substitute, tmClientState) + + // update substitute a few times + err := suite.coordinator.UpdateClient(suite.chainA, suite.chainB, substitute, exported.Tendermint) + suite.Require().NoError(err) + substituteClientState = suite.chainA.GetClientState(substitute) + + err = suite.coordinator.UpdateClient(suite.chainA, suite.chainB, substitute, exported.Tendermint) + suite.Require().NoError(err) + + suite.chainA.ExpireClient(tmClientState.TrustingPeriod) + suite.chainB.ExpireClient(tmClientState.TrustingPeriod) + suite.coordinator.CommitBlock(suite.chainA, suite.chainB) + + substituteClientState = suite.chainA.GetClientState(substitute) + }, + }, + } + + for _, tc := range testCases { + tc := tc + + suite.Run(tc.name, func() { + + suite.SetupTest() // reset + + subject, _ := suite.coordinator.SetupClients(suite.chainA, suite.chainB, exported.Tendermint) + subjectClientState := suite.chainA.GetClientState(subject).(*types.ClientState) + subjectClientState.AllowUpdateAfterMisbehaviour = true + subjectClientState.AllowUpdateAfterExpiry = true + + // expire subject + suite.chainA.ExpireClient(subjectClientState.TrustingPeriod) + suite.chainB.ExpireClient(subjectClientState.TrustingPeriod) + suite.coordinator.CommitBlock(suite.chainA, suite.chainB) + + tc.malleate() + + subjectClientStore := suite.chainA.App.IBCKeeper.ClientKeeper.ClientStore(suite.chainA.GetContext(), subject) + substituteClientStore := suite.chainA.App.IBCKeeper.ClientKeeper.ClientStore(suite.chainA.GetContext(), substitute) + + updatedClient, err := subjectClientState.CheckSubstituteAndUpdateState(suite.chainA.GetContext(), suite.chainA.App.AppCodec(), subjectClientStore, substituteClientStore, substituteClientState, initialHeight) + suite.Require().Error(err) + suite.Require().Nil(updatedClient) + }) + } +} + +// to expire clients, time needs to be fast forwarded on both chainA and chainB. +// this is to prevent headers from failing when attempting to update later. 
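+// TestCheckSubstituteAndUpdateState exercises all combinations of the
+// AllowUpdateAfterExpiry and AllowUpdateAfterMisbehaviour flags against frozen
+// and/or expired subject clients. The proposal update is expected to succeed
+// only when the permission flag matching the client's condition is set; a
+// client that is both frozen and expired additionally requires
+// AllowUpdateAfterMisbehaviour to be set.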
+func (suite *TendermintTestSuite) TestCheckSubstituteAndUpdateState() { + testCases := []struct { + name string + AllowUpdateAfterExpiry bool + AllowUpdateAfterMisbehaviour bool + FreezeClient bool + ExpireClient bool + expPass bool + }{ + { + name: "not allowed to be updated, not frozen or expired", + AllowUpdateAfterExpiry: false, + AllowUpdateAfterMisbehaviour: false, + FreezeClient: false, + ExpireClient: false, + expPass: false, + }, + { + name: "not allowed to be updated, client is frozen", + AllowUpdateAfterExpiry: false, + AllowUpdateAfterMisbehaviour: false, + FreezeClient: true, + ExpireClient: false, + expPass: false, + }, + { + name: "not allowed to be updated, client is expired", + AllowUpdateAfterExpiry: false, + AllowUpdateAfterMisbehaviour: false, + FreezeClient: false, + ExpireClient: true, + expPass: false, + }, + { + name: "not allowed to be updated, client is frozen and expired", + AllowUpdateAfterExpiry: false, + AllowUpdateAfterMisbehaviour: false, + FreezeClient: true, + ExpireClient: true, + expPass: false, + }, + { + name: "allowed to be updated only after misbehaviour, not frozen or expired", + AllowUpdateAfterExpiry: false, + AllowUpdateAfterMisbehaviour: true, + FreezeClient: false, + ExpireClient: false, + expPass: false, + }, + { + name: "allowed to be updated only after misbehaviour, client is expired", + AllowUpdateAfterExpiry: false, + AllowUpdateAfterMisbehaviour: true, + FreezeClient: false, + ExpireClient: true, + expPass: false, + }, + { + name: "allowed to be updated only after expiry, not frozen or expired", + AllowUpdateAfterExpiry: true, + AllowUpdateAfterMisbehaviour: false, + FreezeClient: false, + ExpireClient: false, + expPass: false, + }, + { + name: "allowed to be updated only after expiry, client is frozen", + AllowUpdateAfterExpiry: true, + AllowUpdateAfterMisbehaviour: false, + FreezeClient: true, + ExpireClient: false, + expPass: false, + }, + { + name: "PASS: allowed to be updated only after misbehaviour, client is frozen", + AllowUpdateAfterExpiry: false, + AllowUpdateAfterMisbehaviour: true, + FreezeClient: true, + ExpireClient: false, + expPass: true, + }, + { + name: "PASS: allowed to be updated only after misbehaviour, client is frozen and expired", + AllowUpdateAfterExpiry: false, + AllowUpdateAfterMisbehaviour: true, + FreezeClient: true, + ExpireClient: true, + expPass: true, + }, + { + name: "PASS: allowed to be updated only after expiry, client is expired", + AllowUpdateAfterExpiry: true, + AllowUpdateAfterMisbehaviour: false, + FreezeClient: false, + ExpireClient: true, + expPass: true, + }, + { + name: "allowed to be updated only after expiry, client is frozen and expired", + AllowUpdateAfterExpiry: true, + AllowUpdateAfterMisbehaviour: false, + FreezeClient: true, + ExpireClient: true, + expPass: false, + }, + { + name: "allowed to be updated after expiry and misbehaviour, not frozen or expired", + AllowUpdateAfterExpiry: true, + AllowUpdateAfterMisbehaviour: true, + FreezeClient: false, + ExpireClient: false, + expPass: false, + }, + { + name: "PASS: allowed to be updated after expiry and misbehaviour, client is frozen", + AllowUpdateAfterExpiry: true, + AllowUpdateAfterMisbehaviour: true, + FreezeClient: true, + ExpireClient: false, + expPass: true, + }, + { + name: "PASS: allowed to be updated after expiry and misbehaviour, client is expired", + AllowUpdateAfterExpiry: true, + AllowUpdateAfterMisbehaviour: true, + FreezeClient: false, + ExpireClient: true, + expPass: true, + }, + { + name: "PASS: allowed to be updated 
after expiry and misbehaviour, client is frozen and expired", + AllowUpdateAfterExpiry: true, + AllowUpdateAfterMisbehaviour: true, + FreezeClient: true, + ExpireClient: true, + expPass: true, + }, + } + + for _, tc := range testCases { + tc := tc + + // for each test case a header used for unexpiring clients and unfreezing + // a client are each tested to ensure that unexpiry headers cannot update + // a client when a unfreezing header is required. + suite.Run(tc.name, func() { + + // start by testing unexpiring the client + suite.SetupTest() // reset + + // construct subject using test case parameters + subject, _ := suite.coordinator.SetupClients(suite.chainA, suite.chainB, exported.Tendermint) + subjectClientState := suite.chainA.GetClientState(subject).(*types.ClientState) + subjectClientState.AllowUpdateAfterExpiry = tc.AllowUpdateAfterExpiry + subjectClientState.AllowUpdateAfterMisbehaviour = tc.AllowUpdateAfterMisbehaviour + + // apply freezing or expiry as determined by the test case + if tc.FreezeClient { + subjectClientState.FrozenHeight = frozenHeight + } + if tc.ExpireClient { + suite.chainA.ExpireClient(subjectClientState.TrustingPeriod) + suite.chainB.ExpireClient(subjectClientState.TrustingPeriod) + suite.coordinator.CommitBlock(suite.chainA, suite.chainB) + } + + // construct the substitute to match the subject client + // NOTE: the substitute is explicitly created after the freezing or expiry occurs, + // primarily to prevent the substitute from becoming frozen. It also should be + // the natural flow of events in practice. The subject will become frozen/expired + // and a substitute will be created along with a governance proposal as a response + + substitute, _ := suite.coordinator.SetupClients(suite.chainA, suite.chainB, exported.Tendermint) + substituteClientState := suite.chainA.GetClientState(substitute).(*types.ClientState) + substituteClientState.AllowUpdateAfterExpiry = tc.AllowUpdateAfterExpiry + substituteClientState.AllowUpdateAfterMisbehaviour = tc.AllowUpdateAfterMisbehaviour + suite.chainA.App.IBCKeeper.ClientKeeper.SetClientState(suite.chainA.GetContext(), substitute, substituteClientState) + + initialHeight := substituteClientState.GetLatestHeight() + + // update substitute a few times + for i := 0; i < 3; i++ { + err := suite.coordinator.UpdateClient(suite.chainA, suite.chainB, substitute, exported.Tendermint) + suite.Require().NoError(err) + // skip a block + suite.coordinator.CommitBlock(suite.chainA, suite.chainB) + } + + // get updated substitute + substituteClientState = suite.chainA.GetClientState(substitute).(*types.ClientState) + + subjectClientStore := suite.chainA.App.IBCKeeper.ClientKeeper.ClientStore(suite.chainA.GetContext(), subject) + substituteClientStore := suite.chainA.App.IBCKeeper.ClientKeeper.ClientStore(suite.chainA.GetContext(), substitute) + updatedClient, err := subjectClientState.CheckSubstituteAndUpdateState(suite.chainA.GetContext(), suite.chainA.App.AppCodec(), subjectClientStore, substituteClientStore, substituteClientState, initialHeight) + + if tc.expPass { + suite.Require().NoError(err) + suite.Require().Equal(clienttypes.ZeroHeight(), updatedClient.GetFrozenHeight()) + } else { + suite.Require().Error(err) + suite.Require().Nil(updatedClient) + } + + }) + } +} + +func (suite *TendermintTestSuite) TestIsMatchingClientState() { + var ( + subject, substitute string + subjectClientState, substituteClientState *types.ClientState + ) + + testCases := []struct { + name string + malleate func() + expPass bool + }{ + { + 
"matching clients", func() { + subjectClientState = suite.chainA.GetClientState(subject).(*types.ClientState) + substituteClientState = suite.chainA.GetClientState(substitute).(*types.ClientState) + }, true, + }, + { + "matching, frozen height is not used in check for equality", func() { + subjectClientState.FrozenHeight = frozenHeight + substituteClientState.FrozenHeight = clienttypes.ZeroHeight() + }, true, + }, + { + "matching, latest height is not used in check for equality", func() { + subjectClientState.LatestHeight = clienttypes.NewHeight(0, 10) + substituteClientState.FrozenHeight = clienttypes.ZeroHeight() + }, true, + }, + { + "matching, chain id is different", func() { + subjectClientState.ChainId = "bitcoin" + substituteClientState.ChainId = "ethereum" + }, true, + }, + { + "not matching, trusting period is different", func() { + subjectClientState.TrustingPeriod = time.Duration(time.Hour * 10) + substituteClientState.TrustingPeriod = time.Duration(time.Hour * 1) + }, false, + }, + } + + for _, tc := range testCases { + tc := tc + + suite.Run(tc.name, func() { + suite.SetupTest() // reset + + subject, _ = suite.coordinator.SetupClients(suite.chainA, suite.chainB, exported.Tendermint) + substitute, _ = suite.coordinator.SetupClients(suite.chainA, suite.chainB, exported.Tendermint) + + tc.malleate() + + suite.Require().Equal(tc.expPass, types.IsMatchingClientState(*subjectClientState, *substituteClientState)) + + }) + } +} diff --git a/light-clients/07-tendermint/types/store.go b/light-clients/07-tendermint/types/store.go new file mode 100644 index 0000000000..7d6a841b89 --- /dev/null +++ b/light-clients/07-tendermint/types/store.go @@ -0,0 +1,96 @@ +package types + +import ( + "strings" + + "github.com/cosmos/cosmos-sdk/codec" + sdk "github.com/cosmos/cosmos-sdk/types" + sdkerrors "github.com/cosmos/cosmos-sdk/types/errors" + clienttypes "github.com/cosmos/cosmos-sdk/x/ibc/core/02-client/types" + host "github.com/cosmos/cosmos-sdk/x/ibc/core/24-host" + "github.com/cosmos/cosmos-sdk/x/ibc/core/exported" +) + +// KeyProcessedTime is appended to consensus state key to store the processed time +var KeyProcessedTime = []byte("/processedTime") + +// SetConsensusState stores the consensus state at the given height. +func SetConsensusState(clientStore sdk.KVStore, cdc codec.BinaryMarshaler, consensusState *ConsensusState, height exported.Height) { + key := host.ConsensusStateKey(height) + val := clienttypes.MustMarshalConsensusState(cdc, consensusState) + clientStore.Set(key, val) +} + +// GetConsensusState retrieves the consensus state from the client prefixed +// store. An error is returned if the consensus state does not exist. +func GetConsensusState(store sdk.KVStore, cdc codec.BinaryMarshaler, height exported.Height) (*ConsensusState, error) { + bz := store.Get(host.ConsensusStateKey(height)) + if bz == nil { + return nil, sdkerrors.Wrapf( + clienttypes.ErrConsensusStateNotFound, + "consensus state does not exist for height %s", height, + ) + } + + consensusStateI, err := clienttypes.UnmarshalConsensusState(cdc, bz) + if err != nil { + return nil, sdkerrors.Wrapf(clienttypes.ErrInvalidConsensus, "unmarshal error: %v", err) + } + + consensusState, ok := consensusStateI.(*ConsensusState) + if !ok { + return nil, sdkerrors.Wrapf( + clienttypes.ErrInvalidConsensus, + "invalid consensus type %T, expected %T", consensusState, &ConsensusState{}, + ) + } + + return consensusState, nil +} + +// IterateProcessedTime iterates through the prefix store and applies the callback. 
+// If the cb returns true, then iterator will close and stop. +func IterateProcessedTime(store sdk.KVStore, cb func(key, val []byte) bool) { + iterator := sdk.KVStorePrefixIterator(store, []byte(host.KeyConsensusStatePrefix)) + + defer iterator.Close() + for ; iterator.Valid(); iterator.Next() { + keySplit := strings.Split(string(iterator.Key()), "/") + // processed time key in prefix store has format: "consensusState//processedTime" + if len(keySplit) != 3 || keySplit[2] != "processedTime" { + // ignore all consensus state keys + continue + } + + if cb(iterator.Key(), iterator.Value()) { + break + } + } +} + +// ProcessedTime Store code + +// ProcessedTimeKey returns the key under which the processed time will be stored in the client store. +func ProcessedTimeKey(height exported.Height) []byte { + return append(host.ConsensusStateKey(height), KeyProcessedTime...) +} + +// SetProcessedTime stores the time at which a header was processed and the corresponding consensus state was created. +// This is useful when validating whether a packet has reached the specified delay period in the tendermint client's +// verification functions +func SetProcessedTime(clientStore sdk.KVStore, height exported.Height, timeNs uint64) { + key := ProcessedTimeKey(height) + val := sdk.Uint64ToBigEndian(timeNs) + clientStore.Set(key, val) +} + +// GetProcessedTime gets the time (in nanoseconds) at which this chain received and processed a tendermint header. +// This is used to validate that a received packet has passed the delay period. +func GetProcessedTime(clientStore sdk.KVStore, height exported.Height) (uint64, bool) { + key := ProcessedTimeKey(height) + bz := clientStore.Get(key) + if bz == nil { + return 0, false + } + return sdk.BigEndianToUint64(bz), true +} diff --git a/light-clients/07-tendermint/types/store_test.go b/light-clients/07-tendermint/types/store_test.go new file mode 100644 index 0000000000..b8badc0947 --- /dev/null +++ b/light-clients/07-tendermint/types/store_test.go @@ -0,0 +1,113 @@ +package types_test + +import ( + clienttypes "github.com/cosmos/cosmos-sdk/x/ibc/core/02-client/types" + channeltypes "github.com/cosmos/cosmos-sdk/x/ibc/core/04-channel/types" + host "github.com/cosmos/cosmos-sdk/x/ibc/core/24-host" + "github.com/cosmos/cosmos-sdk/x/ibc/core/exported" + solomachinetypes "github.com/cosmos/cosmos-sdk/x/ibc/light-clients/06-solomachine/types" + "github.com/cosmos/cosmos-sdk/x/ibc/light-clients/07-tendermint/types" + ibctesting "github.com/cosmos/cosmos-sdk/x/ibc/testing" +) + +func (suite *TendermintTestSuite) TestGetConsensusState() { + var ( + height exported.Height + clientA string + ) + + testCases := []struct { + name string + malleate func() + expPass bool + }{ + { + "success", func() {}, true, + }, + { + "consensus state not found", func() { + // use height with no consensus state set + height = height.(clienttypes.Height).Increment() + }, false, + }, + { + "not a consensus state interface", func() { + // marshal an empty client state and set as consensus state + store := suite.chainA.App.IBCKeeper.ClientKeeper.ClientStore(suite.chainA.GetContext(), clientA) + clientStateBz := suite.chainA.App.IBCKeeper.ClientKeeper.MustMarshalClientState(&types.ClientState{}) + store.Set(host.ConsensusStateKey(height), clientStateBz) + }, false, + }, + { + "invalid consensus state (solomachine)", func() { + // marshal and set solomachine consensus state + store := suite.chainA.App.IBCKeeper.ClientKeeper.ClientStore(suite.chainA.GetContext(), clientA) + consensusStateBz := 
suite.chainA.App.IBCKeeper.ClientKeeper.MustMarshalConsensusState(&solomachinetypes.ConsensusState{}) + store.Set(host.ConsensusStateKey(height), consensusStateBz) + }, false, + }, + } + + for _, tc := range testCases { + tc := tc + + suite.Run(tc.name, func() { + suite.SetupTest() + + clientA, _, _, _, _, _ = suite.coordinator.Setup(suite.chainA, suite.chainB, channeltypes.UNORDERED) + clientState := suite.chainA.GetClientState(clientA) + height = clientState.GetLatestHeight() + + tc.malleate() // change vars as necessary + + store := suite.chainA.App.IBCKeeper.ClientKeeper.ClientStore(suite.chainA.GetContext(), clientA) + consensusState, err := types.GetConsensusState(store, suite.chainA.Codec, height) + + if tc.expPass { + suite.Require().NoError(err) + expConsensusState, found := suite.chainA.GetConsensusState(clientA, height) + suite.Require().True(found) + suite.Require().Equal(expConsensusState, consensusState) + } else { + suite.Require().Error(err) + suite.Require().Nil(consensusState) + } + }) + } +} + +func (suite *TendermintTestSuite) TestGetProcessedTime() { + // Verify ProcessedTime on CreateClient + // coordinator increments time before creating client + expectedTime := suite.chainA.CurrentHeader.Time.Add(ibctesting.TimeIncrement) + + clientA, err := suite.coordinator.CreateClient(suite.chainA, suite.chainB, exported.Tendermint) + suite.Require().NoError(err) + + clientState := suite.chainA.GetClientState(clientA) + height := clientState.GetLatestHeight() + + store := suite.chainA.App.IBCKeeper.ClientKeeper.ClientStore(suite.chainA.GetContext(), clientA) + actualTime, ok := types.GetProcessedTime(store, height) + suite.Require().True(ok, "could not retrieve processed time for stored consensus state") + suite.Require().Equal(uint64(expectedTime.UnixNano()), actualTime, "retrieved processed time is not expected value") + + // Verify ProcessedTime on UpdateClient + // coordinator increments time before updating client + expectedTime = suite.chainA.CurrentHeader.Time.Add(ibctesting.TimeIncrement) + + err = suite.coordinator.UpdateClient(suite.chainA, suite.chainB, clientA, exported.Tendermint) + suite.Require().NoError(err) + + clientState = suite.chainA.GetClientState(clientA) + height = clientState.GetLatestHeight() + + store = suite.chainA.App.IBCKeeper.ClientKeeper.ClientStore(suite.chainA.GetContext(), clientA) + actualTime, ok = types.GetProcessedTime(store, height) + suite.Require().True(ok, "could not retrieve processed time for stored consensus state") + suite.Require().Equal(uint64(expectedTime.UnixNano()), actualTime, "retrieved processed time is not expected value") + + // try to get processed time for height that doesn't exist in store + _, ok = types.GetProcessedTime(store, clienttypes.NewHeight(1, 1)) + suite.Require().False(ok, "retrieved processed time for a non-existent consensus state") +} diff --git a/light-clients/07-tendermint/types/tendermint.pb.go b/light-clients/07-tendermint/types/tendermint.pb.go new file mode 100644 index 0000000000..aa53fb702d --- /dev/null +++ b/light-clients/07-tendermint/types/tendermint.pb.go @@ -0,0 +1,1917 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. 
+// source: ibcgo/lightclients/tendermint/v1/tendermint.proto + +package types + +import ( + fmt "fmt" + _go "github.com/confio/ics23/go" + types "github.com/cosmos/ibc-go/core/02-client/types" + types1 "github.com/cosmos/ibc-go/core/23-commitment/types" + _ "github.com/gogo/protobuf/gogoproto" + proto "github.com/gogo/protobuf/proto" + github_com_gogo_protobuf_types "github.com/gogo/protobuf/types" + _ "github.com/golang/protobuf/ptypes/duration" + _ "github.com/golang/protobuf/ptypes/timestamp" + github_com_tendermint_tendermint_libs_bytes "github.com/tendermint/tendermint/libs/bytes" + types2 "github.com/tendermint/tendermint/proto/tendermint/types" + io "io" + math "math" + math_bits "math/bits" + time "time" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf +var _ = time.Kitchen + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +// ClientState from Tendermint tracks the current validator set, latest height, +// and a possible frozen height. +type ClientState struct { + ChainId string `protobuf:"bytes,1,opt,name=chain_id,json=chainId,proto3" json:"chain_id,omitempty"` + TrustLevel Fraction `protobuf:"bytes,2,opt,name=trust_level,json=trustLevel,proto3" json:"trust_level" yaml:"trust_level"` + // duration of the period since the LastestTimestamp during which the + // submitted headers are valid for upgrade + TrustingPeriod time.Duration `protobuf:"bytes,3,opt,name=trusting_period,json=trustingPeriod,proto3,stdduration" json:"trusting_period" yaml:"trusting_period"` + // duration of the staking unbonding period + UnbondingPeriod time.Duration `protobuf:"bytes,4,opt,name=unbonding_period,json=unbondingPeriod,proto3,stdduration" json:"unbonding_period" yaml:"unbonding_period"` + // defines how much new (untrusted) header's Time can drift into the future. + MaxClockDrift time.Duration `protobuf:"bytes,5,opt,name=max_clock_drift,json=maxClockDrift,proto3,stdduration" json:"max_clock_drift" yaml:"max_clock_drift"` + // Block height when the client was frozen due to a misbehaviour + FrozenHeight types.Height `protobuf:"bytes,6,opt,name=frozen_height,json=frozenHeight,proto3" json:"frozen_height" yaml:"frozen_height"` + // Latest height the client was updated to + LatestHeight types.Height `protobuf:"bytes,7,opt,name=latest_height,json=latestHeight,proto3" json:"latest_height" yaml:"latest_height"` + // Proof specifications used in verifying counterparty state + ProofSpecs []*_go.ProofSpec `protobuf:"bytes,8,rep,name=proof_specs,json=proofSpecs,proto3" json:"proof_specs,omitempty" yaml:"proof_specs"` + // Path at which next upgraded client will be committed. + // Each element corresponds to the key for a single CommitmentProof in the + // chained proof. 
NOTE: ClientState must stored under + // `{upgradePath}/{upgradeHeight}/clientState` ConsensusState must be stored + // under `{upgradepath}/{upgradeHeight}/consensusState` For SDK chains using + // the default upgrade module, upgrade_path should be []string{"upgrade", + // "upgradedIBCState"}` + UpgradePath []string `protobuf:"bytes,9,rep,name=upgrade_path,json=upgradePath,proto3" json:"upgrade_path,omitempty" yaml:"upgrade_path"` + // This flag, when set to true, will allow governance to recover a client + // which has expired + AllowUpdateAfterExpiry bool `protobuf:"varint,10,opt,name=allow_update_after_expiry,json=allowUpdateAfterExpiry,proto3" json:"allow_update_after_expiry,omitempty" yaml:"allow_update_after_expiry"` + // This flag, when set to true, will allow governance to unfreeze a client + // whose chain has experienced a misbehaviour event + AllowUpdateAfterMisbehaviour bool `protobuf:"varint,11,opt,name=allow_update_after_misbehaviour,json=allowUpdateAfterMisbehaviour,proto3" json:"allow_update_after_misbehaviour,omitempty" yaml:"allow_update_after_misbehaviour"` +} + +func (m *ClientState) Reset() { *m = ClientState{} } +func (m *ClientState) String() string { return proto.CompactTextString(m) } +func (*ClientState) ProtoMessage() {} +func (*ClientState) Descriptor() ([]byte, []int) { + return fileDescriptor_868940ee8c1cf959, []int{0} +} +func (m *ClientState) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ClientState) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_ClientState.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *ClientState) XXX_Merge(src proto.Message) { + xxx_messageInfo_ClientState.Merge(m, src) +} +func (m *ClientState) XXX_Size() int { + return m.Size() +} +func (m *ClientState) XXX_DiscardUnknown() { + xxx_messageInfo_ClientState.DiscardUnknown(m) +} + +var xxx_messageInfo_ClientState proto.InternalMessageInfo + +// ConsensusState defines the consensus state from Tendermint. +type ConsensusState struct { + // timestamp that corresponds to the block height in which the ConsensusState + // was stored. 
+ Timestamp time.Time `protobuf:"bytes,1,opt,name=timestamp,proto3,stdtime" json:"timestamp"` + // commitment root (i.e app hash) + Root types1.MerkleRoot `protobuf:"bytes,2,opt,name=root,proto3" json:"root"` + NextValidatorsHash github_com_tendermint_tendermint_libs_bytes.HexBytes `protobuf:"bytes,3,opt,name=next_validators_hash,json=nextValidatorsHash,proto3,casttype=github.com/tendermint/tendermint/libs/bytes.HexBytes" json:"next_validators_hash,omitempty" yaml:"next_validators_hash"` +} + +func (m *ConsensusState) Reset() { *m = ConsensusState{} } +func (m *ConsensusState) String() string { return proto.CompactTextString(m) } +func (*ConsensusState) ProtoMessage() {} +func (*ConsensusState) Descriptor() ([]byte, []int) { + return fileDescriptor_868940ee8c1cf959, []int{1} +} +func (m *ConsensusState) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ConsensusState) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_ConsensusState.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *ConsensusState) XXX_Merge(src proto.Message) { + xxx_messageInfo_ConsensusState.Merge(m, src) +} +func (m *ConsensusState) XXX_Size() int { + return m.Size() +} +func (m *ConsensusState) XXX_DiscardUnknown() { + xxx_messageInfo_ConsensusState.DiscardUnknown(m) +} + +var xxx_messageInfo_ConsensusState proto.InternalMessageInfo + +// Misbehaviour is a wrapper over two conflicting Headers +// that implements Misbehaviour interface expected by ICS-02 +type Misbehaviour struct { + ClientId string `protobuf:"bytes,1,opt,name=client_id,json=clientId,proto3" json:"client_id,omitempty" yaml:"client_id"` + Header1 *Header `protobuf:"bytes,2,opt,name=header_1,json=header1,proto3" json:"header_1,omitempty" yaml:"header_1"` + Header2 *Header `protobuf:"bytes,3,opt,name=header_2,json=header2,proto3" json:"header_2,omitempty" yaml:"header_2"` +} + +func (m *Misbehaviour) Reset() { *m = Misbehaviour{} } +func (m *Misbehaviour) String() string { return proto.CompactTextString(m) } +func (*Misbehaviour) ProtoMessage() {} +func (*Misbehaviour) Descriptor() ([]byte, []int) { + return fileDescriptor_868940ee8c1cf959, []int{2} +} +func (m *Misbehaviour) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Misbehaviour) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Misbehaviour.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Misbehaviour) XXX_Merge(src proto.Message) { + xxx_messageInfo_Misbehaviour.Merge(m, src) +} +func (m *Misbehaviour) XXX_Size() int { + return m.Size() +} +func (m *Misbehaviour) XXX_DiscardUnknown() { + xxx_messageInfo_Misbehaviour.DiscardUnknown(m) +} + +var xxx_messageInfo_Misbehaviour proto.InternalMessageInfo + +// Header defines the Tendermint client consensus Header. +// It encapsulates all the information necessary to update from a trusted +// Tendermint ConsensusState. The inclusion of TrustedHeight and +// TrustedValidators allows this update to process correctly, so long as the +// ConsensusState for the TrustedHeight exists, this removes race conditions +// among relayers The SignedHeader and ValidatorSet are the new untrusted update +// fields for the client. 
The TrustedHeight is the height of a stored +// ConsensusState on the client that will be used to verify the new untrusted +// header. The Trusted ConsensusState must be within the unbonding period of +// current time in order to correctly verify, and the TrustedValidators must +// hash to TrustedConsensusState.NextValidatorsHash since that is the last +// trusted validator set at the TrustedHeight. +type Header struct { + *types2.SignedHeader `protobuf:"bytes,1,opt,name=signed_header,json=signedHeader,proto3,embedded=signed_header" json:"signed_header,omitempty" yaml:"signed_header"` + ValidatorSet *types2.ValidatorSet `protobuf:"bytes,2,opt,name=validator_set,json=validatorSet,proto3" json:"validator_set,omitempty" yaml:"validator_set"` + TrustedHeight types.Height `protobuf:"bytes,3,opt,name=trusted_height,json=trustedHeight,proto3" json:"trusted_height" yaml:"trusted_height"` + TrustedValidators *types2.ValidatorSet `protobuf:"bytes,4,opt,name=trusted_validators,json=trustedValidators,proto3" json:"trusted_validators,omitempty" yaml:"trusted_validators"` +} + +func (m *Header) Reset() { *m = Header{} } +func (m *Header) String() string { return proto.CompactTextString(m) } +func (*Header) ProtoMessage() {} +func (*Header) Descriptor() ([]byte, []int) { + return fileDescriptor_868940ee8c1cf959, []int{3} +} +func (m *Header) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Header) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Header.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Header) XXX_Merge(src proto.Message) { + xxx_messageInfo_Header.Merge(m, src) +} +func (m *Header) XXX_Size() int { + return m.Size() +} +func (m *Header) XXX_DiscardUnknown() { + xxx_messageInfo_Header.DiscardUnknown(m) +} + +var xxx_messageInfo_Header proto.InternalMessageInfo + +func (m *Header) GetValidatorSet() *types2.ValidatorSet { + if m != nil { + return m.ValidatorSet + } + return nil +} + +func (m *Header) GetTrustedHeight() types.Height { + if m != nil { + return m.TrustedHeight + } + return types.Height{} +} + +func (m *Header) GetTrustedValidators() *types2.ValidatorSet { + if m != nil { + return m.TrustedValidators + } + return nil +} + +// Fraction defines the protobuf message type for tmmath.Fraction that only +// supports positive values. 
+type Fraction struct { + Numerator uint64 `protobuf:"varint,1,opt,name=numerator,proto3" json:"numerator,omitempty"` + Denominator uint64 `protobuf:"varint,2,opt,name=denominator,proto3" json:"denominator,omitempty"` +} + +func (m *Fraction) Reset() { *m = Fraction{} } +func (m *Fraction) String() string { return proto.CompactTextString(m) } +func (*Fraction) ProtoMessage() {} +func (*Fraction) Descriptor() ([]byte, []int) { + return fileDescriptor_868940ee8c1cf959, []int{4} +} +func (m *Fraction) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Fraction) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Fraction.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Fraction) XXX_Merge(src proto.Message) { + xxx_messageInfo_Fraction.Merge(m, src) +} +func (m *Fraction) XXX_Size() int { + return m.Size() +} +func (m *Fraction) XXX_DiscardUnknown() { + xxx_messageInfo_Fraction.DiscardUnknown(m) +} + +var xxx_messageInfo_Fraction proto.InternalMessageInfo + +func (m *Fraction) GetNumerator() uint64 { + if m != nil { + return m.Numerator + } + return 0 +} + +func (m *Fraction) GetDenominator() uint64 { + if m != nil { + return m.Denominator + } + return 0 +} + +func init() { + proto.RegisterType((*ClientState)(nil), "ibcgo.lightclients.tendermint.v1.ClientState") + proto.RegisterType((*ConsensusState)(nil), "ibcgo.lightclients.tendermint.v1.ConsensusState") + proto.RegisterType((*Misbehaviour)(nil), "ibcgo.lightclients.tendermint.v1.Misbehaviour") + proto.RegisterType((*Header)(nil), "ibcgo.lightclients.tendermint.v1.Header") + proto.RegisterType((*Fraction)(nil), "ibcgo.lightclients.tendermint.v1.Fraction") +} + +func init() { + proto.RegisterFile("ibcgo/lightclients/tendermint/v1/tendermint.proto", fileDescriptor_868940ee8c1cf959) +} + +var fileDescriptor_868940ee8c1cf959 = []byte{ + // 1080 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x9c, 0x56, 0x41, 0x6f, 0xe3, 0x44, + 0x14, 0x6e, 0xda, 0xb2, 0x4d, 0x26, 0xe9, 0xb6, 0x78, 0x4b, 0x37, 0x2d, 0xd9, 0x38, 0x98, 0x15, + 0x0a, 0x2b, 0xd5, 0x26, 0x59, 0x24, 0xa4, 0x1e, 0x90, 0x70, 0x17, 0xd4, 0x22, 0x56, 0xaa, 0xdc, + 0x05, 0x24, 0x24, 0x64, 0x4d, 0xec, 0x89, 0x3d, 0x5a, 0xdb, 0x63, 0x3c, 0x93, 0xd0, 0xf2, 0x0b, + 0xe0, 0xb6, 0xdc, 0x38, 0x70, 0xe0, 0xc4, 0x6f, 0xd9, 0x63, 0x8f, 0x9c, 0x0c, 0x6a, 0xef, 0x1c, + 0x72, 0xe4, 0x84, 0x3c, 0x33, 0x76, 0x26, 0x6d, 0x57, 0x65, 0xb9, 0x44, 0xf3, 0xde, 0xfb, 0xde, + 0xf7, 0x65, 0xde, 0xbc, 0x79, 0x63, 0x30, 0xc0, 0x23, 0x2f, 0x20, 0x56, 0x84, 0x83, 0x90, 0x79, + 0x11, 0x46, 0x09, 0xa3, 0x16, 0x43, 0x89, 0x8f, 0xb2, 0x18, 0x27, 0xcc, 0x9a, 0x0e, 0x14, 0xcb, + 0x4c, 0x33, 0xc2, 0x88, 0xd6, 0xe3, 0x29, 0xa6, 0x9a, 0x62, 0x2a, 0xa0, 0xe9, 0x60, 0xb7, 0xa7, + 0x30, 0xb0, 0xb3, 0x14, 0x51, 0x6b, 0x0a, 0x23, 0xec, 0x43, 0x46, 0x32, 0xc1, 0xb1, 0xdb, 0xb9, + 0x86, 0xe0, 0xbf, 0x32, 0x7a, 0xcf, 0x23, 0xc9, 0x18, 0x13, 0x2b, 0xcd, 0x08, 0x19, 0x97, 0xce, + 0x6e, 0x40, 0x48, 0x10, 0x21, 0x8b, 0x5b, 0xa3, 0xc9, 0xd8, 0xf2, 0x27, 0x19, 0x64, 0x98, 0x24, + 0x32, 0xae, 0x5f, 0x8d, 0x33, 0x1c, 0x23, 0xca, 0x60, 0x9c, 0x4a, 0xc0, 0x3b, 0x62, 0xab, 0x1e, + 0xc9, 0x90, 0x25, 0xfe, 0x77, 0xb1, 0x3d, 0xb1, 0x92, 0x90, 0xf7, 0x55, 0x08, 0x89, 0x63, 0xcc, + 0xe2, 0x12, 0x56, 0x59, 0x12, 0xba, 0x15, 0x90, 0x80, 0xf0, 0xa5, 0x55, 0xac, 0x84, 0xd7, 0xf8, + 0x7b, 0x0d, 0x34, 0x0f, 
0x38, 0xe3, 0x09, 0x83, 0x0c, 0x69, 0x3b, 0xa0, 0xee, 0x85, 0x10, 0x27, + 0x2e, 0xf6, 0xdb, 0xb5, 0x5e, 0xad, 0xdf, 0x70, 0xd6, 0xb8, 0x7d, 0xe4, 0x6b, 0x01, 0x68, 0xb2, + 0x6c, 0x42, 0x99, 0x1b, 0xa1, 0x29, 0x8a, 0xda, 0xcb, 0xbd, 0x5a, 0xbf, 0x39, 0x7c, 0x64, 0xde, + 0x56, 0x5c, 0xf3, 0xb3, 0x0c, 0x7a, 0xc5, 0xb6, 0xed, 0xdd, 0x97, 0xb9, 0xbe, 0x34, 0xcb, 0x75, + 0xed, 0x0c, 0xc6, 0xd1, 0xbe, 0xa1, 0x90, 0x19, 0x0e, 0xe0, 0xd6, 0x17, 0x85, 0xa1, 0x8d, 0xc1, + 0x06, 0xb7, 0x70, 0x12, 0xb8, 0x29, 0xca, 0x30, 0xf1, 0xdb, 0x2b, 0x5c, 0x6c, 0xc7, 0x14, 0x25, + 0x33, 0xcb, 0x92, 0x99, 0x4f, 0x64, 0x49, 0x6d, 0x43, 0x72, 0x6f, 0x2b, 0xdc, 0xf3, 0x7c, 0xe3, + 0x97, 0x3f, 0xf5, 0x9a, 0x73, 0xb7, 0xf4, 0x1e, 0x73, 0xa7, 0x86, 0xc1, 0xe6, 0x24, 0x19, 0x91, + 0xc4, 0x57, 0x84, 0x56, 0x6f, 0x13, 0x7a, 0x57, 0x0a, 0xdd, 0x17, 0x42, 0x57, 0x09, 0x84, 0xd2, + 0x46, 0xe5, 0x96, 0x52, 0x08, 0x6c, 0xc4, 0xf0, 0xd4, 0xf5, 0x22, 0xe2, 0x3d, 0x77, 0xfd, 0x0c, + 0x8f, 0x59, 0xfb, 0x8d, 0xd7, 0xdc, 0xd2, 0x95, 0x7c, 0x21, 0xb4, 0x1e, 0xc3, 0xd3, 0x83, 0xc2, + 0xf9, 0xa4, 0xf0, 0x69, 0x2e, 0x58, 0x1f, 0x67, 0xe4, 0x07, 0x94, 0xb8, 0x21, 0x2a, 0x0e, 0xa4, + 0x7d, 0x87, 0x8b, 0x74, 0xe4, 0x21, 0x15, 0x6d, 0x62, 0xca, 0xfe, 0x99, 0x0e, 0xcc, 0x43, 0x8e, + 0xb1, 0x3b, 0x52, 0x67, 0x4b, 0xe8, 0x2c, 0x10, 0x18, 0x4e, 0x4b, 0xd8, 0x02, 0x5b, 0x08, 0x44, + 0x90, 0x21, 0xca, 0x4a, 0x81, 0xb5, 0xd7, 0x17, 0x58, 0x20, 0x30, 0x9c, 0x96, 0xb0, 0xa5, 0xc0, + 0x11, 0x68, 0xf2, 0x4b, 0xe4, 0xd2, 0x14, 0x79, 0xb4, 0x5d, 0xef, 0xad, 0xf4, 0x9b, 0xc3, 0x4d, + 0x13, 0x7b, 0x74, 0xf8, 0xd8, 0x3c, 0x2e, 0x22, 0x27, 0x29, 0xf2, 0xec, 0xed, 0x79, 0x1b, 0x29, + 0x70, 0xc3, 0x01, 0x69, 0x09, 0xa1, 0xda, 0x3e, 0x68, 0x4d, 0xd2, 0x20, 0x83, 0x3e, 0x72, 0x53, + 0xc8, 0xc2, 0x76, 0xa3, 0xb7, 0xd2, 0x6f, 0xd8, 0xf7, 0x67, 0xb9, 0x7e, 0x4f, 0x9e, 0x9d, 0x12, + 0x35, 0x9c, 0xa6, 0x34, 0x8f, 0x21, 0x0b, 0x35, 0x17, 0xec, 0xc0, 0x28, 0x22, 0xdf, 0xbb, 0x93, + 0xd4, 0x87, 0x0c, 0xb9, 0x70, 0xcc, 0x50, 0xe6, 0xa2, 0xd3, 0x14, 0x67, 0x67, 0x6d, 0xd0, 0xab, + 0xf5, 0xeb, 0xf6, 0xc3, 0x59, 0xae, 0xf7, 0x04, 0xd1, 0x2b, 0xa1, 0x86, 0xb3, 0xcd, 0x63, 0x5f, + 0xf2, 0xd0, 0x27, 0x45, 0xe4, 0x53, 0x1e, 0xd0, 0xbe, 0x03, 0xfa, 0x0d, 0x59, 0x31, 0xa6, 0x23, + 0x14, 0xc2, 0x29, 0x26, 0x93, 0xac, 0xdd, 0xe4, 0x32, 0x8f, 0x66, 0xb9, 0xfe, 0xde, 0x2b, 0x65, + 0xd4, 0x04, 0xc3, 0xe9, 0x5c, 0x15, 0x7b, 0xaa, 0x84, 0xf7, 0x57, 0x7f, 0xfc, 0x4d, 0x5f, 0x32, + 0x7e, 0x5f, 0x06, 0x77, 0x0f, 0x48, 0x42, 0x51, 0x42, 0x27, 0x54, 0xdc, 0x79, 0x1b, 0x34, 0xaa, + 0xd1, 0xc3, 0x2f, 0x7d, 0x73, 0xb8, 0x7b, 0xad, 0x2d, 0x9f, 0x95, 0x08, 0xbb, 0x5e, 0x1c, 0xe7, + 0x8b, 0xa2, 0xfb, 0xe6, 0x69, 0xda, 0xc7, 0x60, 0x35, 0x23, 0x84, 0xc9, 0xa9, 0xf0, 0x70, 0xa1, + 0x1f, 0xe6, 0x93, 0x68, 0x3a, 0x30, 0x9f, 0xa2, 0xec, 0x79, 0x84, 0x1c, 0x42, 0x98, 0xbd, 0x5a, + 0x10, 0x39, 0x3c, 0x4f, 0xfb, 0xa9, 0x06, 0xb6, 0x12, 0x74, 0xca, 0xdc, 0x6a, 0xf0, 0x52, 0x37, + 0x84, 0x34, 0xe4, 0x37, 0xbf, 0x65, 0x7f, 0x3d, 0xcb, 0xf5, 0xb7, 0x45, 0x15, 0x6e, 0x42, 0x19, + 0xff, 0xe4, 0xfa, 0x87, 0x01, 0x66, 0xe1, 0x64, 0x54, 0xc8, 0xa9, 0x0f, 0x82, 0xb2, 0x8c, 0xf0, + 0x88, 0x5a, 0xa3, 0x33, 0x86, 0xa8, 0x79, 0x88, 0x4e, 0xed, 0x62, 0xe1, 0x68, 0x05, 0xdd, 0x57, + 0x15, 0xdb, 0x21, 0xa4, 0xa1, 0x2c, 0xd4, 0xcf, 0xcb, 0xa0, 0xa5, 0xd6, 0x4f, 0x1b, 0x80, 0x86, + 0x68, 0xed, 0x6a, 0x36, 0xda, 0x5b, 0xb3, 0x5c, 0xdf, 0x14, 0x7f, 0xab, 0x0a, 0x19, 0x4e, 0x5d, + 0xac, 0x8f, 0x7c, 0xcd, 0x03, 0xf5, 0x10, 0x41, 0x1f, 0x65, 0xee, 0x40, 0x56, 0xa6, 0x7f, 0xfb, + 0xbc, 0x3c, 0xe4, 0x19, 0x76, 0xf7, 0x22, 0xd7, 
0xd7, 0xc4, 0x7a, 0x30, 0xcb, 0xf5, 0x0d, 0x21, + 0x53, 0xd2, 0x19, 0xce, 0x9a, 0x58, 0x0e, 0x14, 0x91, 0xa1, 0x9c, 0x93, 0xff, 0x4b, 0x64, 0x78, + 0x4d, 0x64, 0x58, 0x89, 0x0c, 0x65, 0x4d, 0x7e, 0x5d, 0x01, 0x77, 0x04, 0x5a, 0x83, 0x60, 0x9d, + 0xe2, 0x20, 0x41, 0xbe, 0x2b, 0x20, 0xb2, 0x71, 0xba, 0xaa, 0x8e, 0x78, 0x22, 0x4f, 0x38, 0x4c, + 0x0a, 0x76, 0xce, 0x73, 0xbd, 0x36, 0x9f, 0x05, 0x0b, 0x14, 0x86, 0xd3, 0xa2, 0x0a, 0x56, 0xfb, + 0x16, 0xac, 0x57, 0xe7, 0xec, 0x52, 0x54, 0x36, 0xd7, 0x0d, 0x12, 0xd5, 0x01, 0x9e, 0x20, 0x66, + 0xb7, 0xe7, 0xf4, 0x0b, 0xe9, 0x86, 0xd3, 0x9a, 0x2a, 0x38, 0x6d, 0x04, 0xc4, 0x83, 0xc0, 0xf5, + 0xf9, 0x30, 0x5b, 0xf9, 0x0f, 0xc3, 0xec, 0x81, 0x1c, 0x66, 0x6f, 0x29, 0x0f, 0x4d, 0xc5, 0x60, + 0x38, 0xeb, 0xd2, 0x21, 0xc7, 0x59, 0x04, 0xb4, 0x12, 0x31, 0x6f, 0x59, 0xf9, 0xc8, 0xdc, 0xb6, + 0x8f, 0x07, 0xb3, 0x5c, 0xdf, 0x59, 0x54, 0x99, 0x73, 0x18, 0xce, 0x9b, 0xd2, 0x39, 0x6f, 0x5e, + 0xe3, 0x73, 0x50, 0x2f, 0x1f, 0x5b, 0xad, 0x03, 0x1a, 0xc9, 0x24, 0x46, 0x59, 0x11, 0xe1, 0x67, + 0xb3, 0xea, 0xcc, 0x1d, 0x5a, 0x0f, 0x34, 0x7d, 0x94, 0x90, 0x18, 0x27, 0x3c, 0xbe, 0xcc, 0xe3, + 0xaa, 0xcb, 0x7e, 0xf6, 0xf2, 0xa2, 0x5b, 0x3b, 0xbf, 0xe8, 0xd6, 0xfe, 0xba, 0xe8, 0xd6, 0x5e, + 0x5c, 0x76, 0x97, 0xce, 0x2f, 0xbb, 0x4b, 0x7f, 0x5c, 0x76, 0x97, 0xbe, 0xd9, 0x57, 0x2e, 0x9a, + 0x47, 0x68, 0x4c, 0xa8, 0x85, 0x47, 0xde, 0x5e, 0xf9, 0x51, 0xb6, 0x57, 0x7e, 0x95, 0x7d, 0xf0, + 0xd1, 0xde, 0xd5, 0x8f, 0xa6, 0xd1, 0x1d, 0x3e, 0x4f, 0x1e, 0xff, 0x1b, 0x00, 0x00, 0xff, 0xff, + 0xf8, 0x0c, 0x1b, 0x17, 0xc6, 0x09, 0x00, 0x00, +} + +func (m *ClientState) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ClientState) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ClientState) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.AllowUpdateAfterMisbehaviour { + i-- + if m.AllowUpdateAfterMisbehaviour { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x58 + } + if m.AllowUpdateAfterExpiry { + i-- + if m.AllowUpdateAfterExpiry { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x50 + } + if len(m.UpgradePath) > 0 { + for iNdEx := len(m.UpgradePath) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.UpgradePath[iNdEx]) + copy(dAtA[i:], m.UpgradePath[iNdEx]) + i = encodeVarintTendermint(dAtA, i, uint64(len(m.UpgradePath[iNdEx]))) + i-- + dAtA[i] = 0x4a + } + } + if len(m.ProofSpecs) > 0 { + for iNdEx := len(m.ProofSpecs) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.ProofSpecs[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTendermint(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x42 + } + } + { + size, err := m.LatestHeight.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTendermint(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x3a + { + size, err := m.FrozenHeight.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTendermint(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x32 + n3, err3 := github_com_gogo_protobuf_types.StdDurationMarshalTo(m.MaxClockDrift, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdDuration(m.MaxClockDrift):]) + if err3 != nil { + return 0, err3 + } + i -= n3 + i = encodeVarintTendermint(dAtA, i, uint64(n3)) + i-- + 
dAtA[i] = 0x2a + n4, err4 := github_com_gogo_protobuf_types.StdDurationMarshalTo(m.UnbondingPeriod, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdDuration(m.UnbondingPeriod):]) + if err4 != nil { + return 0, err4 + } + i -= n4 + i = encodeVarintTendermint(dAtA, i, uint64(n4)) + i-- + dAtA[i] = 0x22 + n5, err5 := github_com_gogo_protobuf_types.StdDurationMarshalTo(m.TrustingPeriod, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdDuration(m.TrustingPeriod):]) + if err5 != nil { + return 0, err5 + } + i -= n5 + i = encodeVarintTendermint(dAtA, i, uint64(n5)) + i-- + dAtA[i] = 0x1a + { + size, err := m.TrustLevel.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTendermint(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + if len(m.ChainId) > 0 { + i -= len(m.ChainId) + copy(dAtA[i:], m.ChainId) + i = encodeVarintTendermint(dAtA, i, uint64(len(m.ChainId))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *ConsensusState) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ConsensusState) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ConsensusState) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.NextValidatorsHash) > 0 { + i -= len(m.NextValidatorsHash) + copy(dAtA[i:], m.NextValidatorsHash) + i = encodeVarintTendermint(dAtA, i, uint64(len(m.NextValidatorsHash))) + i-- + dAtA[i] = 0x1a + } + { + size, err := m.Root.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTendermint(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + n8, err8 := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.Timestamp, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(m.Timestamp):]) + if err8 != nil { + return 0, err8 + } + i -= n8 + i = encodeVarintTendermint(dAtA, i, uint64(n8)) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *Misbehaviour) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Misbehaviour) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Misbehaviour) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Header2 != nil { + { + size, err := m.Header2.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTendermint(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + if m.Header1 != nil { + { + size, err := m.Header1.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTendermint(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if len(m.ClientId) > 0 { + i -= len(m.ClientId) + copy(dAtA[i:], m.ClientId) + i = encodeVarintTendermint(dAtA, i, uint64(len(m.ClientId))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *Header) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Header) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + 
return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Header) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.TrustedValidators != nil { + { + size, err := m.TrustedValidators.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTendermint(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } + { + size, err := m.TrustedHeight.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTendermint(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + if m.ValidatorSet != nil { + { + size, err := m.ValidatorSet.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTendermint(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if m.SignedHeader != nil { + { + size, err := m.SignedHeader.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTendermint(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *Fraction) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Fraction) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Fraction) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Denominator != 0 { + i = encodeVarintTendermint(dAtA, i, uint64(m.Denominator)) + i-- + dAtA[i] = 0x10 + } + if m.Numerator != 0 { + i = encodeVarintTendermint(dAtA, i, uint64(m.Numerator)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func encodeVarintTendermint(dAtA []byte, offset int, v uint64) int { + offset -= sovTendermint(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *ClientState) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.ChainId) + if l > 0 { + n += 1 + l + sovTendermint(uint64(l)) + } + l = m.TrustLevel.Size() + n += 1 + l + sovTendermint(uint64(l)) + l = github_com_gogo_protobuf_types.SizeOfStdDuration(m.TrustingPeriod) + n += 1 + l + sovTendermint(uint64(l)) + l = github_com_gogo_protobuf_types.SizeOfStdDuration(m.UnbondingPeriod) + n += 1 + l + sovTendermint(uint64(l)) + l = github_com_gogo_protobuf_types.SizeOfStdDuration(m.MaxClockDrift) + n += 1 + l + sovTendermint(uint64(l)) + l = m.FrozenHeight.Size() + n += 1 + l + sovTendermint(uint64(l)) + l = m.LatestHeight.Size() + n += 1 + l + sovTendermint(uint64(l)) + if len(m.ProofSpecs) > 0 { + for _, e := range m.ProofSpecs { + l = e.Size() + n += 1 + l + sovTendermint(uint64(l)) + } + } + if len(m.UpgradePath) > 0 { + for _, s := range m.UpgradePath { + l = len(s) + n += 1 + l + sovTendermint(uint64(l)) + } + } + if m.AllowUpdateAfterExpiry { + n += 2 + } + if m.AllowUpdateAfterMisbehaviour { + n += 2 + } + return n +} + +func (m *ConsensusState) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = github_com_gogo_protobuf_types.SizeOfStdTime(m.Timestamp) + n += 1 + l + sovTendermint(uint64(l)) + l = m.Root.Size() + n += 1 + l + sovTendermint(uint64(l)) + l = len(m.NextValidatorsHash) + if l > 0 { + n += 1 + l + sovTendermint(uint64(l)) + } + return n +} + +func (m *Misbehaviour) Size() (n int) { + if m == nil { + return 0 + } + var l 
int + _ = l + l = len(m.ClientId) + if l > 0 { + n += 1 + l + sovTendermint(uint64(l)) + } + if m.Header1 != nil { + l = m.Header1.Size() + n += 1 + l + sovTendermint(uint64(l)) + } + if m.Header2 != nil { + l = m.Header2.Size() + n += 1 + l + sovTendermint(uint64(l)) + } + return n +} + +func (m *Header) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.SignedHeader != nil { + l = m.SignedHeader.Size() + n += 1 + l + sovTendermint(uint64(l)) + } + if m.ValidatorSet != nil { + l = m.ValidatorSet.Size() + n += 1 + l + sovTendermint(uint64(l)) + } + l = m.TrustedHeight.Size() + n += 1 + l + sovTendermint(uint64(l)) + if m.TrustedValidators != nil { + l = m.TrustedValidators.Size() + n += 1 + l + sovTendermint(uint64(l)) + } + return n +} + +func (m *Fraction) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Numerator != 0 { + n += 1 + sovTendermint(uint64(m.Numerator)) + } + if m.Denominator != 0 { + n += 1 + sovTendermint(uint64(m.Denominator)) + } + return n +} + +func sovTendermint(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozTendermint(x uint64) (n int) { + return sovTendermint(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *ClientState) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTendermint + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ClientState: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ClientState: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ChainId", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTendermint + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTendermint + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTendermint + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ChainId = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TrustLevel", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTendermint + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTendermint + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTendermint + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.TrustLevel.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TrustingPeriod", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTendermint + } + if iNdEx >= l { + return 
io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTendermint + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTendermint + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := github_com_gogo_protobuf_types.StdDurationUnmarshal(&m.TrustingPeriod, dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field UnbondingPeriod", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTendermint + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTendermint + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTendermint + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := github_com_gogo_protobuf_types.StdDurationUnmarshal(&m.UnbondingPeriod, dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field MaxClockDrift", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTendermint + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTendermint + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTendermint + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := github_com_gogo_protobuf_types.StdDurationUnmarshal(&m.MaxClockDrift, dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field FrozenHeight", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTendermint + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTendermint + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTendermint + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.FrozenHeight.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LatestHeight", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTendermint + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTendermint + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTendermint + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.LatestHeight.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ProofSpecs", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 
ErrIntOverflowTendermint + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTendermint + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTendermint + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ProofSpecs = append(m.ProofSpecs, &_go.ProofSpec{}) + if err := m.ProofSpecs[len(m.ProofSpecs)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 9: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field UpgradePath", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTendermint + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTendermint + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTendermint + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.UpgradePath = append(m.UpgradePath, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 10: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field AllowUpdateAfterExpiry", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTendermint + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.AllowUpdateAfterExpiry = bool(v != 0) + case 11: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field AllowUpdateAfterMisbehaviour", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTendermint + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.AllowUpdateAfterMisbehaviour = bool(v != 0) + default: + iNdEx = preIndex + skippy, err := skipTendermint(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTendermint + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ConsensusState) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTendermint + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ConsensusState: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ConsensusState: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Timestamp", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTendermint + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if 
msglen < 0 { + return ErrInvalidLengthTendermint + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTendermint + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := github_com_gogo_protobuf_types.StdTimeUnmarshal(&m.Timestamp, dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Root", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTendermint + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTendermint + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTendermint + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Root.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field NextValidatorsHash", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTendermint + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthTendermint + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthTendermint + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.NextValidatorsHash = append(m.NextValidatorsHash[:0], dAtA[iNdEx:postIndex]...) + if m.NextValidatorsHash == nil { + m.NextValidatorsHash = []byte{} + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTendermint(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTendermint + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Misbehaviour) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTendermint + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Misbehaviour: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Misbehaviour: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ClientId", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTendermint + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTendermint + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTendermint + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ClientId = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return 
fmt.Errorf("proto: wrong wireType = %d for field Header1", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTendermint + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTendermint + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTendermint + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Header1 == nil { + m.Header1 = &Header{} + } + if err := m.Header1.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Header2", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTendermint + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTendermint + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTendermint + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Header2 == nil { + m.Header2 = &Header{} + } + if err := m.Header2.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTendermint(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTendermint + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Header) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTendermint + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Header: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Header: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SignedHeader", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTendermint + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTendermint + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTendermint + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.SignedHeader == nil { + m.SignedHeader = &types2.SignedHeader{} + } + if err := m.SignedHeader.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ValidatorSet", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTendermint + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 
0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTendermint + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTendermint + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ValidatorSet == nil { + m.ValidatorSet = &types2.ValidatorSet{} + } + if err := m.ValidatorSet.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TrustedHeight", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTendermint + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTendermint + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTendermint + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.TrustedHeight.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TrustedValidators", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTendermint + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTendermint + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTendermint + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.TrustedValidators == nil { + m.TrustedValidators = &types2.ValidatorSet{} + } + if err := m.TrustedValidators.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTendermint(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTendermint + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Fraction) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTendermint + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Fraction: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Fraction: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Numerator", wireType) + } + m.Numerator = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTendermint + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Numerator |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Denominator", wireType) + } + m.Denominator = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTendermint + } + if iNdEx >= l { + return 
io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Denominator |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipTendermint(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTendermint + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipTendermint(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowTendermint + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowTendermint + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowTendermint + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthTendermint + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupTendermint + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthTendermint + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthTendermint = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowTendermint = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupTendermint = fmt.Errorf("proto: unexpected end of group") +) diff --git a/light-clients/07-tendermint/types/tendermint_test.go b/light-clients/07-tendermint/types/tendermint_test.go new file mode 100644 index 0000000000..4f9b8142bf --- /dev/null +++ b/light-clients/07-tendermint/types/tendermint_test.go @@ -0,0 +1,95 @@ +package types_test + +import ( + "testing" + "time" + + "github.com/stretchr/testify/suite" + tmbytes "github.com/tendermint/tendermint/libs/bytes" + tmproto "github.com/tendermint/tendermint/proto/tendermint/types" + tmtypes "github.com/tendermint/tendermint/types" + + "github.com/cosmos/cosmos-sdk/codec" + "github.com/cosmos/cosmos-sdk/simapp" + sdk "github.com/cosmos/cosmos-sdk/types" + clienttypes "github.com/cosmos/cosmos-sdk/x/ibc/core/02-client/types" + ibctmtypes "github.com/cosmos/cosmos-sdk/x/ibc/light-clients/07-tendermint/types" + ibctesting "github.com/cosmos/cosmos-sdk/x/ibc/testing" + ibctestingmock "github.com/cosmos/cosmos-sdk/x/ibc/testing/mock" +) + +const ( + chainID = "gaia" + chainIDRevision0 = "gaia-revision-0" + chainIDRevision1 = "gaia-revision-1" + clientID = "gaiamainnet" + trustingPeriod time.Duration = time.Hour * 24 * 7 * 2 + ubdPeriod time.Duration = time.Hour * 24 * 7 * 3 + maxClockDrift time.Duration = time.Second * 10 +) + +var ( + height = clienttypes.NewHeight(0, 4) + newClientHeight = clienttypes.NewHeight(1, 1) + upgradePath = []string{"upgrade", "upgradedIBCState"} +) + +type TendermintTestSuite struct { + suite.Suite + 
+ coordinator *ibctesting.Coordinator + + // testing chains used for convenience and readability + chainA *ibctesting.TestChain + chainB *ibctesting.TestChain + + // TODO: deprecate usage in favor of testing package + ctx sdk.Context + cdc codec.Marshaler + privVal tmtypes.PrivValidator + valSet *tmtypes.ValidatorSet + valsHash tmbytes.HexBytes + header *ibctmtypes.Header + now time.Time + headerTime time.Time + clientTime time.Time +} + +func (suite *TendermintTestSuite) SetupTest() { + suite.coordinator = ibctesting.NewCoordinator(suite.T(), 2) + suite.chainA = suite.coordinator.GetChain(ibctesting.GetChainID(0)) + suite.chainB = suite.coordinator.GetChain(ibctesting.GetChainID(1)) + // commit some blocks so that QueryProof returns valid proof (cannot return valid query if height <= 1) + suite.coordinator.CommitNBlocks(suite.chainA, 2) + suite.coordinator.CommitNBlocks(suite.chainB, 2) + + // TODO: deprecate usage in favor of testing package + checkTx := false + app := simapp.Setup(checkTx) + + suite.cdc = app.AppCodec() + + // now is the time of the current chain, must be after the updating header + // mocks ctx.BlockTime() + suite.now = time.Date(2020, 1, 2, 0, 0, 0, 0, time.UTC) + suite.clientTime = time.Date(2020, 1, 1, 0, 0, 0, 0, time.UTC) + // Header time is intended to be time for any new header used for updates + suite.headerTime = time.Date(2020, 1, 2, 0, 0, 0, 0, time.UTC) + + suite.privVal = ibctestingmock.NewPV() + + pubKey, err := suite.privVal.GetPubKey() + suite.Require().NoError(err) + + heightMinus1 := clienttypes.NewHeight(0, height.RevisionHeight-1) + + val := tmtypes.NewValidator(pubKey, 10) + suite.valSet = tmtypes.NewValidatorSet([]*tmtypes.Validator{val}) + suite.valsHash = suite.valSet.Hash() + suite.header = suite.chainA.CreateTMClientHeader(chainID, int64(height.RevisionHeight), heightMinus1, suite.now, suite.valSet, suite.valSet, []tmtypes.PrivValidator{suite.privVal}) + suite.ctx = app.BaseApp.NewContext(checkTx, tmproto.Header{Height: 1, Time: suite.now}) +} + +func TestTendermintTestSuite(t *testing.T) { + suite.Run(t, new(TendermintTestSuite)) +} diff --git a/light-clients/07-tendermint/types/update.go b/light-clients/07-tendermint/types/update.go new file mode 100644 index 0000000000..e692e74668 --- /dev/null +++ b/light-clients/07-tendermint/types/update.go @@ -0,0 +1,186 @@ +package types + +import ( + "bytes" + "time" + + "github.com/tendermint/tendermint/light" + tmtypes "github.com/tendermint/tendermint/types" + + "github.com/cosmos/cosmos-sdk/codec" + sdk "github.com/cosmos/cosmos-sdk/types" + sdkerrors "github.com/cosmos/cosmos-sdk/types/errors" + clienttypes "github.com/cosmos/cosmos-sdk/x/ibc/core/02-client/types" + commitmenttypes "github.com/cosmos/cosmos-sdk/x/ibc/core/23-commitment/types" + "github.com/cosmos/cosmos-sdk/x/ibc/core/exported" +) + +// CheckHeaderAndUpdateState checks if the provided header is valid, and if valid it will: +// create the consensus state for the header.Height +// and update the client state if the header height is greater than the latest client state height +// It returns an error if: +// - the client or header provided are not parseable to tendermint types +// - the header is invalid +// - header height is less than or equal to the trusted header height +// - header revision is not equal to trusted header revision +// - header valset commit verification fails +// - header timestamp is past the trusting period in relation to the consensus state +// - header timestamp is less than or equal to the consensus state 
timestamp +// +// UpdateClient may be used to either create a consensus state for: +// - a future height greater than the latest client state height +// - a past height that was skipped during bisection +// If we are updating to a past height, a consensus state is created for that height to be persisted in client store +// If we are updating to a future height, the consensus state is created and the client state is updated to reflect +// the new latest height +// UpdateClient must only be used to update within a single revision, thus header revision number and trusted height's revision +// number must be the same. To update to a new revision, use a separate upgrade path +// Tendermint client validity checking uses the bisection algorithm described +// in the [Tendermint spec](https://github.com/tendermint/spec/blob/master/spec/consensus/light-client.md). +func (cs ClientState) CheckHeaderAndUpdateState( + ctx sdk.Context, cdc codec.BinaryMarshaler, clientStore sdk.KVStore, + header exported.Header, +) (exported.ClientState, exported.ConsensusState, error) { + tmHeader, ok := header.(*Header) + if !ok { + return nil, nil, sdkerrors.Wrapf( + clienttypes.ErrInvalidHeader, "expected type %T, got %T", &Header{}, header, + ) + } + + // get consensus state from clientStore + tmConsState, err := GetConsensusState(clientStore, cdc, tmHeader.TrustedHeight) + if err != nil { + return nil, nil, sdkerrors.Wrapf( + err, "could not get consensus state from clientstore at TrustedHeight: %s", tmHeader.TrustedHeight, + ) + } + + if err := checkValidity(&cs, tmConsState, tmHeader, ctx.BlockTime()); err != nil { + return nil, nil, err + } + + newClientState, consensusState := update(ctx, clientStore, &cs, tmHeader) + return newClientState, consensusState, nil +} + +// checkTrustedHeader checks that consensus state matches trusted fields of Header +func checkTrustedHeader(header *Header, consState *ConsensusState) error { + tmTrustedValidators, err := tmtypes.ValidatorSetFromProto(header.TrustedValidators) + if err != nil { + return sdkerrors.Wrap(err, "trusted validator set in not tendermint validator set type") + } + + // assert that trustedVals is NextValidators of last trusted header + // to do this, we check that trustedVals.Hash() == consState.NextValidatorsHash + tvalHash := tmTrustedValidators.Hash() + if !bytes.Equal(consState.NextValidatorsHash, tvalHash) { + return sdkerrors.Wrapf( + ErrInvalidValidatorSet, + "trusted validators %s, does not hash to latest trusted validators. Expected: %X, got: %X", + header.TrustedValidators, consState.NextValidatorsHash, tvalHash, + ) + } + return nil +} + +// checkValidity checks if the Tendermint header is valid. 
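+// Verification proceeds roughly as follows: (1) checkTrustedHeader asserts that the
+// provided trusted validators hash to the stored consensus state's NextValidatorsHash,
+// (2) the header revision must equal the trusted height's revision, (3) the header
+// height must be greater than the trusted height, and (4) light.Verify is run against
+// a trusted header reconstructed from the consensus state fields.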
+// CONTRACT: consState.Height == header.TrustedHeight
+func checkValidity(
+	clientState *ClientState, consState *ConsensusState,
+	header *Header, currentTimestamp time.Time,
+) error {
+	if err := checkTrustedHeader(header, consState); err != nil {
+		return err
+	}
+
+	// UpdateClient only accepts updates with a header at the same revision
+	// as the trusted consensus state
+	if header.GetHeight().GetRevisionNumber() != header.TrustedHeight.RevisionNumber {
+		return sdkerrors.Wrapf(
+			ErrInvalidHeaderHeight,
+			"header height revision %d does not match trusted header revision %d",
+			header.GetHeight().GetRevisionNumber(), header.TrustedHeight.RevisionNumber,
+		)
+	}
+
+	tmTrustedValidators, err := tmtypes.ValidatorSetFromProto(header.TrustedValidators)
+	if err != nil {
+		return sdkerrors.Wrap(err, "trusted validator set is not tendermint validator set type")
+	}
+
+	tmSignedHeader, err := tmtypes.SignedHeaderFromProto(header.SignedHeader)
+	if err != nil {
+		return sdkerrors.Wrap(err, "signed header is not tendermint signed header type")
+	}
+
+	tmValidatorSet, err := tmtypes.ValidatorSetFromProto(header.ValidatorSet)
+	if err != nil {
+		return sdkerrors.Wrap(err, "validator set is not tendermint validator set type")
+	}
+
+	// assert header height is newer than consensus state
+	if header.GetHeight().LTE(header.TrustedHeight) {
+		return sdkerrors.Wrapf(
+			clienttypes.ErrInvalidHeader,
+			"header height ≤ consensus state height (%s ≤ %s)", header.GetHeight(), header.TrustedHeight,
+		)
+	}
+
+	chainID := clientState.GetChainID()
+	// If chainID is in revision format, then set revision number of chainID with the revision number
+	// of the header we are verifying
+	// This is useful if the update is at a previous revision rather than an update to the latest revision
+	// of the client.
+	// The chainID must be set correctly for the previous revision before attempting verification.
+	// Updates for previous revisions are not supported if the chainID is not in revision format.
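+	// For example, a chain ID such as "gaia-2" (illustrative value) is in revision format;
+	// when verifying a header at revision 1, the chain ID is rewritten before verification,
+	// roughly:
+	//
+	//   chainID, _ = clienttypes.SetRevisionNumber("gaia-2", 1) // "gaia-1"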
+ if clienttypes.IsRevisionFormat(chainID) { + chainID, _ = clienttypes.SetRevisionNumber(chainID, header.GetHeight().GetRevisionNumber()) + } + + // Construct a trusted header using the fields in consensus state + // Only Height, Time, and NextValidatorsHash are necessary for verification + trustedHeader := tmtypes.Header{ + ChainID: chainID, + Height: int64(header.TrustedHeight.RevisionHeight), + Time: consState.Timestamp, + NextValidatorsHash: consState.NextValidatorsHash, + } + signedHeader := tmtypes.SignedHeader{ + Header: &trustedHeader, + } + + // Verify next header with the passed-in trustedVals + // - asserts trusting period not passed + // - assert header timestamp is not past the trusting period + // - assert header timestamp is past latest stored consensus state timestamp + // - assert that a TrustLevel proportion of TrustedValidators signed new Commit + err = light.Verify( + &signedHeader, + tmTrustedValidators, tmSignedHeader, tmValidatorSet, + clientState.TrustingPeriod, currentTimestamp, clientState.MaxClockDrift, clientState.TrustLevel.ToTendermint(), + ) + if err != nil { + return sdkerrors.Wrap(err, "failed to verify header") + } + return nil +} + +// update the consensus state from a new header and set processed time metadata +func update(ctx sdk.Context, clientStore sdk.KVStore, clientState *ClientState, header *Header) (*ClientState, *ConsensusState) { + height := header.GetHeight().(clienttypes.Height) + if height.GT(clientState.LatestHeight) { + clientState.LatestHeight = height + } + consensusState := &ConsensusState{ + Timestamp: header.GetTime(), + Root: commitmenttypes.NewMerkleRoot(header.Header.GetAppHash()), + NextValidatorsHash: header.Header.NextValidatorsHash, + } + + // set context time as processed time as this is state internal to tendermint client logic. 
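+	// (the processed time is recorded in nanoseconds via ctx.BlockTime().UnixNano(), keyed by the header height)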
+ // client state and consensus state will be set by client keeper + SetProcessedTime(clientStore, header.GetHeight(), uint64(ctx.BlockTime().UnixNano())) + + return clientState, consensusState +} diff --git a/light-clients/07-tendermint/types/update_test.go b/light-clients/07-tendermint/types/update_test.go new file mode 100644 index 0000000000..d9e550ed01 --- /dev/null +++ b/light-clients/07-tendermint/types/update_test.go @@ -0,0 +1,281 @@ +package types_test + +import ( + "time" + + tmtypes "github.com/tendermint/tendermint/types" + + clienttypes "github.com/cosmos/cosmos-sdk/x/ibc/core/02-client/types" + commitmenttypes "github.com/cosmos/cosmos-sdk/x/ibc/core/23-commitment/types" + types "github.com/cosmos/cosmos-sdk/x/ibc/light-clients/07-tendermint/types" + ibctesting "github.com/cosmos/cosmos-sdk/x/ibc/testing" + ibctestingmock "github.com/cosmos/cosmos-sdk/x/ibc/testing/mock" +) + +func (suite *TendermintTestSuite) TestCheckHeaderAndUpdateState() { + var ( + clientState *types.ClientState + consensusState *types.ConsensusState + consStateHeight clienttypes.Height + newHeader *types.Header + currentTime time.Time + ) + + // Setup different validators and signers for testing different types of updates + altPrivVal := ibctestingmock.NewPV() + altPubKey, err := altPrivVal.GetPubKey() + suite.Require().NoError(err) + + revisionHeight := int64(height.RevisionHeight) + + // create modified heights to use for test-cases + heightPlus1 := clienttypes.NewHeight(height.RevisionNumber, height.RevisionHeight+1) + heightMinus1 := clienttypes.NewHeight(height.RevisionNumber, height.RevisionHeight-1) + heightMinus3 := clienttypes.NewHeight(height.RevisionNumber, height.RevisionHeight-3) + heightPlus5 := clienttypes.NewHeight(height.RevisionNumber, height.RevisionHeight+5) + + altVal := tmtypes.NewValidator(altPubKey, revisionHeight) + + // Create bothValSet with both suite validator and altVal. 
Would be valid update + bothValSet := tmtypes.NewValidatorSet(append(suite.valSet.Validators, altVal)) + // Create alternative validator set with only altVal, invalid update (too much change in valSet) + altValSet := tmtypes.NewValidatorSet([]*tmtypes.Validator{altVal}) + + signers := []tmtypes.PrivValidator{suite.privVal} + + // Create signer array and ensure it is in same order as bothValSet + _, suiteVal := suite.valSet.GetByIndex(0) + bothSigners := ibctesting.CreateSortedSignerArray(altPrivVal, suite.privVal, altVal, suiteVal) + + altSigners := []tmtypes.PrivValidator{altPrivVal} + + testCases := []struct { + name string + setup func() + expPass bool + }{ + { + name: "successful update with next height and same validator set", + setup: func() { + clientState = types.NewClientState(chainID, types.DefaultTrustLevel, trustingPeriod, ubdPeriod, maxClockDrift, height, commitmenttypes.GetSDKSpecs(), upgradePath, false, false) + consensusState = types.NewConsensusState(suite.clientTime, commitmenttypes.NewMerkleRoot(suite.header.Header.GetAppHash()), suite.valsHash) + newHeader = suite.chainA.CreateTMClientHeader(chainID, int64(heightPlus1.RevisionHeight), height, suite.headerTime, suite.valSet, suite.valSet, signers) + currentTime = suite.now + }, + expPass: true, + }, + { + name: "successful update with future height and different validator set", + setup: func() { + clientState = types.NewClientState(chainID, types.DefaultTrustLevel, trustingPeriod, ubdPeriod, maxClockDrift, height, commitmenttypes.GetSDKSpecs(), upgradePath, false, false) + consensusState = types.NewConsensusState(suite.clientTime, commitmenttypes.NewMerkleRoot(suite.header.Header.GetAppHash()), suite.valsHash) + newHeader = suite.chainA.CreateTMClientHeader(chainID, int64(heightPlus5.RevisionHeight), height, suite.headerTime, bothValSet, suite.valSet, bothSigners) + currentTime = suite.now + }, + expPass: true, + }, + { + name: "successful update with next height and different validator set", + setup: func() { + clientState = types.NewClientState(chainID, types.DefaultTrustLevel, trustingPeriod, ubdPeriod, maxClockDrift, height, commitmenttypes.GetSDKSpecs(), upgradePath, false, false) + consensusState = types.NewConsensusState(suite.clientTime, commitmenttypes.NewMerkleRoot(suite.header.Header.GetAppHash()), bothValSet.Hash()) + newHeader = suite.chainA.CreateTMClientHeader(chainID, int64(heightPlus1.RevisionHeight), height, suite.headerTime, bothValSet, bothValSet, bothSigners) + currentTime = suite.now + }, + expPass: true, + }, + { + name: "successful update for a previous height", + setup: func() { + clientState = types.NewClientState(chainID, types.DefaultTrustLevel, trustingPeriod, ubdPeriod, maxClockDrift, height, commitmenttypes.GetSDKSpecs(), upgradePath, false, false) + consensusState = types.NewConsensusState(suite.clientTime, commitmenttypes.NewMerkleRoot(suite.header.Header.GetAppHash()), suite.valsHash) + consStateHeight = heightMinus3 + newHeader = suite.chainA.CreateTMClientHeader(chainID, int64(heightMinus1.RevisionHeight), heightMinus3, suite.headerTime, bothValSet, suite.valSet, bothSigners) + currentTime = suite.now + }, + expPass: true, + }, + { + name: "successful update for a previous revision", + setup: func() { + clientState = types.NewClientState(chainIDRevision1, types.DefaultTrustLevel, trustingPeriod, ubdPeriod, maxClockDrift, height, commitmenttypes.GetSDKSpecs(), upgradePath, false, false) + consensusState = types.NewConsensusState(suite.clientTime, 
commitmenttypes.NewMerkleRoot(suite.header.Header.GetAppHash()), suite.valsHash) + newHeader = suite.chainA.CreateTMClientHeader(chainIDRevision0, int64(height.RevisionHeight), heightMinus3, suite.headerTime, bothValSet, suite.valSet, bothSigners) + currentTime = suite.now + }, + expPass: true, + }, + { + name: "unsuccessful update with incorrect header chain-id", + setup: func() { + clientState = types.NewClientState(chainID, types.DefaultTrustLevel, trustingPeriod, ubdPeriod, maxClockDrift, height, commitmenttypes.GetSDKSpecs(), upgradePath, false, false) + consensusState = types.NewConsensusState(suite.clientTime, commitmenttypes.NewMerkleRoot(suite.header.Header.GetAppHash()), suite.valsHash) + newHeader = suite.chainA.CreateTMClientHeader("ethermint", int64(heightPlus1.RevisionHeight), height, suite.headerTime, suite.valSet, suite.valSet, signers) + currentTime = suite.now + }, + expPass: false, + }, + { + name: "unsuccessful update to a future revision", + setup: func() { + clientState = types.NewClientState(chainIDRevision0, types.DefaultTrustLevel, trustingPeriod, ubdPeriod, maxClockDrift, height, commitmenttypes.GetSDKSpecs(), upgradePath, false, false) + consensusState = types.NewConsensusState(suite.clientTime, commitmenttypes.NewMerkleRoot(suite.header.Header.GetAppHash()), suite.valsHash) + newHeader = suite.chainA.CreateTMClientHeader(chainIDRevision1, 1, height, suite.headerTime, suite.valSet, suite.valSet, signers) + currentTime = suite.now + }, + expPass: false, + }, + { + name: "unsuccessful update: header height revision and trusted height revision mismatch", + setup: func() { + clientState = types.NewClientState(chainIDRevision1, types.DefaultTrustLevel, trustingPeriod, ubdPeriod, maxClockDrift, clienttypes.NewHeight(1, 1), commitmenttypes.GetSDKSpecs(), upgradePath, false, false) + consensusState = types.NewConsensusState(suite.clientTime, commitmenttypes.NewMerkleRoot(suite.header.Header.GetAppHash()), suite.valsHash) + newHeader = suite.chainA.CreateTMClientHeader(chainIDRevision1, 3, height, suite.headerTime, suite.valSet, suite.valSet, signers) + currentTime = suite.now + }, + expPass: false, + }, + { + name: "unsuccessful update with next height: update header mismatches nextValSetHash", + setup: func() { + clientState = types.NewClientState(chainID, types.DefaultTrustLevel, trustingPeriod, ubdPeriod, maxClockDrift, height, commitmenttypes.GetSDKSpecs(), upgradePath, false, false) + consensusState = types.NewConsensusState(suite.clientTime, commitmenttypes.NewMerkleRoot(suite.header.Header.GetAppHash()), suite.valsHash) + newHeader = suite.chainA.CreateTMClientHeader(chainID, int64(heightPlus1.RevisionHeight), height, suite.headerTime, bothValSet, suite.valSet, bothSigners) + currentTime = suite.now + }, + expPass: false, + }, + { + name: "unsuccessful update with next height: update header mismatches different nextValSetHash", + setup: func() { + clientState = types.NewClientState(chainID, types.DefaultTrustLevel, trustingPeriod, ubdPeriod, maxClockDrift, height, commitmenttypes.GetSDKSpecs(), upgradePath, false, false) + consensusState = types.NewConsensusState(suite.clientTime, commitmenttypes.NewMerkleRoot(suite.header.Header.GetAppHash()), bothValSet.Hash()) + newHeader = suite.chainA.CreateTMClientHeader(chainID, int64(heightPlus1.RevisionHeight), height, suite.headerTime, suite.valSet, bothValSet, signers) + currentTime = suite.now + }, + expPass: false, + }, + { + name: "unsuccessful update with future height: too much change in validator set", + setup: 
func() { + clientState = types.NewClientState(chainID, types.DefaultTrustLevel, trustingPeriod, ubdPeriod, maxClockDrift, height, commitmenttypes.GetSDKSpecs(), upgradePath, false, false) + consensusState = types.NewConsensusState(suite.clientTime, commitmenttypes.NewMerkleRoot(suite.header.Header.GetAppHash()), suite.valsHash) + newHeader = suite.chainA.CreateTMClientHeader(chainID, int64(heightPlus5.RevisionHeight), height, suite.headerTime, altValSet, suite.valSet, altSigners) + currentTime = suite.now + }, + expPass: false, + }, + { + name: "unsuccessful updates, passed in incorrect trusted validators for given consensus state", + setup: func() { + clientState = types.NewClientState(chainID, types.DefaultTrustLevel, trustingPeriod, ubdPeriod, maxClockDrift, height, commitmenttypes.GetSDKSpecs(), upgradePath, false, false) + consensusState = types.NewConsensusState(suite.clientTime, commitmenttypes.NewMerkleRoot(suite.header.Header.GetAppHash()), suite.valsHash) + newHeader = suite.chainA.CreateTMClientHeader(chainID, int64(heightPlus5.RevisionHeight), height, suite.headerTime, bothValSet, bothValSet, bothSigners) + currentTime = suite.now + }, + expPass: false, + }, + { + name: "unsuccessful update: trusting period has passed since last client timestamp", + setup: func() { + clientState = types.NewClientState(chainID, types.DefaultTrustLevel, trustingPeriod, ubdPeriod, maxClockDrift, height, commitmenttypes.GetSDKSpecs(), upgradePath, false, false) + consensusState = types.NewConsensusState(suite.clientTime, commitmenttypes.NewMerkleRoot(suite.header.Header.GetAppHash()), suite.valsHash) + newHeader = suite.chainA.CreateTMClientHeader(chainID, int64(heightPlus1.RevisionHeight), height, suite.headerTime, suite.valSet, suite.valSet, signers) + // make current time pass trusting period from last timestamp on clientstate + currentTime = suite.now.Add(trustingPeriod) + }, + expPass: false, + }, + { + name: "unsuccessful update: header timestamp is past current timestamp", + setup: func() { + clientState = types.NewClientState(chainID, types.DefaultTrustLevel, trustingPeriod, ubdPeriod, maxClockDrift, height, commitmenttypes.GetSDKSpecs(), upgradePath, false, false) + consensusState = types.NewConsensusState(suite.clientTime, commitmenttypes.NewMerkleRoot(suite.header.Header.GetAppHash()), suite.valsHash) + newHeader = suite.chainA.CreateTMClientHeader(chainID, int64(heightPlus1.RevisionHeight), height, suite.now.Add(time.Minute), suite.valSet, suite.valSet, signers) + currentTime = suite.now + }, + expPass: false, + }, + { + name: "unsuccessful update: header timestamp is not past last client timestamp", + setup: func() { + clientState = types.NewClientState(chainID, types.DefaultTrustLevel, trustingPeriod, ubdPeriod, maxClockDrift, height, commitmenttypes.GetSDKSpecs(), upgradePath, false, false) + consensusState = types.NewConsensusState(suite.clientTime, commitmenttypes.NewMerkleRoot(suite.header.Header.GetAppHash()), suite.valsHash) + newHeader = suite.chainA.CreateTMClientHeader(chainID, int64(heightPlus1.RevisionHeight), height, suite.clientTime, suite.valSet, suite.valSet, signers) + currentTime = suite.now + }, + expPass: false, + }, + { + name: "header basic validation failed", + setup: func() { + clientState = types.NewClientState(chainID, types.DefaultTrustLevel, trustingPeriod, ubdPeriod, maxClockDrift, height, commitmenttypes.GetSDKSpecs(), upgradePath, false, false) + consensusState = types.NewConsensusState(suite.clientTime, 
commitmenttypes.NewMerkleRoot(suite.header.Header.GetAppHash()), suite.valsHash) + newHeader = suite.chainA.CreateTMClientHeader(chainID, int64(heightPlus1.RevisionHeight), height, suite.headerTime, suite.valSet, suite.valSet, signers) + // cause new header to fail validatebasic by changing commit height to mismatch header height + newHeader.SignedHeader.Commit.Height = revisionHeight - 1 + currentTime = suite.now + }, + expPass: false, + }, + { + name: "header height < consensus height", + setup: func() { + clientState = types.NewClientState(chainID, types.DefaultTrustLevel, trustingPeriod, ubdPeriod, maxClockDrift, clienttypes.NewHeight(height.RevisionNumber, heightPlus5.RevisionHeight), commitmenttypes.GetSDKSpecs(), upgradePath, false, false) + consensusState = types.NewConsensusState(suite.clientTime, commitmenttypes.NewMerkleRoot(suite.header.Header.GetAppHash()), suite.valsHash) + // Make new header at height less than latest client state + newHeader = suite.chainA.CreateTMClientHeader(chainID, int64(heightMinus1.RevisionHeight), height, suite.headerTime, suite.valSet, suite.valSet, signers) + currentTime = suite.now + }, + expPass: false, + }, + } + + for i, tc := range testCases { + tc := tc + + consStateHeight = height // must be explicitly changed + // setup test + tc.setup() + + // Set current timestamp in context + ctx := suite.chainA.GetContext().WithBlockTime(currentTime) + + // Set trusted consensus state in client store + suite.chainA.App.IBCKeeper.ClientKeeper.SetClientConsensusState(ctx, clientID, consStateHeight, consensusState) + + height := newHeader.GetHeight() + expectedConsensus := &types.ConsensusState{ + Timestamp: newHeader.GetTime(), + Root: commitmenttypes.NewMerkleRoot(newHeader.Header.GetAppHash()), + NextValidatorsHash: newHeader.Header.NextValidatorsHash, + } + + newClientState, consensusState, err := clientState.CheckHeaderAndUpdateState( + ctx, + suite.cdc, + suite.chainA.App.IBCKeeper.ClientKeeper.ClientStore(suite.chainA.GetContext(), clientID), // pass in clientID prefixed clientStore + newHeader, + ) + + if tc.expPass { + suite.Require().NoError(err, "valid test case %d failed: %s", i, tc.name) + + // Determine if clientState should be updated or not + // TODO: check the entire Height struct once GetLatestHeight returns clienttypes.Height + if height.GT(clientState.LatestHeight) { + // Header Height is greater than clientState latest Height, clientState should be updated with header.GetHeight() + suite.Require().Equal(height, newClientState.GetLatestHeight(), "clientstate height did not update") + } else { + // Update will add past consensus state, clientState should not be updated at all + suite.Require().Equal(clientState.LatestHeight, newClientState.GetLatestHeight(), "client state height updated for past header") + } + + suite.Require().Equal(expectedConsensus, consensusState, "valid test case %d failed: %s", i, tc.name) + } else { + suite.Require().Error(err, "invalid test case %d passed: %s", i, tc.name) + suite.Require().Nil(newClientState, "invalid test case %d passed: %s", i, tc.name) + suite.Require().Nil(consensusState, "invalid test case %d passed: %s", i, tc.name) + } + } +} diff --git a/light-clients/07-tendermint/types/upgrade.go b/light-clients/07-tendermint/types/upgrade.go new file mode 100644 index 0000000000..397e9cfd83 --- /dev/null +++ b/light-clients/07-tendermint/types/upgrade.go @@ -0,0 +1,156 @@ +package types + +import ( + "fmt" + + "github.com/cosmos/cosmos-sdk/codec" + sdk "github.com/cosmos/cosmos-sdk/types" + sdkerrors 
"github.com/cosmos/cosmos-sdk/types/errors" + clienttypes "github.com/cosmos/cosmos-sdk/x/ibc/core/02-client/types" + commitmenttypes "github.com/cosmos/cosmos-sdk/x/ibc/core/23-commitment/types" + "github.com/cosmos/cosmos-sdk/x/ibc/core/exported" + upgradetypes "github.com/cosmos/cosmos-sdk/x/upgrade/types" +) + +// VerifyUpgradeAndUpdateState checks if the upgraded client has been committed by the current client +// It will zero out all client-specific fields (e.g. TrustingPeriod and verify all data +// in client state that must be the same across all valid Tendermint clients for the new chain. +// VerifyUpgrade will return an error if: +// - the upgradedClient is not a Tendermint ClientState +// - the lastest height of the client state does not have the same revision number or has a greater +// height than the committed client. +// - the height of upgraded client is not greater than that of current client +// - the latest height of the new client does not match or is greater than the height in committed client +// - any Tendermint chain specified parameter in upgraded client such as ChainID, UnbondingPeriod, +// and ProofSpecs do not match parameters set by committed client +func (cs ClientState) VerifyUpgradeAndUpdateState( + ctx sdk.Context, cdc codec.BinaryMarshaler, clientStore sdk.KVStore, + upgradedClient exported.ClientState, upgradedConsState exported.ConsensusState, + proofUpgradeClient, proofUpgradeConsState []byte, +) (exported.ClientState, exported.ConsensusState, error) { + if len(cs.UpgradePath) == 0 { + return nil, nil, sdkerrors.Wrap(clienttypes.ErrInvalidUpgradeClient, "cannot upgrade client, no upgrade path set") + } + + // last height of current counterparty chain must be client's latest height + lastHeight := cs.GetLatestHeight() + + if !upgradedClient.GetLatestHeight().GT(lastHeight) { + return nil, nil, sdkerrors.Wrapf(sdkerrors.ErrInvalidHeight, "upgraded client height %s must be at greater than current client height %s", + upgradedClient.GetLatestHeight(), lastHeight) + } + + // counterparty chain must commit the upgraded client with all client-customizable fields zeroed out + // at the upgrade path specified by current client + // counterparty must also commit to the upgraded consensus state at a sub-path under the upgrade path specified + tmUpgradeClient, ok := upgradedClient.(*ClientState) + if !ok { + return nil, nil, sdkerrors.Wrapf(clienttypes.ErrInvalidClientType, "upgraded client must be Tendermint client. expected: %T got: %T", + &ClientState{}, upgradedClient) + } + tmUpgradeConsState, ok := upgradedConsState.(*ConsensusState) + if !ok { + return nil, nil, sdkerrors.Wrapf(clienttypes.ErrInvalidConsensus, "upgraded consensus state must be Tendermint consensus state. 
expected %T, got: %T", + &ConsensusState{}, upgradedConsState) + } + + // unmarshal proofs + var merkleProofClient, merkleProofConsState commitmenttypes.MerkleProof + if err := cdc.UnmarshalBinaryBare(proofUpgradeClient, &merkleProofClient); err != nil { + return nil, nil, sdkerrors.Wrapf(commitmenttypes.ErrInvalidProof, "could not unmarshal client merkle proof: %v", err) + } + if err := cdc.UnmarshalBinaryBare(proofUpgradeConsState, &merkleProofConsState); err != nil { + return nil, nil, sdkerrors.Wrapf(commitmenttypes.ErrInvalidProof, "could not unmarshal consensus state merkle proof: %v", err) + } + + // Must prove against latest consensus state to ensure we are verifying against latest upgrade plan + // This verifies that upgrade is intended for the provided revision, since committed client must exist + // at this consensus state + consState, err := GetConsensusState(clientStore, cdc, lastHeight) + if err != nil { + return nil, nil, sdkerrors.Wrap(err, "could not retrieve consensus state for lastHeight") + } + + if cs.IsExpired(consState.Timestamp, ctx.BlockTime()) { + return nil, nil, sdkerrors.Wrap(clienttypes.ErrInvalidClient, "cannot upgrade an expired client") + } + + // Verify client proof + bz, err := cdc.MarshalInterface(upgradedClient) + if err != nil { + return nil, nil, sdkerrors.Wrapf(clienttypes.ErrInvalidClient, "could not marshal client state: %v", err) + } + // construct clientState Merkle path + upgradeClientPath := constructUpgradeClientMerklePath(cs.UpgradePath, lastHeight) + if err := merkleProofClient.VerifyMembership(cs.ProofSpecs, consState.GetRoot(), upgradeClientPath, bz); err != nil { + return nil, nil, sdkerrors.Wrapf(err, "client state proof failed. Path: %s", upgradeClientPath.Pretty()) + } + + // Verify consensus state proof + bz, err = cdc.MarshalInterface(upgradedConsState) + if err != nil { + return nil, nil, sdkerrors.Wrapf(clienttypes.ErrInvalidConsensus, "could not marshal consensus state: %v", err) + } + // construct consensus state Merkle path + upgradeConsStatePath := constructUpgradeConsStateMerklePath(cs.UpgradePath, lastHeight) + if err := merkleProofConsState.VerifyMembership(cs.ProofSpecs, consState.GetRoot(), upgradeConsStatePath, bz); err != nil { + return nil, nil, sdkerrors.Wrapf(err, "consensus state proof failed. Path: %s", upgradeConsStatePath.Pretty()) + } + + // Construct new client state and consensus state + // Relayer chosen client parameters are ignored. + // All chain-chosen parameters come from committed client, all client-chosen parameters + // come from current client. + newClientState := NewClientState( + tmUpgradeClient.ChainId, cs.TrustLevel, cs.TrustingPeriod, tmUpgradeClient.UnbondingPeriod, + cs.MaxClockDrift, tmUpgradeClient.LatestHeight, tmUpgradeClient.ProofSpecs, tmUpgradeClient.UpgradePath, + cs.AllowUpdateAfterExpiry, cs.AllowUpdateAfterMisbehaviour, + ) + + if err := newClientState.Validate(); err != nil { + return nil, nil, sdkerrors.Wrap(err, "updated client state failed basic validation") + } + + // The new consensus state is merely used as a trusted kernel against which headers on the new + // chain can be verified. The root is empty as it cannot be known in advance, thus no proof verification will pass. + // The timestamp and the NextValidatorsHash of the consensus state is the blocktime and NextValidatorsHash + // of the last block committed by the old chain. 
This will allow the first block of the new chain to be verified against
+	// the last validators of the old chain so long as it is submitted within the TrustingPeriod of this client.
+	// NOTE: We do not set processed time for this consensus state since this consensus state should not be used for packet verification
+	// as the root is empty. The next consensus state submitted using update will be usable for packet-verification.
+	newConsState := NewConsensusState(
+		tmUpgradeConsState.Timestamp, commitmenttypes.MerkleRoot{}, tmUpgradeConsState.NextValidatorsHash,
+	)
+
+	return newClientState, newConsState, nil
+}
+
+// construct MerklePath for the committed client from upgradePath
+func constructUpgradeClientMerklePath(upgradePath []string, lastHeight exported.Height) commitmenttypes.MerklePath {
+	// copy all elements from upgradePath except final element
+	clientPath := make([]string, len(upgradePath)-1)
+	copy(clientPath, upgradePath)
+
+	// append lastHeight and `upgradedClient` to last key of upgradePath and use as lastKey of clientPath
+	// this will create the IAVL key that is used to store client in upgrade store
+	lastKey := upgradePath[len(upgradePath)-1]
+	appendedKey := fmt.Sprintf("%s/%d/%s", lastKey, lastHeight.GetRevisionHeight(), upgradetypes.KeyUpgradedClient)
+
+	clientPath = append(clientPath, appendedKey)
+	return commitmenttypes.NewMerklePath(clientPath...)
+}
+
+// construct MerklePath for the committed consensus state from upgradePath
+func constructUpgradeConsStateMerklePath(upgradePath []string, lastHeight exported.Height) commitmenttypes.MerklePath {
+	// copy all elements from upgradePath except final element
+	consPath := make([]string, len(upgradePath)-1)
+	copy(consPath, upgradePath)
+
+	// append lastHeight and `upgradedConsState` to last key of upgradePath and use as lastKey of consPath
+	// this will create the IAVL key that is used to store the consensus state in the upgrade store
+	lastKey := upgradePath[len(upgradePath)-1]
+	appendedKey := fmt.Sprintf("%s/%d/%s", lastKey, lastHeight.GetRevisionHeight(), upgradetypes.KeyUpgradedConsState)
+
+	consPath = append(consPath, appendedKey)
+	return commitmenttypes.NewMerklePath(consPath...)
+} diff --git a/light-clients/07-tendermint/types/upgrade_test.go b/light-clients/07-tendermint/types/upgrade_test.go new file mode 100644 index 0000000000..7be3a4943f --- /dev/null +++ b/light-clients/07-tendermint/types/upgrade_test.go @@ -0,0 +1,512 @@ +package types_test + +import ( + clienttypes "github.com/cosmos/cosmos-sdk/x/ibc/core/02-client/types" + commitmenttypes "github.com/cosmos/cosmos-sdk/x/ibc/core/23-commitment/types" + "github.com/cosmos/cosmos-sdk/x/ibc/core/exported" + "github.com/cosmos/cosmos-sdk/x/ibc/light-clients/07-tendermint/types" + upgradetypes "github.com/cosmos/cosmos-sdk/x/upgrade/types" +) + +func (suite *TendermintTestSuite) TestVerifyUpgrade() { + var ( + upgradedClient exported.ClientState + upgradedConsState exported.ConsensusState + lastHeight clienttypes.Height + clientA string + proofUpgradedClient, proofUpgradedConsState []byte + ) + + testCases := []struct { + name string + setup func() + expPass bool + }{ + { + name: "successful upgrade", + setup: func() { + + upgradedClient = types.NewClientState("newChainId", types.DefaultTrustLevel, trustingPeriod, ubdPeriod+trustingPeriod, maxClockDrift, newClientHeight, commitmenttypes.GetSDKSpecs(), upgradePath, false, false) + upgradedConsState = &types.ConsensusState{ + NextValidatorsHash: []byte("nextValsHash"), + } + + // upgrade Height is at next block + lastHeight = clienttypes.NewHeight(0, uint64(suite.chainB.GetContext().BlockHeight()+1)) + + // zero custom fields and store in upgrade store + suite.chainB.App.UpgradeKeeper.SetUpgradedClient(suite.chainB.GetContext(), int64(lastHeight.GetRevisionHeight()), upgradedClient) + suite.chainB.App.UpgradeKeeper.SetUpgradedConsensusState(suite.chainB.GetContext(), int64(lastHeight.GetRevisionHeight()), upgradedConsState) + + // commit upgrade store changes and update clients + + suite.coordinator.CommitBlock(suite.chainB) + err := suite.coordinator.UpdateClient(suite.chainA, suite.chainB, clientA, exported.Tendermint) + suite.Require().NoError(err) + + cs, found := suite.chainA.App.IBCKeeper.ClientKeeper.GetClientState(suite.chainA.GetContext(), clientA) + suite.Require().True(found) + + proofUpgradedClient, _ = suite.chainB.QueryUpgradeProof(upgradetypes.UpgradedClientKey(int64(lastHeight.GetRevisionHeight())), cs.GetLatestHeight().GetRevisionHeight()) + proofUpgradedConsState, _ = suite.chainB.QueryUpgradeProof(upgradetypes.UpgradedConsStateKey(int64(lastHeight.GetRevisionHeight())), cs.GetLatestHeight().GetRevisionHeight()) + }, + expPass: true, + }, + { + name: "successful upgrade to same revision", + setup: func() { + upgradedHeight := clienttypes.NewHeight(0, uint64(suite.chainB.GetContext().BlockHeight()+2)) + upgradedClient = types.NewClientState("newChainId", types.DefaultTrustLevel, trustingPeriod, ubdPeriod+trustingPeriod, maxClockDrift, upgradedHeight, commitmenttypes.GetSDKSpecs(), upgradePath, false, false) + upgradedConsState = &types.ConsensusState{ + NextValidatorsHash: []byte("nextValsHash"), + } + + // upgrade Height is at next block + lastHeight = clienttypes.NewHeight(0, uint64(suite.chainB.GetContext().BlockHeight()+1)) + + // zero custom fields and store in upgrade store + suite.chainB.App.UpgradeKeeper.SetUpgradedClient(suite.chainB.GetContext(), int64(lastHeight.GetRevisionHeight()), upgradedClient) + suite.chainB.App.UpgradeKeeper.SetUpgradedConsensusState(suite.chainB.GetContext(), int64(lastHeight.GetRevisionHeight()), upgradedConsState) + + // commit upgrade store changes and update clients + + 
suite.coordinator.CommitBlock(suite.chainB) + err := suite.coordinator.UpdateClient(suite.chainA, suite.chainB, clientA, exported.Tendermint) + suite.Require().NoError(err) + + cs, found := suite.chainA.App.IBCKeeper.ClientKeeper.GetClientState(suite.chainA.GetContext(), clientA) + suite.Require().True(found) + + proofUpgradedClient, _ = suite.chainB.QueryUpgradeProof(upgradetypes.UpgradedClientKey(int64(lastHeight.GetRevisionHeight())), cs.GetLatestHeight().GetRevisionHeight()) + proofUpgradedConsState, _ = suite.chainB.QueryUpgradeProof(upgradetypes.UpgradedConsStateKey(int64(lastHeight.GetRevisionHeight())), cs.GetLatestHeight().GetRevisionHeight()) + }, + expPass: true, + }, + + { + name: "unsuccessful upgrade: upgrade height revision height is more than the current client revision height", + setup: func() { + + upgradedClient = types.NewClientState("newChainId", types.DefaultTrustLevel, trustingPeriod, ubdPeriod+trustingPeriod, maxClockDrift, newClientHeight, commitmenttypes.GetSDKSpecs(), upgradePath, false, false) + upgradedConsState = &types.ConsensusState{ + NextValidatorsHash: []byte("nextValsHash"), + } + + // upgrade Height is 10 blocks from now + lastHeight = clienttypes.NewHeight(0, uint64(suite.chainB.GetContext().BlockHeight()+10)) + + // zero custom fields and store in upgrade store + suite.chainB.App.UpgradeKeeper.SetUpgradedClient(suite.chainB.GetContext(), int64(lastHeight.GetRevisionHeight()), upgradedClient) + suite.chainB.App.UpgradeKeeper.SetUpgradedConsensusState(suite.chainB.GetContext(), int64(lastHeight.GetRevisionHeight()), upgradedConsState) + + // commit upgrade store changes and update clients + + suite.coordinator.CommitBlock(suite.chainB) + err := suite.coordinator.UpdateClient(suite.chainA, suite.chainB, clientA, exported.Tendermint) + suite.Require().NoError(err) + + cs, found := suite.chainA.App.IBCKeeper.ClientKeeper.GetClientState(suite.chainA.GetContext(), clientA) + suite.Require().True(found) + + proofUpgradedClient, _ = suite.chainB.QueryUpgradeProof(upgradetypes.UpgradedClientKey(int64(lastHeight.GetRevisionHeight())), cs.GetLatestHeight().GetRevisionHeight()) + proofUpgradedConsState, _ = suite.chainB.QueryUpgradeProof(upgradetypes.UpgradedConsStateKey(int64(lastHeight.GetRevisionHeight())), cs.GetLatestHeight().GetRevisionHeight()) + }, + expPass: false, + }, + { + name: "unsuccessful upgrade: chain-specified parameters do not match committed client", + setup: func() { + + upgradedClient = types.NewClientState("newChainId", types.DefaultTrustLevel, trustingPeriod, ubdPeriod+trustingPeriod, maxClockDrift, newClientHeight, commitmenttypes.GetSDKSpecs(), upgradePath, false, false) + upgradedConsState = &types.ConsensusState{ + NextValidatorsHash: []byte("nextValsHash"), + } + + // upgrade Height is at next block + lastHeight = clienttypes.NewHeight(0, uint64(suite.chainB.GetContext().BlockHeight()+1)) + + // zero custom fields and store in upgrade store + suite.chainB.App.UpgradeKeeper.SetUpgradedClient(suite.chainB.GetContext(), int64(lastHeight.GetRevisionHeight()), upgradedClient) + suite.chainB.App.UpgradeKeeper.SetUpgradedConsensusState(suite.chainB.GetContext(), int64(lastHeight.GetRevisionHeight()), upgradedConsState) + + // change upgradedClient client-specified parameters + upgradedClient = types.NewClientState("wrongchainID", types.DefaultTrustLevel, trustingPeriod, ubdPeriod, maxClockDrift, newClientHeight, commitmenttypes.GetSDKSpecs(), upgradePath, true, true) + + suite.coordinator.CommitBlock(suite.chainB) + err := 
suite.coordinator.UpdateClient(suite.chainA, suite.chainB, clientA, exported.Tendermint) + suite.Require().NoError(err) + + cs, found := suite.chainA.App.IBCKeeper.ClientKeeper.GetClientState(suite.chainA.GetContext(), clientA) + suite.Require().True(found) + + proofUpgradedClient, _ = suite.chainB.QueryUpgradeProof(upgradetypes.UpgradedClientKey(int64(lastHeight.GetRevisionHeight())), cs.GetLatestHeight().GetRevisionHeight()) + proofUpgradedConsState, _ = suite.chainB.QueryUpgradeProof(upgradetypes.UpgradedConsStateKey(int64(lastHeight.GetRevisionHeight())), cs.GetLatestHeight().GetRevisionHeight()) + }, + expPass: false, + }, + { + name: "unsuccessful upgrade: client-specified parameters do not match previous client", + setup: func() { + + upgradedClient = types.NewClientState("newChainId", types.DefaultTrustLevel, trustingPeriod, ubdPeriod+trustingPeriod, maxClockDrift, lastHeight, commitmenttypes.GetSDKSpecs(), upgradePath, false, false) + upgradedConsState = &types.ConsensusState{ + NextValidatorsHash: []byte("nextValsHash"), + } + + // zero custom fields and store in upgrade store + suite.chainB.App.UpgradeKeeper.SetUpgradedClient(suite.chainB.GetContext(), int64(lastHeight.GetRevisionHeight()), upgradedClient) + suite.chainB.App.UpgradeKeeper.SetUpgradedConsensusState(suite.chainB.GetContext(), int64(lastHeight.GetRevisionHeight()), upgradedConsState) + + // change upgradedClient client-specified parameters + upgradedClient = types.NewClientState("newChainId", types.DefaultTrustLevel, ubdPeriod, ubdPeriod+trustingPeriod, maxClockDrift+5, lastHeight, commitmenttypes.GetSDKSpecs(), upgradePath, true, false) + + suite.coordinator.CommitBlock(suite.chainB) + err := suite.coordinator.UpdateClient(suite.chainA, suite.chainB, clientA, exported.Tendermint) + suite.Require().NoError(err) + + cs, found := suite.chainA.App.IBCKeeper.ClientKeeper.GetClientState(suite.chainA.GetContext(), clientA) + suite.Require().True(found) + + proofUpgradedClient, _ = suite.chainB.QueryUpgradeProof(upgradetypes.UpgradedClientKey(int64(lastHeight.GetRevisionHeight())), cs.GetLatestHeight().GetRevisionHeight()) + proofUpgradedConsState, _ = suite.chainB.QueryUpgradeProof(upgradetypes.UpgradedConsStateKey(int64(lastHeight.GetRevisionHeight())), cs.GetLatestHeight().GetRevisionHeight()) + }, + expPass: false, + }, + { + name: "unsuccessful upgrade: relayer-submitted consensus state does not match counterparty-committed consensus state", + setup: func() { + + upgradedClient = types.NewClientState("newChainId", types.DefaultTrustLevel, trustingPeriod, ubdPeriod+trustingPeriod, maxClockDrift, newClientHeight, commitmenttypes.GetSDKSpecs(), upgradePath, false, false) + upgradedConsState = &types.ConsensusState{ + NextValidatorsHash: []byte("nextValsHash"), + } + + // upgrade Height is at next block + lastHeight = clienttypes.NewHeight(0, uint64(suite.chainB.GetContext().BlockHeight()+1)) + + // zero custom fields and store in upgrade store + suite.chainB.App.UpgradeKeeper.SetUpgradedClient(suite.chainB.GetContext(), int64(lastHeight.GetRevisionHeight()), upgradedClient) + suite.chainB.App.UpgradeKeeper.SetUpgradedConsensusState(suite.chainB.GetContext(), int64(lastHeight.GetRevisionHeight()), upgradedConsState) + + // change submitted upgradedConsensusState + upgradedConsState = &types.ConsensusState{ + NextValidatorsHash: []byte("maliciousValidators"), + } + + // commit upgrade store changes and update clients + + suite.coordinator.CommitBlock(suite.chainB) + err := suite.coordinator.UpdateClient(suite.chainA, 
suite.chainB, clientA, exported.Tendermint) + suite.Require().NoError(err) + + cs, found := suite.chainA.App.IBCKeeper.ClientKeeper.GetClientState(suite.chainA.GetContext(), clientA) + suite.Require().True(found) + + proofUpgradedClient, _ = suite.chainB.QueryUpgradeProof(upgradetypes.UpgradedClientKey(int64(lastHeight.GetRevisionHeight())), cs.GetLatestHeight().GetRevisionHeight()) + proofUpgradedConsState, _ = suite.chainB.QueryUpgradeProof(upgradetypes.UpgradedConsStateKey(int64(lastHeight.GetRevisionHeight())), cs.GetLatestHeight().GetRevisionHeight()) + }, + expPass: false, + }, + { + name: "unsuccessful upgrade: client proof unmarshal failed", + setup: func() { + upgradedClient = types.NewClientState("newChainId", types.DefaultTrustLevel, trustingPeriod, ubdPeriod+trustingPeriod, maxClockDrift, newClientHeight, commitmenttypes.GetSDKSpecs(), upgradePath, false, false) + upgradedConsState = &types.ConsensusState{ + NextValidatorsHash: []byte("nextValsHash"), + } + suite.chainB.App.UpgradeKeeper.SetUpgradedConsensusState(suite.chainB.GetContext(), int64(lastHeight.GetRevisionHeight()), upgradedConsState) + + cs, found := suite.chainA.App.IBCKeeper.ClientKeeper.GetClientState(suite.chainA.GetContext(), clientA) + suite.Require().True(found) + + proofUpgradedConsState, _ = suite.chainB.QueryUpgradeProof(upgradetypes.UpgradedConsStateKey(int64(lastHeight.GetRevisionHeight())), cs.GetLatestHeight().GetRevisionHeight()) + + proofUpgradedClient = []byte("proof") + }, + expPass: false, + }, + { + name: "unsuccessful upgrade: consensus state proof unmarshal failed", + setup: func() { + upgradedClient = types.NewClientState("newChainId", types.DefaultTrustLevel, trustingPeriod, ubdPeriod+trustingPeriod, maxClockDrift, newClientHeight, commitmenttypes.GetSDKSpecs(), upgradePath, false, false) + upgradedConsState = &types.ConsensusState{ + NextValidatorsHash: []byte("nextValsHash"), + } + + suite.chainB.App.UpgradeKeeper.SetUpgradedClient(suite.chainB.GetContext(), int64(lastHeight.GetRevisionHeight()), upgradedClient) + + cs, found := suite.chainA.App.IBCKeeper.ClientKeeper.GetClientState(suite.chainA.GetContext(), clientA) + suite.Require().True(found) + + proofUpgradedClient, _ = suite.chainB.QueryUpgradeProof(upgradetypes.UpgradedClientKey(int64(lastHeight.GetRevisionHeight())), cs.GetLatestHeight().GetRevisionHeight()) + + proofUpgradedConsState = []byte("proof") + }, + expPass: false, + }, + { + name: "unsuccessful upgrade: client proof verification failed", + setup: func() { + // create but do not store upgraded client + upgradedClient = types.NewClientState("newChainId", types.DefaultTrustLevel, trustingPeriod, ubdPeriod+trustingPeriod, maxClockDrift, newClientHeight, commitmenttypes.GetSDKSpecs(), upgradePath, false, false) + upgradedConsState = &types.ConsensusState{ + NextValidatorsHash: []byte("nextValsHash"), + } + + // upgrade Height is at next block + lastHeight = clienttypes.NewHeight(0, uint64(suite.chainB.GetContext().BlockHeight()+1)) + + suite.chainB.App.UpgradeKeeper.SetUpgradedConsensusState(suite.chainB.GetContext(), int64(lastHeight.GetRevisionHeight()), upgradedConsState) + + cs, found := suite.chainA.App.IBCKeeper.ClientKeeper.GetClientState(suite.chainA.GetContext(), clientA) + suite.Require().True(found) + + proofUpgradedClient, _ = suite.chainB.QueryUpgradeProof(upgradetypes.UpgradedClientKey(int64(lastHeight.GetRevisionHeight())), cs.GetLatestHeight().GetRevisionHeight()) + proofUpgradedConsState, _ = 
suite.chainB.QueryUpgradeProof(upgradetypes.UpgradedConsStateKey(int64(lastHeight.GetRevisionHeight())), cs.GetLatestHeight().GetRevisionHeight()) + }, + expPass: false, + }, + { + name: "unsuccessful upgrade: consensus state proof verification failed", + setup: func() { + // create but do not store upgraded client + upgradedClient = types.NewClientState("newChainId", types.DefaultTrustLevel, trustingPeriod, ubdPeriod+trustingPeriod, maxClockDrift, newClientHeight, commitmenttypes.GetSDKSpecs(), upgradePath, false, false) + upgradedConsState = &types.ConsensusState{ + NextValidatorsHash: []byte("nextValsHash"), + } + + // upgrade Height is at next block + lastHeight = clienttypes.NewHeight(0, uint64(suite.chainB.GetContext().BlockHeight()+1)) + + suite.chainB.App.UpgradeKeeper.SetUpgradedClient(suite.chainB.GetContext(), int64(lastHeight.GetRevisionHeight()), upgradedClient) + + cs, found := suite.chainA.App.IBCKeeper.ClientKeeper.GetClientState(suite.chainA.GetContext(), clientA) + suite.Require().True(found) + + proofUpgradedClient, _ = suite.chainB.QueryUpgradeProof(upgradetypes.UpgradedClientKey(int64(lastHeight.GetRevisionHeight())), cs.GetLatestHeight().GetRevisionHeight()) + proofUpgradedConsState, _ = suite.chainB.QueryUpgradeProof(upgradetypes.UpgradedConsStateKey(int64(lastHeight.GetRevisionHeight())), cs.GetLatestHeight().GetRevisionHeight()) + }, + expPass: false, + }, + { + name: "unsuccessful upgrade: upgrade path is empty", + setup: func() { + + upgradedClient = types.NewClientState("newChainId", types.DefaultTrustLevel, trustingPeriod, ubdPeriod+trustingPeriod, maxClockDrift, newClientHeight, commitmenttypes.GetSDKSpecs(), upgradePath, false, false) + upgradedConsState = &types.ConsensusState{ + NextValidatorsHash: []byte("nextValsHash"), + } + + // upgrade Height is at next block + lastHeight = clienttypes.NewHeight(0, uint64(suite.chainB.GetContext().BlockHeight()+1)) + + // zero custom fields and store in upgrade store + suite.chainB.App.UpgradeKeeper.SetUpgradedClient(suite.chainB.GetContext(), int64(lastHeight.GetRevisionHeight()), upgradedClient) + + // commit upgrade store changes and update clients + + suite.coordinator.CommitBlock(suite.chainB) + err := suite.coordinator.UpdateClient(suite.chainA, suite.chainB, clientA, exported.Tendermint) + suite.Require().NoError(err) + + cs, found := suite.chainA.App.IBCKeeper.ClientKeeper.GetClientState(suite.chainA.GetContext(), clientA) + suite.Require().True(found) + + proofUpgradedClient, _ = suite.chainB.QueryUpgradeProof(upgradetypes.UpgradedClientKey(int64(lastHeight.GetRevisionHeight())), cs.GetLatestHeight().GetRevisionHeight()) + proofUpgradedConsState, _ = suite.chainB.QueryUpgradeProof(upgradetypes.UpgradedConsStateKey(int64(lastHeight.GetRevisionHeight())), cs.GetLatestHeight().GetRevisionHeight()) + + // SetClientState with empty upgrade path + tmClient, _ := cs.(*types.ClientState) + tmClient.UpgradePath = []string{""} + suite.chainA.App.IBCKeeper.ClientKeeper.SetClientState(suite.chainA.GetContext(), clientA, tmClient) + }, + expPass: false, + }, + { + name: "unsuccessful upgrade: upgraded height is not greater than current height", + setup: func() { + + upgradedClient = types.NewClientState("newChainId", types.DefaultTrustLevel, trustingPeriod, ubdPeriod+trustingPeriod, maxClockDrift, height, commitmenttypes.GetSDKSpecs(), upgradePath, false, false) + upgradedConsState = &types.ConsensusState{ + NextValidatorsHash: []byte("nextValsHash"), + } + + // upgrade Height is at next block + lastHeight = 
clienttypes.NewHeight(0, uint64(suite.chainB.GetContext().BlockHeight()+1)) + + // zero custom fields and store in upgrade store + suite.chainB.App.UpgradeKeeper.SetUpgradedClient(suite.chainB.GetContext(), int64(lastHeight.GetRevisionHeight()), upgradedClient) + + // commit upgrade store changes and update clients + + suite.coordinator.CommitBlock(suite.chainB) + err := suite.coordinator.UpdateClient(suite.chainA, suite.chainB, clientA, exported.Tendermint) + suite.Require().NoError(err) + + cs, found := suite.chainA.App.IBCKeeper.ClientKeeper.GetClientState(suite.chainA.GetContext(), clientA) + suite.Require().True(found) + + proofUpgradedClient, _ = suite.chainB.QueryUpgradeProof(upgradetypes.UpgradedClientKey(int64(lastHeight.GetRevisionHeight())), cs.GetLatestHeight().GetRevisionHeight()) + proofUpgradedConsState, _ = suite.chainB.QueryUpgradeProof(upgradetypes.UpgradedConsStateKey(int64(lastHeight.GetRevisionHeight())), cs.GetLatestHeight().GetRevisionHeight()) + }, + expPass: false, + }, + { + name: "unsuccessful upgrade: consensus state for upgrade height cannot be found", + setup: func() { + + upgradedClient = types.NewClientState("newChainId", types.DefaultTrustLevel, trustingPeriod, ubdPeriod+trustingPeriod, maxClockDrift, newClientHeight, commitmenttypes.GetSDKSpecs(), upgradePath, false, false) + upgradedConsState = &types.ConsensusState{ + NextValidatorsHash: []byte("nextValsHash"), + } + + // upgrade Height is at next block + lastHeight = clienttypes.NewHeight(0, uint64(suite.chainB.GetContext().BlockHeight()+100)) + + // zero custom fields and store in upgrade store + suite.chainB.App.UpgradeKeeper.SetUpgradedClient(suite.chainB.GetContext(), int64(lastHeight.GetRevisionHeight()), upgradedClient) + + // commit upgrade store changes and update clients + + suite.coordinator.CommitBlock(suite.chainB) + err := suite.coordinator.UpdateClient(suite.chainA, suite.chainB, clientA, exported.Tendermint) + suite.Require().NoError(err) + + cs, found := suite.chainA.App.IBCKeeper.ClientKeeper.GetClientState(suite.chainA.GetContext(), clientA) + suite.Require().True(found) + + proofUpgradedClient, _ = suite.chainB.QueryUpgradeProof(upgradetypes.UpgradedClientKey(int64(lastHeight.GetRevisionHeight())), cs.GetLatestHeight().GetRevisionHeight()) + proofUpgradedConsState, _ = suite.chainB.QueryUpgradeProof(upgradetypes.UpgradedConsStateKey(int64(lastHeight.GetRevisionHeight())), cs.GetLatestHeight().GetRevisionHeight()) + }, + expPass: false, + }, + { + name: "unsuccessful upgrade: client is expired", + setup: func() { + + upgradedClient = types.NewClientState("newChainId", types.DefaultTrustLevel, trustingPeriod, ubdPeriod+trustingPeriod, maxClockDrift, lastHeight, commitmenttypes.GetSDKSpecs(), upgradePath, false, false) + upgradedConsState = &types.ConsensusState{ + NextValidatorsHash: []byte("nextValsHash"), + } + + // zero custom fields and store in upgrade store + suite.chainB.App.UpgradeKeeper.SetUpgradedClient(suite.chainB.GetContext(), int64(lastHeight.GetRevisionHeight()), upgradedClient) + + // commit upgrade store changes and update clients + + suite.coordinator.CommitBlock(suite.chainB) + err := suite.coordinator.UpdateClient(suite.chainA, suite.chainB, clientA, exported.Tendermint) + suite.Require().NoError(err) + + // expire chainB's client + suite.chainA.ExpireClient(ubdPeriod) + + cs, found := suite.chainA.App.IBCKeeper.ClientKeeper.GetClientState(suite.chainA.GetContext(), clientA) + suite.Require().True(found) + + proofUpgradedClient, _ = 
suite.chainB.QueryUpgradeProof(upgradetypes.UpgradedClientKey(int64(lastHeight.GetRevisionHeight())), cs.GetLatestHeight().GetRevisionHeight()) + proofUpgradedConsState, _ = suite.chainB.QueryUpgradeProof(upgradetypes.UpgradedConsStateKey(int64(lastHeight.GetRevisionHeight())), cs.GetLatestHeight().GetRevisionHeight()) + }, + expPass: false, + }, + { + name: "unsuccessful upgrade: updated unbonding period is equal to trusting period", + setup: func() { + + upgradedClient = types.NewClientState("newChainId", types.DefaultTrustLevel, trustingPeriod, trustingPeriod, maxClockDrift, newClientHeight, commitmenttypes.GetSDKSpecs(), upgradePath, false, false) + upgradedConsState = &types.ConsensusState{ + NextValidatorsHash: []byte("nextValsHash"), + } + + // upgrade Height is at next block + lastHeight = clienttypes.NewHeight(0, uint64(suite.chainB.GetContext().BlockHeight()+1)) + + // zero custom fields and store in upgrade store + suite.chainB.App.UpgradeKeeper.SetUpgradedClient(suite.chainB.GetContext(), int64(lastHeight.GetRevisionHeight()), upgradedClient) + + // commit upgrade store changes and update clients + + suite.coordinator.CommitBlock(suite.chainB) + err := suite.coordinator.UpdateClient(suite.chainA, suite.chainB, clientA, exported.Tendermint) + suite.Require().NoError(err) + + cs, found := suite.chainA.App.IBCKeeper.ClientKeeper.GetClientState(suite.chainA.GetContext(), clientA) + suite.Require().True(found) + + proofUpgradedClient, _ = suite.chainB.QueryUpgradeProof(upgradetypes.UpgradedClientKey(int64(lastHeight.GetRevisionHeight())), cs.GetLatestHeight().GetRevisionHeight()) + proofUpgradedConsState, _ = suite.chainB.QueryUpgradeProof(upgradetypes.UpgradedConsStateKey(int64(lastHeight.GetRevisionHeight())), cs.GetLatestHeight().GetRevisionHeight()) + }, + expPass: false, + }, + { + name: "unsuccessful upgrade: final client is not valid", + setup: func() { + + // new client has smaller unbonding period such that old trusting period is no longer valid + upgradedClient = types.NewClientState("newChainId", types.DefaultTrustLevel, trustingPeriod, trustingPeriod, maxClockDrift, newClientHeight, commitmenttypes.GetSDKSpecs(), upgradePath, false, false) + upgradedConsState = &types.ConsensusState{ + NextValidatorsHash: []byte("nextValsHash"), + } + + // upgrade Height is at next block + lastHeight = clienttypes.NewHeight(0, uint64(suite.chainB.GetContext().BlockHeight()+1)) + + // zero custom fields and store in upgrade store + suite.chainB.App.UpgradeKeeper.SetUpgradedClient(suite.chainB.GetContext(), int64(lastHeight.GetRevisionHeight()), upgradedClient) + suite.chainB.App.UpgradeKeeper.SetUpgradedConsensusState(suite.chainB.GetContext(), int64(lastHeight.GetRevisionHeight()), upgradedConsState) + + // commit upgrade store changes and update clients + + suite.coordinator.CommitBlock(suite.chainB) + err := suite.coordinator.UpdateClient(suite.chainA, suite.chainB, clientA, exported.Tendermint) + suite.Require().NoError(err) + + cs, found := suite.chainA.App.IBCKeeper.ClientKeeper.GetClientState(suite.chainA.GetContext(), clientA) + suite.Require().True(found) + + proofUpgradedClient, _ = suite.chainB.QueryUpgradeProof(upgradetypes.UpgradedClientKey(int64(lastHeight.GetRevisionHeight())), cs.GetLatestHeight().GetRevisionHeight()) + proofUpgradedConsState, _ = suite.chainB.QueryUpgradeProof(upgradetypes.UpgradedConsStateKey(int64(lastHeight.GetRevisionHeight())), cs.GetLatestHeight().GetRevisionHeight()) + }, + expPass: false, + }, + } + + for _, tc := range testCases { + tc := tc + + 
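+		// (tc := tc pins the loop variable to a fresh copy for this iteration)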
// reset suite + suite.SetupTest() + + clientA, _ = suite.coordinator.SetupClients(suite.chainA, suite.chainB, exported.Tendermint) + + tc.setup() + + cs := suite.chainA.GetClientState(clientA) + clientStore := suite.chainA.App.IBCKeeper.ClientKeeper.ClientStore(suite.chainA.GetContext(), clientA) + + // Call ZeroCustomFields on upgraded clients to clear any client-chosen parameters in test-case upgradedClient + upgradedClient = upgradedClient.ZeroCustomFields() + + clientState, consensusState, err := cs.VerifyUpgradeAndUpdateState( + suite.chainA.GetContext(), + suite.cdc, + clientStore, + upgradedClient, + upgradedConsState, + proofUpgradedClient, + proofUpgradedConsState, + ) + + if tc.expPass { + suite.Require().NoError(err, "verify upgrade failed on valid case: %s", tc.name) + suite.Require().NotNil(clientState, "verify upgrade failed on valid case: %s", tc.name) + suite.Require().NotNil(consensusState, "verify upgrade failed on valid case: %s", tc.name) + } else { + suite.Require().Error(err, "verify upgrade passed on invalid case: %s", tc.name) + suite.Require().Nil(clientState, "verify upgrade passed on invalid case: %s", tc.name) + + suite.Require().Nil(consensusState, "verify upgrade passed on invalid case: %s", tc.name) + + } + } +} diff --git a/light-clients/09-localhost/doc.go b/light-clients/09-localhost/doc.go new file mode 100644 index 0000000000..40a0f06086 --- /dev/null +++ b/light-clients/09-localhost/doc.go @@ -0,0 +1,5 @@ +/* +Package localhost implements a concrete `ConsensusState`, `Header`, +`Misbehaviour` and `Equivocation` types for the loop-back client. +*/ +package localhost diff --git a/light-clients/09-localhost/module.go b/light-clients/09-localhost/module.go new file mode 100644 index 0000000000..57b9c5bb26 --- /dev/null +++ b/light-clients/09-localhost/module.go @@ -0,0 +1,10 @@ +package localhost + +import ( + "github.com/cosmos/cosmos-sdk/x/ibc/light-clients/09-localhost/types" +) + +// Name returns the IBC client name +func Name() string { + return types.SubModuleName +} diff --git a/light-clients/09-localhost/types/client_state.go b/light-clients/09-localhost/types/client_state.go new file mode 100644 index 0000000000..5a4a41a179 --- /dev/null +++ b/light-clients/09-localhost/types/client_state.go @@ -0,0 +1,346 @@ +package types + +import ( + "bytes" + "encoding/binary" + "reflect" + "strings" + + ics23 "github.com/confio/ics23/go" + + "github.com/cosmos/cosmos-sdk/codec" + sdk "github.com/cosmos/cosmos-sdk/types" + sdkerrors "github.com/cosmos/cosmos-sdk/types/errors" + clienttypes "github.com/cosmos/cosmos-sdk/x/ibc/core/02-client/types" + connectiontypes "github.com/cosmos/cosmos-sdk/x/ibc/core/03-connection/types" + channeltypes "github.com/cosmos/cosmos-sdk/x/ibc/core/04-channel/types" + host "github.com/cosmos/cosmos-sdk/x/ibc/core/24-host" + "github.com/cosmos/cosmos-sdk/x/ibc/core/exported" +) + +var _ exported.ClientState = (*ClientState)(nil) + +// NewClientState creates a new ClientState instance +func NewClientState(chainID string, height clienttypes.Height) *ClientState { + return &ClientState{ + ChainId: chainID, + Height: height, + } +} + +// GetChainID returns an empty string +func (cs ClientState) GetChainID() string { + return cs.ChainId +} + +// ClientType is localhost. +func (cs ClientState) ClientType() string { + return exported.Localhost +} + +// GetLatestHeight returns the latest height stored. +func (cs ClientState) GetLatestHeight() exported.Height { + return cs.Height +} + +// IsFrozen returns false. 
+func (cs ClientState) IsFrozen() bool { + return false +} + +// GetFrozenHeight returns an uninitialized IBC Height. +func (cs ClientState) GetFrozenHeight() exported.Height { + return clienttypes.ZeroHeight() +} + +// Validate performs a basic validation of the client state fields. +func (cs ClientState) Validate() error { + if strings.TrimSpace(cs.ChainId) == "" { + return sdkerrors.Wrap(sdkerrors.ErrInvalidChainID, "chain id cannot be blank") + } + if cs.Height.RevisionHeight == 0 { + return sdkerrors.Wrapf(sdkerrors.ErrInvalidHeight, "local revision height cannot be zero") + } + return nil +} + +// GetProofSpecs returns nil since localhost does not have to verify proofs +func (cs ClientState) GetProofSpecs() []*ics23.ProofSpec { + return nil +} + +// ZeroCustomFields returns the same client state since there are no custom fields in localhost +func (cs ClientState) ZeroCustomFields() exported.ClientState { + return &cs +} + +// Initialize ensures that initial consensus state for localhost is nil +func (cs ClientState) Initialize(_ sdk.Context, _ codec.BinaryMarshaler, _ sdk.KVStore, consState exported.ConsensusState) error { + if consState != nil { + return sdkerrors.Wrap(clienttypes.ErrInvalidConsensus, "initial consensus state for localhost must be nil.") + } + return nil +} + +// ExportMetadata is a no-op for localhost client +func (cs ClientState) ExportMetadata(_ sdk.KVStore) []exported.GenesisMetadata { + return nil +} + +// CheckHeaderAndUpdateState updates the localhost client. It only needs access to the context +func (cs *ClientState) CheckHeaderAndUpdateState( + ctx sdk.Context, _ codec.BinaryMarshaler, _ sdk.KVStore, _ exported.Header, +) (exported.ClientState, exported.ConsensusState, error) { + // use the chain ID from context since the localhost client is from the running chain (i.e self). + cs.ChainId = ctx.ChainID() + revision := clienttypes.ParseChainID(cs.ChainId) + cs.Height = clienttypes.NewHeight(revision, uint64(ctx.BlockHeight())) + return cs, nil, nil +} + +// CheckMisbehaviourAndUpdateState implements ClientState +// Since localhost is the client of the running chain, misbehaviour cannot be submitted to it +// Thus, CheckMisbehaviourAndUpdateState returns an error for localhost +func (cs ClientState) CheckMisbehaviourAndUpdateState( + _ sdk.Context, _ codec.BinaryMarshaler, _ sdk.KVStore, _ exported.Misbehaviour, +) (exported.ClientState, error) { + return nil, sdkerrors.Wrap(clienttypes.ErrInvalidMisbehaviour, "cannot submit misbehaviour to localhost client") +} + +// CheckSubstituteAndUpdateState returns an error. The localhost cannot be modified by +// proposals. 
+func (cs ClientState) CheckSubstituteAndUpdateState( + ctx sdk.Context, _ codec.BinaryMarshaler, _, _ sdk.KVStore, + _ exported.ClientState, _ exported.Height, +) (exported.ClientState, error) { + return nil, sdkerrors.Wrap(clienttypes.ErrUpdateClientFailed, "cannot update localhost client with a proposal") +} + +// VerifyUpgradeAndUpdateState returns an error since localhost cannot be upgraded +func (cs ClientState) VerifyUpgradeAndUpdateState( + _ sdk.Context, _ codec.BinaryMarshaler, _ sdk.KVStore, + _ exported.ClientState, _ exported.ConsensusState, _, _ []byte, +) (exported.ClientState, exported.ConsensusState, error) { + return nil, nil, sdkerrors.Wrap(clienttypes.ErrInvalidUpgradeClient, "cannot upgrade localhost client") +} + +// VerifyClientState verifies that the localhost client state is stored locally +func (cs ClientState) VerifyClientState( + store sdk.KVStore, cdc codec.BinaryMarshaler, + _ exported.Height, _ exported.Prefix, _ string, _ []byte, clientState exported.ClientState, +) error { + path := host.KeyClientState + bz := store.Get([]byte(path)) + if bz == nil { + return sdkerrors.Wrapf(clienttypes.ErrFailedClientStateVerification, + "not found for path: %s", path) + } + + selfClient := clienttypes.MustUnmarshalClientState(cdc, bz) + + if !reflect.DeepEqual(selfClient, clientState) { + return sdkerrors.Wrapf(clienttypes.ErrFailedClientStateVerification, + "stored clientState != provided clientState: \n%v\n≠\n%v", + selfClient, clientState, + ) + } + return nil +} + +// VerifyClientConsensusState returns nil since a local host client does not store consensus +// states. +func (cs ClientState) VerifyClientConsensusState( + sdk.KVStore, codec.BinaryMarshaler, + exported.Height, string, exported.Height, exported.Prefix, + []byte, exported.ConsensusState, +) error { + return nil +} + +// VerifyConnectionState verifies a proof of the connection state of the +// specified connection end stored locally. +func (cs ClientState) VerifyConnectionState( + store sdk.KVStore, + cdc codec.BinaryMarshaler, + _ exported.Height, + _ exported.Prefix, + _ []byte, + connectionID string, + connectionEnd exported.ConnectionI, +) error { + path := host.ConnectionKey(connectionID) + bz := store.Get(path) + if bz == nil { + return sdkerrors.Wrapf(clienttypes.ErrFailedConnectionStateVerification, "not found for path %s", path) + } + + var prevConnection connectiontypes.ConnectionEnd + err := cdc.UnmarshalBinaryBare(bz, &prevConnection) + if err != nil { + return err + } + + if !reflect.DeepEqual(&prevConnection, connectionEnd) { + return sdkerrors.Wrapf( + clienttypes.ErrFailedConnectionStateVerification, + "connection end ≠ previous stored connection: \n%v\n≠\n%v", connectionEnd, prevConnection, + ) + } + + return nil +} + +// VerifyChannelState verifies a proof of the channel state of the specified +// channel end, under the specified port, stored on the local machine. 
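The Verify* methods in this file all follow the same pattern instead of checking a Merkle proof: the proof and prefix arguments are ignored and the expected value is compared against whatever the local chain has stored under the corresponding host key. A minimal sketch of that pattern, using a hypothetical helper name:

```go
package localhostsketch

import (
	"bytes"
	"fmt"

	sdk "github.com/cosmos/cosmos-sdk/types"
)

// verifyLocalBytes is a hypothetical helper that captures the localhost
// verification pattern: read the value stored under key on the local chain and
// compare it byte-for-byte with the expected value, ignoring proofs entirely.
func verifyLocalBytes(store sdk.KVStore, key, expected []byte) error {
	stored := store.Get(key)
	if len(stored) == 0 {
		return fmt.Errorf("no value stored under key %s", key)
	}
	if !bytes.Equal(stored, expected) {
		return fmt.Errorf("stored value does not match the expected value")
	}
	return nil
}
```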
+func (cs ClientState) VerifyChannelState( + store sdk.KVStore, + cdc codec.BinaryMarshaler, + _ exported.Height, + prefix exported.Prefix, + _ []byte, + portID, + channelID string, + channel exported.ChannelI, +) error { + path := host.ChannelKey(portID, channelID) + bz := store.Get(path) + if bz == nil { + return sdkerrors.Wrapf(clienttypes.ErrFailedChannelStateVerification, "not found for path %s", path) + } + + var prevChannel channeltypes.Channel + err := cdc.UnmarshalBinaryBare(bz, &prevChannel) + if err != nil { + return err + } + + if !reflect.DeepEqual(&prevChannel, channel) { + return sdkerrors.Wrapf( + clienttypes.ErrFailedChannelStateVerification, + "channel end ≠ previous stored channel: \n%v\n≠\n%v", channel, prevChannel, + ) + } + + return nil +} + +// VerifyPacketCommitment verifies a proof of an outgoing packet commitment at +// the specified port, specified channel, and specified sequence. +func (cs ClientState) VerifyPacketCommitment( + store sdk.KVStore, + _ codec.BinaryMarshaler, + _ exported.Height, + _ uint64, + _ uint64, + _ exported.Prefix, + _ []byte, + portID, + channelID string, + sequence uint64, + commitmentBytes []byte, +) error { + path := host.PacketCommitmentKey(portID, channelID, sequence) + + data := store.Get(path) + if len(data) == 0 { + return sdkerrors.Wrapf(clienttypes.ErrFailedPacketCommitmentVerification, "not found for path %s", path) + } + + if !bytes.Equal(data, commitmentBytes) { + return sdkerrors.Wrapf( + clienttypes.ErrFailedPacketCommitmentVerification, + "commitment ≠ previous commitment: \n%X\n≠\n%X", commitmentBytes, data, + ) + } + + return nil +} + +// VerifyPacketAcknowledgement verifies a proof of an incoming packet +// acknowledgement at the specified port, specified channel, and specified sequence. +func (cs ClientState) VerifyPacketAcknowledgement( + store sdk.KVStore, + _ codec.BinaryMarshaler, + _ exported.Height, + _ uint64, + _ uint64, + _ exported.Prefix, + _ []byte, + portID, + channelID string, + sequence uint64, + acknowledgement []byte, +) error { + path := host.PacketAcknowledgementKey(portID, channelID, sequence) + + data := store.Get(path) + if len(data) == 0 { + return sdkerrors.Wrapf(clienttypes.ErrFailedPacketAckVerification, "not found for path %s", path) + } + + if !bytes.Equal(data, acknowledgement) { + return sdkerrors.Wrapf( + clienttypes.ErrFailedPacketAckVerification, + "ak bytes ≠ previous ack: \n%X\n≠\n%X", acknowledgement, data, + ) + } + + return nil +} + +// VerifyPacketReceiptAbsence verifies a proof of the absence of an +// incoming packet receipt at the specified port, specified channel, and +// specified sequence. +func (cs ClientState) VerifyPacketReceiptAbsence( + store sdk.KVStore, + _ codec.BinaryMarshaler, + _ exported.Height, + _ uint64, + _ uint64, + _ exported.Prefix, + _ []byte, + portID, + channelID string, + sequence uint64, +) error { + path := host.PacketReceiptKey(portID, channelID, sequence) + + data := store.Get(path) + if data != nil { + return sdkerrors.Wrap(clienttypes.ErrFailedPacketReceiptVerification, "expected no packet receipt") + } + + return nil +} + +// VerifyNextSequenceRecv verifies a proof of the next sequence number to be +// received of the specified channel at the specified port. 
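VerifyNextSequenceRecv, defined next, decodes the stored value as a big-endian uint64. The round trip below shows that encoding; the sequence value 5 is arbitrary.

```go
package main

import (
	"encoding/binary"
	"fmt"

	sdk "github.com/cosmos/cosmos-sdk/types"
)

func main() {
	// The next receive sequence is stored as 8 big-endian bytes; this mirrors
	// how VerifyNextSequenceRecv decodes it with binary.BigEndian.Uint64.
	bz := sdk.Uint64ToBigEndian(5)
	fmt.Printf("%X\n", bz)                   // 0000000000000005
	fmt.Println(binary.BigEndian.Uint64(bz)) // 5
}
```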
+func (cs ClientState) VerifyNextSequenceRecv( + store sdk.KVStore, + _ codec.BinaryMarshaler, + _ exported.Height, + _ uint64, + _ uint64, + _ exported.Prefix, + _ []byte, + portID, + channelID string, + nextSequenceRecv uint64, +) error { + path := host.NextSequenceRecvKey(portID, channelID) + + data := store.Get(path) + if len(data) == 0 { + return sdkerrors.Wrapf(clienttypes.ErrFailedNextSeqRecvVerification, "not found for path %s", path) + } + + prevSequenceRecv := binary.BigEndian.Uint64(data) + if prevSequenceRecv != nextSequenceRecv { + return sdkerrors.Wrapf( + clienttypes.ErrFailedNextSeqRecvVerification, + "next sequence receive ≠ previous stored sequence (%d ≠ %d)", nextSequenceRecv, prevSequenceRecv, + ) + } + + return nil +} diff --git a/light-clients/09-localhost/types/client_state_test.go b/light-clients/09-localhost/types/client_state_test.go new file mode 100644 index 0000000000..bc58f62539 --- /dev/null +++ b/light-clients/09-localhost/types/client_state_test.go @@ -0,0 +1,520 @@ +package types_test + +import ( + sdk "github.com/cosmos/cosmos-sdk/types" + clienttypes "github.com/cosmos/cosmos-sdk/x/ibc/core/02-client/types" + connectiontypes "github.com/cosmos/cosmos-sdk/x/ibc/core/03-connection/types" + channeltypes "github.com/cosmos/cosmos-sdk/x/ibc/core/04-channel/types" + commitmenttypes "github.com/cosmos/cosmos-sdk/x/ibc/core/23-commitment/types" + host "github.com/cosmos/cosmos-sdk/x/ibc/core/24-host" + "github.com/cosmos/cosmos-sdk/x/ibc/core/exported" + ibctmtypes "github.com/cosmos/cosmos-sdk/x/ibc/light-clients/07-tendermint/types" + "github.com/cosmos/cosmos-sdk/x/ibc/light-clients/09-localhost/types" +) + +const ( + testConnectionID = "connectionid" + testPortID = "testportid" + testChannelID = "testchannelid" + testSequence = 1 +) + +func (suite *LocalhostTestSuite) TestValidate() { + testCases := []struct { + name string + clientState *types.ClientState + expPass bool + }{ + { + name: "valid client", + clientState: types.NewClientState("chainID", clienttypes.NewHeight(3, 10)), + expPass: true, + }, + { + name: "invalid chain id", + clientState: types.NewClientState(" ", clienttypes.NewHeight(3, 10)), + expPass: false, + }, + { + name: "invalid height", + clientState: types.NewClientState("chainID", clienttypes.ZeroHeight()), + expPass: false, + }, + } + + for _, tc := range testCases { + err := tc.clientState.Validate() + if tc.expPass { + suite.Require().NoError(err, tc.name) + } else { + suite.Require().Error(err, tc.name) + } + } +} + +func (suite *LocalhostTestSuite) TestInitialize() { + testCases := []struct { + name string + consState exported.ConsensusState + expPass bool + }{ + { + "valid initialization", + nil, + true, + }, + { + "invalid consenus state", + &ibctmtypes.ConsensusState{}, + false, + }, + } + + clientState := types.NewClientState("chainID", clienttypes.NewHeight(3, 10)) + + for _, tc := range testCases { + err := clientState.Initialize(suite.ctx, suite.cdc, suite.store, tc.consState) + + if tc.expPass { + suite.Require().NoError(err, "valid testcase: %s failed", tc.name) + } else { + suite.Require().Error(err, "invalid testcase: %s passed", tc.name) + } + } +} + +func (suite *LocalhostTestSuite) TestVerifyClientState() { + clientState := types.NewClientState("chainID", clientHeight) + invalidClient := types.NewClientState("chainID", clienttypes.NewHeight(0, 12)) + + testCases := []struct { + name string + clientState *types.ClientState + malleate func() + counterparty *types.ClientState + expPass bool + }{ + { + name: "proof 
verification success", + clientState: clientState, + malleate: func() { + bz := clienttypes.MustMarshalClientState(suite.cdc, clientState) + suite.store.Set(host.ClientStateKey(), bz) + }, + counterparty: clientState, + expPass: true, + }, + { + name: "proof verification failed: invalid client", + clientState: clientState, + malleate: func() { + bz := clienttypes.MustMarshalClientState(suite.cdc, clientState) + suite.store.Set(host.ClientStateKey(), bz) + }, + counterparty: invalidClient, + expPass: false, + }, + { + name: "proof verification failed: client not stored", + clientState: clientState, + malleate: func() {}, + counterparty: clientState, + expPass: false, + }, + } + + for _, tc := range testCases { + tc := tc + + suite.Run(tc.name, func() { + suite.SetupTest() + tc.malleate() + + err := tc.clientState.VerifyClientState( + suite.store, suite.cdc, clienttypes.NewHeight(0, 10), nil, "", []byte{}, tc.counterparty, + ) + + if tc.expPass { + suite.Require().NoError(err) + } else { + suite.Require().Error(err) + } + }) + } + +} + +func (suite *LocalhostTestSuite) TestVerifyClientConsensusState() { + clientState := types.NewClientState("chainID", clientHeight) + err := clientState.VerifyClientConsensusState( + nil, nil, nil, "", nil, nil, nil, nil, + ) + suite.Require().NoError(err) +} + +func (suite *LocalhostTestSuite) TestCheckHeaderAndUpdateState() { + clientState := types.NewClientState("chainID", clientHeight) + cs, _, err := clientState.CheckHeaderAndUpdateState(suite.ctx, nil, nil, nil) + suite.Require().NoError(err) + suite.Require().Equal(uint64(0), cs.GetLatestHeight().GetRevisionNumber()) + suite.Require().Equal(suite.ctx.BlockHeight(), int64(cs.GetLatestHeight().GetRevisionHeight())) + suite.Require().Equal(suite.ctx.BlockHeader().ChainID, clientState.ChainId) +} + +func (suite *LocalhostTestSuite) TestMisbehaviourAndUpdateState() { + clientState := types.NewClientState("chainID", clientHeight) + cs, err := clientState.CheckMisbehaviourAndUpdateState(suite.ctx, nil, nil, nil) + suite.Require().Error(err) + suite.Require().Nil(cs) +} + +func (suite *LocalhostTestSuite) TestProposedHeaderAndUpdateState() { + clientState := types.NewClientState("chainID", clientHeight) + cs, err := clientState.CheckSubstituteAndUpdateState(suite.ctx, nil, nil, nil, nil, nil) + suite.Require().Error(err) + suite.Require().Nil(cs) +} + +func (suite *LocalhostTestSuite) TestVerifyConnectionState() { + counterparty := connectiontypes.NewCounterparty("clientB", testConnectionID, commitmenttypes.NewMerklePrefix([]byte("ibc"))) + conn1 := connectiontypes.NewConnectionEnd(connectiontypes.OPEN, "clientA", counterparty, []*connectiontypes.Version{connectiontypes.NewVersion("1", nil)}, 0) + conn2 := connectiontypes.NewConnectionEnd(connectiontypes.OPEN, "clientA", counterparty, []*connectiontypes.Version{connectiontypes.NewVersion("2", nil)}, 0) + + testCases := []struct { + name string + clientState *types.ClientState + malleate func() + connection connectiontypes.ConnectionEnd + expPass bool + }{ + { + name: "proof verification success", + clientState: types.NewClientState("chainID", clientHeight), + malleate: func() { + bz, err := suite.cdc.MarshalBinaryBare(&conn1) + suite.Require().NoError(err) + suite.store.Set(host.ConnectionKey(testConnectionID), bz) + }, + connection: conn1, + expPass: true, + }, + { + name: "proof verification failed: connection not stored", + clientState: types.NewClientState("chainID", clientHeight), + malleate: func() {}, + connection: conn1, + expPass: false, + }, + { + 
name: "proof verification failed: unmarshal error", + clientState: types.NewClientState("chainID", clientHeight), + malleate: func() { + suite.store.Set(host.ConnectionKey(testConnectionID), []byte("connection")) + }, + connection: conn1, + expPass: false, + }, + { + name: "proof verification failed: different connection stored", + clientState: types.NewClientState("chainID", clientHeight), + malleate: func() { + bz, err := suite.cdc.MarshalBinaryBare(&conn2) + suite.Require().NoError(err) + suite.store.Set(host.ConnectionKey(testConnectionID), bz) + }, + connection: conn1, + expPass: false, + }, + } + + for _, tc := range testCases { + tc := tc + + suite.Run(tc.name, func() { + suite.SetupTest() + tc.malleate() + + err := tc.clientState.VerifyConnectionState( + suite.store, suite.cdc, clientHeight, nil, []byte{}, testConnectionID, &tc.connection, + ) + + if tc.expPass { + suite.Require().NoError(err) + } else { + suite.Require().Error(err) + } + }) + } +} + +func (suite *LocalhostTestSuite) TestVerifyChannelState() { + counterparty := channeltypes.NewCounterparty(testPortID, testChannelID) + ch1 := channeltypes.NewChannel(channeltypes.OPEN, channeltypes.ORDERED, counterparty, []string{testConnectionID}, "1.0.0") + ch2 := channeltypes.NewChannel(channeltypes.OPEN, channeltypes.ORDERED, counterparty, []string{testConnectionID}, "2.0.0") + + testCases := []struct { + name string + clientState *types.ClientState + malleate func() + channel channeltypes.Channel + expPass bool + }{ + { + name: "proof verification success", + clientState: types.NewClientState("chainID", clientHeight), + malleate: func() { + bz, err := suite.cdc.MarshalBinaryBare(&ch1) + suite.Require().NoError(err) + suite.store.Set(host.ChannelKey(testPortID, testChannelID), bz) + }, + channel: ch1, + expPass: true, + }, + { + name: "proof verification failed: channel not stored", + clientState: types.NewClientState("chainID", clientHeight), + malleate: func() {}, + channel: ch1, + expPass: false, + }, + { + name: "proof verification failed: unmarshal failed", + clientState: types.NewClientState("chainID", clientHeight), + malleate: func() { + suite.store.Set(host.ChannelKey(testPortID, testChannelID), []byte("channel")) + + }, + channel: ch1, + expPass: false, + }, + { + name: "proof verification failed: different channel stored", + clientState: types.NewClientState("chainID", clientHeight), + malleate: func() { + bz, err := suite.cdc.MarshalBinaryBare(&ch2) + suite.Require().NoError(err) + suite.store.Set(host.ChannelKey(testPortID, testChannelID), bz) + + }, + channel: ch1, + expPass: false, + }, + } + + for _, tc := range testCases { + tc := tc + + suite.Run(tc.name, func() { + suite.SetupTest() + tc.malleate() + + err := tc.clientState.VerifyChannelState( + suite.store, suite.cdc, clientHeight, nil, []byte{}, testPortID, testChannelID, &tc.channel, + ) + + if tc.expPass { + suite.Require().NoError(err) + } else { + suite.Require().Error(err) + } + }) + } +} + +func (suite *LocalhostTestSuite) TestVerifyPacketCommitment() { + testCases := []struct { + name string + clientState *types.ClientState + malleate func() + commitment []byte + expPass bool + }{ + { + name: "proof verification success", + clientState: types.NewClientState("chainID", clientHeight), + malleate: func() { + suite.store.Set( + host.PacketCommitmentKey(testPortID, testChannelID, testSequence), []byte("commitment"), + ) + }, + commitment: []byte("commitment"), + expPass: true, + }, + { + name: "proof verification failed: different commitment stored", + 
clientState: types.NewClientState("chainID", clientHeight), + malleate: func() { + suite.store.Set( + host.PacketCommitmentKey(testPortID, testChannelID, testSequence), []byte("different"), + ) + }, + commitment: []byte("commitment"), + expPass: false, + }, + { + name: "proof verification failed: no commitment stored", + clientState: types.NewClientState("chainID", clientHeight), + malleate: func() {}, + commitment: []byte{}, + expPass: false, + }, + } + + for _, tc := range testCases { + tc := tc + + suite.Run(tc.name, func() { + suite.SetupTest() + tc.malleate() + + err := tc.clientState.VerifyPacketCommitment( + suite.store, suite.cdc, clientHeight, 0, 0, nil, []byte{}, testPortID, testChannelID, testSequence, tc.commitment, + ) + + if tc.expPass { + suite.Require().NoError(err) + } else { + suite.Require().Error(err) + } + }) + } +} + +func (suite *LocalhostTestSuite) TestVerifyPacketAcknowledgement() { + testCases := []struct { + name string + clientState *types.ClientState + malleate func() + ack []byte + expPass bool + }{ + { + name: "proof verification success", + clientState: types.NewClientState("chainID", clientHeight), + malleate: func() { + suite.store.Set( + host.PacketAcknowledgementKey(testPortID, testChannelID, testSequence), []byte("acknowledgement"), + ) + }, + ack: []byte("acknowledgement"), + expPass: true, + }, + { + name: "proof verification failed: different ack stored", + clientState: types.NewClientState("chainID", clientHeight), + malleate: func() { + suite.store.Set( + host.PacketAcknowledgementKey(testPortID, testChannelID, testSequence), []byte("different"), + ) + }, + ack: []byte("acknowledgement"), + expPass: false, + }, + { + name: "proof verification failed: no commitment stored", + clientState: types.NewClientState("chainID", clientHeight), + malleate: func() {}, + ack: []byte{}, + expPass: false, + }, + } + + for _, tc := range testCases { + tc := tc + + suite.Run(tc.name, func() { + suite.SetupTest() + tc.malleate() + + err := tc.clientState.VerifyPacketAcknowledgement( + suite.store, suite.cdc, clientHeight, 0, 0, nil, []byte{}, testPortID, testChannelID, testSequence, tc.ack, + ) + + if tc.expPass { + suite.Require().NoError(err) + } else { + suite.Require().Error(err) + } + }) + } +} + +func (suite *LocalhostTestSuite) TestVerifyPacketReceiptAbsence() { + clientState := types.NewClientState("chainID", clientHeight) + + err := clientState.VerifyPacketReceiptAbsence( + suite.store, suite.cdc, clientHeight, 0, 0, nil, nil, testPortID, testChannelID, testSequence, + ) + + suite.Require().NoError(err, "receipt absence failed") + + suite.store.Set(host.PacketReceiptKey(testPortID, testChannelID, testSequence), []byte("receipt")) + + err = clientState.VerifyPacketReceiptAbsence( + suite.store, suite.cdc, clientHeight, 0, 0, nil, nil, testPortID, testChannelID, testSequence, + ) + suite.Require().Error(err, "receipt exists in store") +} + +func (suite *LocalhostTestSuite) TestVerifyNextSeqRecv() { + nextSeqRecv := uint64(5) + + testCases := []struct { + name string + clientState *types.ClientState + malleate func() + nextSeqRecv uint64 + expPass bool + }{ + { + name: "proof verification success", + clientState: types.NewClientState("chainID", clientHeight), + malleate: func() { + suite.store.Set( + host.NextSequenceRecvKey(testPortID, testChannelID), + sdk.Uint64ToBigEndian(nextSeqRecv), + ) + }, + nextSeqRecv: nextSeqRecv, + expPass: true, + }, + { + name: "proof verification failed: different nextSeqRecv stored", + clientState: 
types.NewClientState("chainID", clientHeight), + malleate: func() { + suite.store.Set( + host.NextSequenceRecvKey(testPortID, testChannelID), + sdk.Uint64ToBigEndian(3), + ) + }, + nextSeqRecv: nextSeqRecv, + expPass: false, + }, + { + name: "proof verification failed: no nextSeqRecv stored", + clientState: types.NewClientState("chainID", clientHeight), + malleate: func() {}, + nextSeqRecv: nextSeqRecv, + expPass: false, + }, + } + + for _, tc := range testCases { + tc := tc + + suite.Run(tc.name, func() { + suite.SetupTest() + tc.malleate() + + err := tc.clientState.VerifyNextSequenceRecv( + suite.store, suite.cdc, clientHeight, 0, 0, nil, []byte{}, testPortID, testChannelID, nextSeqRecv, + ) + + if tc.expPass { + suite.Require().NoError(err) + } else { + suite.Require().Error(err) + } + }) + } +} diff --git a/light-clients/09-localhost/types/codec.go b/light-clients/09-localhost/types/codec.go new file mode 100644 index 0000000000..b338dfb699 --- /dev/null +++ b/light-clients/09-localhost/types/codec.go @@ -0,0 +1,15 @@ +package types + +import ( + codectypes "github.com/cosmos/cosmos-sdk/codec/types" + "github.com/cosmos/cosmos-sdk/x/ibc/core/exported" +) + +// RegisterInterfaces register the ibc interfaces submodule implementations to protobuf +// Any. +func RegisterInterfaces(registry codectypes.InterfaceRegistry) { + registry.RegisterImplementations( + (*exported.ClientState)(nil), + &ClientState{}, + ) +} diff --git a/light-clients/09-localhost/types/errors.go b/light-clients/09-localhost/types/errors.go new file mode 100644 index 0000000000..57ad7c1f6a --- /dev/null +++ b/light-clients/09-localhost/types/errors.go @@ -0,0 +1,10 @@ +package types + +import ( + sdkerrors "github.com/cosmos/cosmos-sdk/types/errors" +) + +// Localhost sentinel errors +var ( + ErrConsensusStatesNotStored = sdkerrors.Register(SubModuleName, 2, "localhost does not store consensus states") +) diff --git a/light-clients/09-localhost/types/keys.go b/light-clients/09-localhost/types/keys.go new file mode 100644 index 0000000000..2fe7c7e48f --- /dev/null +++ b/light-clients/09-localhost/types/keys.go @@ -0,0 +1,6 @@ +package types + +const ( + // SubModuleName for the localhost (loopback) client + SubModuleName = "localhost" +) diff --git a/light-clients/09-localhost/types/localhost.pb.go b/light-clients/09-localhost/types/localhost.pb.go new file mode 100644 index 0000000000..bf2ec3a5b9 --- /dev/null +++ b/light-clients/09-localhost/types/localhost.pb.go @@ -0,0 +1,369 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: ibcgo/lightclients/localhost/v1/localhost.proto + +package types + +import ( + fmt "fmt" + types "github.com/cosmos/ibc-go/core/02-client/types" + _ "github.com/gogo/protobuf/gogoproto" + proto "github.com/gogo/protobuf/proto" + io "io" + math "math" + math_bits "math/bits" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +// ClientState defines a loopback (localhost) client. It requires (read-only) +// access to keys outside the client prefix. 
+type ClientState struct { + // self chain ID + ChainId string `protobuf:"bytes,1,opt,name=chain_id,json=chainId,proto3" json:"chain_id,omitempty" yaml:"chain_id"` + // self latest block height + Height types.Height `protobuf:"bytes,2,opt,name=height,proto3" json:"height"` +} + +func (m *ClientState) Reset() { *m = ClientState{} } +func (m *ClientState) String() string { return proto.CompactTextString(m) } +func (*ClientState) ProtoMessage() {} +func (*ClientState) Descriptor() ([]byte, []int) { + return fileDescriptor_1a6dbd867337bf2e, []int{0} +} +func (m *ClientState) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ClientState) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_ClientState.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *ClientState) XXX_Merge(src proto.Message) { + xxx_messageInfo_ClientState.Merge(m, src) +} +func (m *ClientState) XXX_Size() int { + return m.Size() +} +func (m *ClientState) XXX_DiscardUnknown() { + xxx_messageInfo_ClientState.DiscardUnknown(m) +} + +var xxx_messageInfo_ClientState proto.InternalMessageInfo + +func init() { + proto.RegisterType((*ClientState)(nil), "ibcgo.lightclients.localhost.v1.ClientState") +} + +func init() { + proto.RegisterFile("ibcgo/lightclients/localhost/v1/localhost.proto", fileDescriptor_1a6dbd867337bf2e) +} + +var fileDescriptor_1a6dbd867337bf2e = []byte{ + // 275 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0xd2, 0xcf, 0x4c, 0x4a, 0x4e, + 0xcf, 0xd7, 0xcf, 0xc9, 0x4c, 0xcf, 0x28, 0x49, 0xce, 0xc9, 0x4c, 0xcd, 0x2b, 0x29, 0xd6, 0xcf, + 0xc9, 0x4f, 0x4e, 0xcc, 0xc9, 0xc8, 0x2f, 0x2e, 0xd1, 0x2f, 0x33, 0x44, 0x70, 0xf4, 0x0a, 0x8a, + 0xf2, 0x4b, 0xf2, 0x85, 0xe4, 0xc1, 0x1a, 0xf4, 0x90, 0x35, 0xe8, 0x21, 0xd4, 0x94, 0x19, 0x4a, + 0x89, 0xa4, 0xe7, 0xa7, 0xe7, 0x83, 0xd5, 0xea, 0x83, 0x58, 0x10, 0x6d, 0x52, 0x8a, 0x10, 0x7b, + 0x92, 0xf3, 0x8b, 0x52, 0xf5, 0x21, 0xda, 0x40, 0x86, 0x43, 0x58, 0x10, 0x25, 0x4a, 0xf5, 0x5c, + 0xdc, 0xce, 0x60, 0x7e, 0x70, 0x49, 0x62, 0x49, 0xaa, 0x90, 0x1e, 0x17, 0x47, 0x72, 0x46, 0x62, + 0x66, 0x5e, 0x7c, 0x66, 0x8a, 0x04, 0xa3, 0x02, 0xa3, 0x06, 0xa7, 0x93, 0xf0, 0xa7, 0x7b, 0xf2, + 0xfc, 0x95, 0x89, 0xb9, 0x39, 0x56, 0x4a, 0x30, 0x19, 0xa5, 0x20, 0x76, 0x30, 0xd3, 0x33, 0x45, + 0xc8, 0x8a, 0x8b, 0x2d, 0x23, 0x15, 0xe4, 0x2a, 0x09, 0x26, 0x05, 0x46, 0x0d, 0x6e, 0x23, 0x19, + 0x3d, 0x88, 0x4b, 0x41, 0x56, 0xea, 0x41, 0x2d, 0x2a, 0x33, 0xd4, 0xf3, 0x00, 0xab, 0x71, 0x62, + 0x39, 0x71, 0x4f, 0x9e, 0x21, 0x08, 0xaa, 0xc3, 0x8a, 0xa5, 0x63, 0x81, 0x3c, 0x83, 0x53, 0xf0, + 0x89, 0x47, 0x72, 0x8c, 0x17, 0x1e, 0xc9, 0x31, 0x3e, 0x78, 0x24, 0xc7, 0x38, 0xe1, 0xb1, 0x1c, + 0xc3, 0x85, 0xc7, 0x72, 0x0c, 0x37, 0x1e, 0xcb, 0x31, 0x44, 0x59, 0xa6, 0x67, 0x96, 0x64, 0x94, + 0x26, 0xe9, 0x25, 0xe7, 0xe7, 0xea, 0x27, 0xe7, 0x17, 0xe7, 0xe6, 0x17, 0x83, 0xc2, 0x4d, 0x17, + 0x16, 0x70, 0xba, 0xb0, 0x90, 0x33, 0xb0, 0xd4, 0x45, 0x04, 0x5e, 0x49, 0x65, 0x41, 0x6a, 0x71, + 0x12, 0x1b, 0xd8, 0x73, 0xc6, 0x80, 0x00, 0x00, 0x00, 0xff, 0xff, 0x65, 0xa2, 0xe9, 0xaa, 0x69, + 0x01, 0x00, 0x00, +} + +func (m *ClientState) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ClientState) MarshalTo(dAtA []byte) 
(int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ClientState) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.Height.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintLocalhost(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + if len(m.ChainId) > 0 { + i -= len(m.ChainId) + copy(dAtA[i:], m.ChainId) + i = encodeVarintLocalhost(dAtA, i, uint64(len(m.ChainId))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func encodeVarintLocalhost(dAtA []byte, offset int, v uint64) int { + offset -= sovLocalhost(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *ClientState) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.ChainId) + if l > 0 { + n += 1 + l + sovLocalhost(uint64(l)) + } + l = m.Height.Size() + n += 1 + l + sovLocalhost(uint64(l)) + return n +} + +func sovLocalhost(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozLocalhost(x uint64) (n int) { + return sovLocalhost(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *ClientState) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowLocalhost + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ClientState: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ClientState: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ChainId", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowLocalhost + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthLocalhost + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthLocalhost + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ChainId = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Height", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowLocalhost + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthLocalhost + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthLocalhost + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Height.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipLocalhost(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthLocalhost + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipLocalhost(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowLocalhost + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowLocalhost + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowLocalhost + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthLocalhost + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupLocalhost + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthLocalhost + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthLocalhost = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowLocalhost = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupLocalhost = fmt.Errorf("proto: unexpected end of group") +) diff --git a/light-clients/09-localhost/types/localhost_test.go b/light-clients/09-localhost/types/localhost_test.go new file mode 100644 index 0000000000..8ebaef843b --- /dev/null +++ b/light-clients/09-localhost/types/localhost_test.go @@ -0,0 +1,43 @@ +package types_test + +import ( + "testing" + + "github.com/stretchr/testify/suite" + tmproto "github.com/tendermint/tendermint/proto/tendermint/types" + + "github.com/cosmos/cosmos-sdk/codec" + "github.com/cosmos/cosmos-sdk/simapp" + sdk "github.com/cosmos/cosmos-sdk/types" + clienttypes "github.com/cosmos/cosmos-sdk/x/ibc/core/02-client/types" + "github.com/cosmos/cosmos-sdk/x/ibc/core/exported" +) + +const ( + height = 4 +) + +var ( + clientHeight = clienttypes.NewHeight(0, 10) +) + +type LocalhostTestSuite struct { + suite.Suite + + cdc codec.Marshaler + ctx sdk.Context + store sdk.KVStore +} + +func (suite *LocalhostTestSuite) SetupTest() { + isCheckTx := false + app := simapp.Setup(isCheckTx) + + suite.cdc = app.AppCodec() + suite.ctx = app.BaseApp.NewContext(isCheckTx, tmproto.Header{Height: 1, ChainID: "ibc-chain"}) + suite.store = app.IBCKeeper.ClientKeeper.ClientStore(suite.ctx, exported.Localhost) +} + +func TestLocalhostTestSuite(t *testing.T) { + suite.Run(t, new(LocalhostTestSuite)) +} diff --git a/proto/ibcgo/apps/transfer/v1/genesis.proto b/proto/ibcgo/apps/transfer/v1/genesis.proto new file mode 100644 index 0000000000..50a681797b --- /dev/null +++ b/proto/ibcgo/apps/transfer/v1/genesis.proto @@ -0,0 +1,19 @@ +syntax = "proto3"; + +package ibcgo.apps.transfer.v1; + +option go_package = "github.com/cosmos/ibc-go/apps/transfer/types"; + +import "ibcgo/apps/transfer/v1/transfer.proto"; +import "gogoproto/gogo.proto"; + +// GenesisState defines the ibc-transfer genesis state +message GenesisState { + string port_id = 1 [ 
(gogoproto.moretags) = "yaml:\"port_id\"" ]; + repeated DenomTrace denom_traces = 2 [ + (gogoproto.castrepeated) = "Traces", + (gogoproto.nullable) = false, + (gogoproto.moretags) = "yaml:\"denom_traces\"" + ]; + Params params = 3 [ (gogoproto.nullable) = false ]; +} diff --git a/proto/ibcgo/apps/transfer/v1/query.proto b/proto/ibcgo/apps/transfer/v1/query.proto new file mode 100644 index 0000000000..f7dcb5f80d --- /dev/null +++ b/proto/ibcgo/apps/transfer/v1/query.proto @@ -0,0 +1,68 @@ +syntax = "proto3"; + +package ibcgo.apps.transfer.v1; + +import "gogoproto/gogo.proto"; +import "cosmos/base/query/v1beta1/pagination.proto"; +import "ibcgo/apps/transfer/v1/transfer.proto"; +import "google/api/annotations.proto"; + +option go_package = "github.com/cosmos/ibc-go/apps/transfer/types"; + +// Query provides defines the gRPC querier service. +service Query { + // DenomTrace queries a denomination trace information. + rpc DenomTrace(QueryDenomTraceRequest) returns (QueryDenomTraceResponse) { + option (google.api.http).get = "/ibc/apps/transfer/v1/denom_traces/{hash}"; + } + + // DenomTraces queries all denomination traces. + rpc DenomTraces(QueryDenomTracesRequest) returns (QueryDenomTracesResponse) { + option (google.api.http).get = "/ibc/apps/transfer/v1/denom_traces"; + } + + // Params queries all parameters of the ibc-transfer module. + rpc Params(QueryParamsRequest) returns (QueryParamsResponse) { + option (google.api.http).get = "/ibc/apps/transfer/v1/params"; + } +} + +// QueryDenomTraceRequest is the request type for the Query/DenomTrace RPC +// method +message QueryDenomTraceRequest { + // hash (in hex format) of the denomination trace information. + string hash = 1; +} + +// QueryDenomTraceResponse is the response type for the Query/DenomTrace RPC +// method. +message QueryDenomTraceResponse { + // denom_trace returns the requested denomination trace information. + DenomTrace denom_trace = 1; +} + +// QueryConnectionsRequest is the request type for the Query/DenomTraces RPC +// method +message QueryDenomTracesRequest { + // pagination defines an optional pagination for the request. + cosmos.base.query.v1beta1.PageRequest pagination = 1; +} + +// QueryConnectionsResponse is the response type for the Query/DenomTraces RPC +// method. +message QueryDenomTracesResponse { + // denom_traces returns all denominations trace information. + repeated DenomTrace denom_traces = 1 + [ (gogoproto.castrepeated) = "Traces", (gogoproto.nullable) = false ]; + // pagination defines the pagination in the response. + cosmos.base.query.v1beta1.PageResponse pagination = 2; +} + +// QueryParamsRequest is the request type for the Query/Params RPC method. +message QueryParamsRequest {} + +// QueryParamsResponse is the response type for the Query/Params RPC method. +message QueryParamsResponse { + // params defines the parameters of the module. 
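The Query/DenomTrace RPC above looks traces up by hex hash. The sketch below shows where that hash comes from: a port/channel path combined with a base denomination, hashed to form the on-chain ibc/... voucher denom. The trace string is a placeholder, and the cosmos-sdk import path is an assumption for the example since this patch relocates the transfer package.

```go
package main

import (
	"fmt"

	transfertypes "github.com/cosmos/cosmos-sdk/x/ibc/applications/transfer/types"
)

func main() {
	// "transfer/channel-0/uatom" is a placeholder full denom: the port/channel
	// pair the token travelled through followed by its base denomination.
	trace := transfertypes.ParseDenomTrace("transfer/channel-0/uatom")

	fmt.Println(trace.Path)       // transfer/channel-0
	fmt.Println(trace.BaseDenom)  // uatom
	fmt.Println(trace.Hash())     // hex hash accepted by Query/DenomTrace
	fmt.Println(trace.IBCDenom()) // ibc/<hash>, the voucher denom used on chain
}
```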
+ Params params = 1; +} diff --git a/proto/ibcgo/apps/transfer/v1/transfer.proto b/proto/ibcgo/apps/transfer/v1/transfer.proto new file mode 100644 index 0000000000..78c9ed91a0 --- /dev/null +++ b/proto/ibcgo/apps/transfer/v1/transfer.proto @@ -0,0 +1,45 @@ +syntax = "proto3"; + +package ibcgo.apps.transfer.v1; + +option go_package = "github.com/cosmos/ibc-go/apps/transfer/types"; + +import "gogoproto/gogo.proto"; + +// FungibleTokenPacketData defines a struct for the packet payload +// See FungibleTokenPacketData spec: +// https://github.com/cosmos/ics/tree/master/spec/ics-020-fungible-token-transfer#data-structures +message FungibleTokenPacketData { + // the token denomination to be transferred + string denom = 1; + // the token amount to be transferred + uint64 amount = 2; + // the sender address + string sender = 3; + // the recipient address on the destination chain + string receiver = 4; +} + +// DenomTrace contains the base denomination for ICS20 fungible tokens and the +// source tracing information path. +message DenomTrace { + // path defines the chain of port/channel identifiers used for tracing the + // source of the fungible token. + string path = 1; + // base denomination of the relayed fungible token. + string base_denom = 2; +} + +// Params defines the set of IBC transfer parameters. +// NOTE: To prevent a single token from being transferred, set the +// TransfersEnabled parameter to true and then set the bank module's SendEnabled +// parameter for the denomination to false. +message Params { + // send_enabled enables or disables all cross-chain token transfers from this + // chain. + bool send_enabled = 1 [ (gogoproto.moretags) = "yaml:\"send_enabled\"" ]; + // receive_enabled enables or disables all cross-chain token transfers to this + // chain. + bool receive_enabled = 2 + [ (gogoproto.moretags) = "yaml:\"receive_enabled\"" ]; +} diff --git a/proto/ibcgo/apps/transfer/v1/tx.proto b/proto/ibcgo/apps/transfer/v1/tx.proto new file mode 100644 index 0000000000..a6b6a5d69a --- /dev/null +++ b/proto/ibcgo/apps/transfer/v1/tx.proto @@ -0,0 +1,48 @@ +syntax = "proto3"; + +package ibcgo.apps.transfer.v1; + +option go_package = "github.com/cosmos/ibc-go/apps/transfer/types"; + +import "gogoproto/gogo.proto"; +import "cosmos/base/v1beta1/coin.proto"; +import "ibcgo/core/client/v1/client.proto"; + +// Msg defines the ibc/transfer Msg service. +service Msg { + // Transfer defines a rpc handler method for MsgTransfer. + rpc Transfer(MsgTransfer) returns (MsgTransferResponse); +} + +// MsgTransfer defines a msg to transfer fungible tokens (i.e Coins) between +// ICS20 enabled chains. See ICS Spec here: +// https://github.com/cosmos/ics/tree/master/spec/ics-020-fungible-token-transfer#data-structures +message MsgTransfer { + option (gogoproto.equal) = false; + option (gogoproto.goproto_getters) = false; + + // the port on which the packet will be sent + string source_port = 1 [ (gogoproto.moretags) = "yaml:\"source_port\"" ]; + // the channel by which the packet will be sent + string source_channel = 2 + [ (gogoproto.moretags) = "yaml:\"source_channel\"" ]; + // the tokens to be transferred + cosmos.base.v1beta1.Coin token = 3 [ (gogoproto.nullable) = false ]; + // the sender address + string sender = 4; + // the recipient address on the destination chain + string receiver = 5; + // Timeout height relative to the current block height. + // The timeout is disabled when set to 0. 
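MsgTransfer carries two independent timeouts, each disabled when set to zero as the comments note. A minimal sketch constructing the message from the generated struct; the port, channel, coin, and addresses are placeholders, and a real client would still run ValidateBasic and broadcast the message in a transaction.

```go
package main

import (
	"fmt"

	sdk "github.com/cosmos/cosmos-sdk/types"
	transfertypes "github.com/cosmos/cosmos-sdk/x/ibc/applications/transfer/types"
	clienttypes "github.com/cosmos/cosmos-sdk/x/ibc/core/02-client/types"
)

func main() {
	// Placeholder identifiers and addresses; a height-based timeout is set and
	// the timestamp-based timeout is disabled (zero).
	msg := &transfertypes.MsgTransfer{
		SourcePort:       "transfer",
		SourceChannel:    "channel-0",
		Token:            sdk.NewCoin("uatom", sdk.NewInt(100)),
		Sender:           "cosmos1...", // placeholder sender address
		Receiver:         "cosmos1...", // placeholder receiver address
		TimeoutHeight:    clienttypes.NewHeight(1, 1000),
		TimeoutTimestamp: 0,
	}
	fmt.Printf("%+v\n", msg)
}
```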
+ ibcgo.core.client.v1.Height timeout_height = 6 [ + (gogoproto.moretags) = "yaml:\"timeout_height\"", + (gogoproto.nullable) = false + ]; + // Timeout timestamp (in nanoseconds) relative to the current block timestamp. + // The timeout is disabled when set to 0. + uint64 timeout_timestamp = 7 + [ (gogoproto.moretags) = "yaml:\"timeout_timestamp\"" ]; +} + +// MsgTransferResponse defines the Msg/Transfer response type. +message MsgTransferResponse {} diff --git a/proto/ibcgo/core/channel/v1/channel.proto b/proto/ibcgo/core/channel/v1/channel.proto new file mode 100644 index 0000000000..459e852d6b --- /dev/null +++ b/proto/ibcgo/core/channel/v1/channel.proto @@ -0,0 +1,157 @@ +syntax = "proto3"; + +package ibcgo.core.channel.v1; + +option go_package = "github.com/cosmos/ibc-go/core/04-channel/types"; + +import "gogoproto/gogo.proto"; +import "ibcgo/core/client/v1/client.proto"; + +// Channel defines pipeline for exactly-once packet delivery between specific +// modules on separate blockchains, which has at least one end capable of +// sending packets and one end capable of receiving packets. +message Channel { + option (gogoproto.goproto_getters) = false; + + // current state of the channel end + State state = 1; + // whether the channel is ordered or unordered + Order ordering = 2; + // counterparty channel end + Counterparty counterparty = 3 [ (gogoproto.nullable) = false ]; + // list of connection identifiers, in order, along which packets sent on + // this channel will travel + repeated string connection_hops = 4 + [ (gogoproto.moretags) = "yaml:\"connection_hops\"" ]; + // opaque channel version, which is agreed upon during the handshake + string version = 5; +} + +// IdentifiedChannel defines a channel with additional port and channel +// identifier fields. +message IdentifiedChannel { + option (gogoproto.goproto_getters) = false; + + // current state of the channel end + State state = 1; + // whether the channel is ordered or unordered + Order ordering = 2; + // counterparty channel end + Counterparty counterparty = 3 [ (gogoproto.nullable) = false ]; + // list of connection identifiers, in order, along which packets sent on + // this channel will travel + repeated string connection_hops = 4 + [ (gogoproto.moretags) = "yaml:\"connection_hops\"" ]; + // opaque channel version, which is agreed upon during the handshake + string version = 5; + // port identifier + string port_id = 6; + // channel identifier + string channel_id = 7; +} + +// State defines if a channel is in one of the following states: +// CLOSED, INIT, TRYOPEN, OPEN or UNINITIALIZED. +enum State { + option (gogoproto.goproto_enum_prefix) = false; + + // Default State + STATE_UNINITIALIZED_UNSPECIFIED = 0 + [ (gogoproto.enumvalue_customname) = "UNINITIALIZED" ]; + // A channel has just started the opening handshake. + STATE_INIT = 1 [ (gogoproto.enumvalue_customname) = "INIT" ]; + // A channel has acknowledged the handshake step on the counterparty chain. + STATE_TRYOPEN = 2 [ (gogoproto.enumvalue_customname) = "TRYOPEN" ]; + // A channel has completed the handshake. Open channels are + // ready to send and receive packets. + STATE_OPEN = 3 [ (gogoproto.enumvalue_customname) = "OPEN" ]; + // A channel has been closed and can no longer be used to send or receive + // packets. 
+ STATE_CLOSED = 4 [ (gogoproto.enumvalue_customname) = "CLOSED" ]; +} + +// Order defines if a channel is ORDERED or UNORDERED +enum Order { + option (gogoproto.goproto_enum_prefix) = false; + + // zero-value for channel ordering + ORDER_NONE_UNSPECIFIED = 0 [ (gogoproto.enumvalue_customname) = "NONE" ]; + // packets can be delivered in any order, which may differ from the order in + // which they were sent. + ORDER_UNORDERED = 1 [ (gogoproto.enumvalue_customname) = "UNORDERED" ]; + // packets are delivered exactly in the order which they were sent + ORDER_ORDERED = 2 [ (gogoproto.enumvalue_customname) = "ORDERED" ]; +} + +// Counterparty defines a channel end counterparty +message Counterparty { + option (gogoproto.goproto_getters) = false; + + // port on the counterparty chain which owns the other end of the channel. + string port_id = 1 [ (gogoproto.moretags) = "yaml:\"port_id\"" ]; + // channel end on the counterparty chain + string channel_id = 2 [ (gogoproto.moretags) = "yaml:\"channel_id\"" ]; +} + +// Packet defines a type that carries data across different chains through IBC +message Packet { + option (gogoproto.goproto_getters) = false; + + // number corresponds to the order of sends and receives, where a Packet + // with an earlier sequence number must be sent and received before a Packet + // with a later sequence number. + uint64 sequence = 1; + // identifies the port on the sending chain. + string source_port = 2 [ (gogoproto.moretags) = "yaml:\"source_port\"" ]; + // identifies the channel end on the sending chain. + string source_channel = 3 + [ (gogoproto.moretags) = "yaml:\"source_channel\"" ]; + // identifies the port on the receiving chain. + string destination_port = 4 + [ (gogoproto.moretags) = "yaml:\"destination_port\"" ]; + // identifies the channel end on the receiving chain. + string destination_channel = 5 + [ (gogoproto.moretags) = "yaml:\"destination_channel\"" ]; + // actual opaque bytes transferred directly to the application module + bytes data = 6; + // block height after which the packet times out + ibcgo.core.client.v1.Height timeout_height = 7 [ + (gogoproto.moretags) = "yaml:\"timeout_height\"", + (gogoproto.nullable) = false + ]; + // block timestamp (in nanoseconds) after which the packet times out + uint64 timeout_timestamp = 8 + [ (gogoproto.moretags) = "yaml:\"timeout_timestamp\"" ]; +} + +// PacketState defines the generic type necessary to retrieve and store +// packet commitments, acknowledgements, and receipts. +// Caller is responsible for knowing the context necessary to interpret this +// state as a commitment, acknowledgement, or a receipt. +message PacketState { + option (gogoproto.goproto_getters) = false; + + // channel port identifier. + string port_id = 1 [ (gogoproto.moretags) = "yaml:\"port_id\"" ]; + // channel unique identifier. + string channel_id = 2 [ (gogoproto.moretags) = "yaml:\"channel_id\"" ]; + // packet sequence. + uint64 sequence = 3; + // embedded data that represents packet state. + bytes data = 4; +} + +// Acknowledgement is the recommended acknowledgement format to be used by +// app-specific protocols. +// NOTE: The field numbers 21 and 22 were explicitly chosen to avoid accidental +// conflicts with other protobuf message formats used for acknowledgements. +// The first byte of any message with this format will be the non-ASCII values +// `0xaa` (result) or `0xb2` (error). 
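The field numbers 21 and 22 translate directly into the first bytes mentioned in the comment above: protobuf encodes a length-delimited field tag as (field_number << 3) | 2. A quick check of that arithmetic:

```go
package main

import "fmt"

func main() {
	// Length-delimited fields use wire type 2, so the tag byte is
	// (field_number << 3) | 2 for both branches of the response oneof.
	fmt.Printf("result (field 21): %#x\n", 21<<3|2) // 0xaa
	fmt.Printf("error  (field 22): %#x\n", 22<<3|2) // 0xb2
}
```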
Implemented as defined by ICS: +// https://github.com/cosmos/ics/tree/master/spec/ics-004-channel-and-packet-semantics#acknowledgement-envelope +message Acknowledgement { + // response contains either a result or an error and must be non-empty + oneof response { + bytes result = 21; + string error = 22; + } +} diff --git a/proto/ibcgo/core/channel/v1/genesis.proto b/proto/ibcgo/core/channel/v1/genesis.proto new file mode 100644 index 0000000000..12f67486d2 --- /dev/null +++ b/proto/ibcgo/core/channel/v1/genesis.proto @@ -0,0 +1,42 @@ +syntax = "proto3"; + +package ibcgo.core.channel.v1; + +option go_package = "github.com/cosmos/ibc-go/core/04-channel/types"; + +import "gogoproto/gogo.proto"; +import "ibcgo/core/channel/v1/channel.proto"; + +// GenesisState defines the ibc channel submodule's genesis state. +message GenesisState { + repeated IdentifiedChannel channels = 1 [ + (gogoproto.casttype) = "IdentifiedChannel", + (gogoproto.nullable) = false + ]; + repeated PacketState acknowledgements = 2 [ (gogoproto.nullable) = false ]; + repeated PacketState commitments = 3 [ (gogoproto.nullable) = false ]; + repeated PacketState receipts = 4 [ (gogoproto.nullable) = false ]; + repeated PacketSequence send_sequences = 5 [ + (gogoproto.nullable) = false, + (gogoproto.moretags) = "yaml:\"send_sequences\"" + ]; + repeated PacketSequence recv_sequences = 6 [ + (gogoproto.nullable) = false, + (gogoproto.moretags) = "yaml:\"recv_sequences\"" + ]; + repeated PacketSequence ack_sequences = 7 [ + (gogoproto.nullable) = false, + (gogoproto.moretags) = "yaml:\"ack_sequences\"" + ]; + // the sequence for the next generated channel identifier + uint64 next_channel_sequence = 8 + [ (gogoproto.moretags) = "yaml:\"next_channel_sequence\"" ]; +} + +// PacketSequence defines the genesis type necessary to retrieve and store +// next send and receive sequences. +message PacketSequence { + string port_id = 1 [ (gogoproto.moretags) = "yaml:\"port_id\"" ]; + string channel_id = 2 [ (gogoproto.moretags) = "yaml:\"channel_id\"" ]; + uint64 sequence = 3; +} diff --git a/proto/ibcgo/core/channel/v1/query.proto b/proto/ibcgo/core/channel/v1/query.proto new file mode 100644 index 0000000000..a989b2adfa --- /dev/null +++ b/proto/ibcgo/core/channel/v1/query.proto @@ -0,0 +1,389 @@ +syntax = "proto3"; + +package ibcgo.core.channel.v1; + +option go_package = "github.com/cosmos/ibc-go/core/04-channel/types"; + +import "ibcgo/core/client/v1/client.proto"; +import "cosmos/base/query/v1beta1/pagination.proto"; +import "ibcgo/core/channel/v1/channel.proto"; +import "google/api/annotations.proto"; +import "google/protobuf/any.proto"; +import "gogoproto/gogo.proto"; + +// Query provides defines the gRPC querier service +service Query { + // Channel queries an IBC Channel. + rpc Channel(QueryChannelRequest) returns (QueryChannelResponse) { + option (google.api.http).get = + "/ibc/core/channel/v1/channels/{channel_id}/ports/{port_id}"; + } + + // Channels queries all the IBC channels of a chain. + rpc Channels(QueryChannelsRequest) returns (QueryChannelsResponse) { + option (google.api.http).get = "/ibc/core/channel/v1/channels"; + } + + // ConnectionChannels queries all the channels associated with a connection + // end. 
+ rpc ConnectionChannels(QueryConnectionChannelsRequest) + returns (QueryConnectionChannelsResponse) { + option (google.api.http).get = + "/ibc/core/channel/v1/connections/{connection}/channels"; + } + + // ChannelClientState queries for the client state for the channel associated + // with the provided channel identifiers. + rpc ChannelClientState(QueryChannelClientStateRequest) + returns (QueryChannelClientStateResponse) { + option (google.api.http).get = "/ibc/core/channel/v1/channels/{channel_id}/" + "ports/{port_id}/client_state"; + } + + // ChannelConsensusState queries for the consensus state for the channel + // associated with the provided channel identifiers. + rpc ChannelConsensusState(QueryChannelConsensusStateRequest) + returns (QueryChannelConsensusStateResponse) { + option (google.api.http).get = "/ibc/core/channel/v1/channels/{channel_id}/" + "ports/{port_id}/consensus_state/revision/" + "{revision_number}/height/{revision_height}"; + } + + // PacketCommitment queries a stored packet commitment hash. + rpc PacketCommitment(QueryPacketCommitmentRequest) + returns (QueryPacketCommitmentResponse) { + option (google.api.http).get = + "/ibc/core/channel/v1/channels/{channel_id}/ports/{port_id}/" + "packet_commitments/{sequence}"; + } + + // PacketCommitments returns all the packet commitments hashes associated + // with a channel. + rpc PacketCommitments(QueryPacketCommitmentsRequest) + returns (QueryPacketCommitmentsResponse) { + option (google.api.http).get = "/ibc/core/channel/v1/channels/{channel_id}/" + "ports/{port_id}/packet_commitments"; + } + + // PacketReceipt queries if a given packet sequence has been received on the + // queried chain + rpc PacketReceipt(QueryPacketReceiptRequest) + returns (QueryPacketReceiptResponse) { + option (google.api.http).get = "/ibc/core/channel/v1/channels/{channel_id}/" + "ports/{port_id}/packet_receipts/{sequence}"; + } + + // PacketAcknowledgement queries a stored packet acknowledgement hash. + rpc PacketAcknowledgement(QueryPacketAcknowledgementRequest) + returns (QueryPacketAcknowledgementResponse) { + option (google.api.http).get = "/ibc/core/channel/v1/channels/{channel_id}/" + "ports/{port_id}/packet_acks/{sequence}"; + } + + // PacketAcknowledgements returns all the packet acknowledgements associated + // with a channel. + rpc PacketAcknowledgements(QueryPacketAcknowledgementsRequest) + returns (QueryPacketAcknowledgementsResponse) { + option (google.api.http).get = "/ibc/core/channel/v1/channels/{channel_id}/" + "ports/{port_id}/packet_acknowledgements"; + } + + // UnreceivedPackets returns all the unreceived IBC packets associated with a + // channel and sequences. + rpc UnreceivedPackets(QueryUnreceivedPacketsRequest) + returns (QueryUnreceivedPacketsResponse) { + option (google.api.http).get = + "/ibc/core/channel/v1/channels/{channel_id}/ports/{port_id}/" + "packet_commitments/" + "{packet_commitment_sequences}/unreceived_packets"; + } + + // UnreceivedAcks returns all the unreceived IBC acknowledgements associated + // with a channel and sequences. + rpc UnreceivedAcks(QueryUnreceivedAcksRequest) + returns (QueryUnreceivedAcksResponse) { + option (google.api.http).get = "/ibc/core/channel/v1/channels/{channel_id}/" + "ports/{port_id}/packet_commitments/" + "{packet_ack_sequences}/unreceived_acks"; + } + + // NextSequenceReceive returns the next receive sequence for a given channel. 
+ rpc NextSequenceReceive(QueryNextSequenceReceiveRequest) + returns (QueryNextSequenceReceiveResponse) { + option (google.api.http).get = "/ibc/core/channel/v1/channels/{channel_id}/" + "ports/{port_id}/next_sequence"; + } +} + +// QueryChannelRequest is the request type for the Query/Channel RPC method +message QueryChannelRequest { + // port unique identifier + string port_id = 1; + // channel unique identifier + string channel_id = 2; +} + +// QueryChannelResponse is the response type for the Query/Channel RPC method. +// Besides the Channel end, it includes a proof and the height from which the +// proof was retrieved. +message QueryChannelResponse { + // channel associated with the request identifiers + ibcgo.core.channel.v1.Channel channel = 1; + // merkle proof of existence + bytes proof = 2; + // height at which the proof was retrieved + ibcgo.core.client.v1.Height proof_height = 3 [ (gogoproto.nullable) = false ]; +} + +// QueryChannelsRequest is the request type for the Query/Channels RPC method +message QueryChannelsRequest { + // pagination request + cosmos.base.query.v1beta1.PageRequest pagination = 1; +} + +// QueryChannelsResponse is the response type for the Query/Channels RPC method. +message QueryChannelsResponse { + // list of stored channels of the chain. + repeated ibcgo.core.channel.v1.IdentifiedChannel channels = 1; + // pagination response + cosmos.base.query.v1beta1.PageResponse pagination = 2; + // query block height + ibcgo.core.client.v1.Height height = 3 [ (gogoproto.nullable) = false ]; +} + +// QueryConnectionChannelsRequest is the request type for the +// Query/QueryConnectionChannels RPC method +message QueryConnectionChannelsRequest { + // connection unique identifier + string connection = 1; + // pagination request + cosmos.base.query.v1beta1.PageRequest pagination = 2; +} + +// QueryConnectionChannelsResponse is the Response type for the +// Query/QueryConnectionChannels RPC method +message QueryConnectionChannelsResponse { + // list of channels associated with a connection. 
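The service above is served over gRPC by the node, so the generated QueryClient can be pointed at a node's gRPC endpoint. A sketch of querying a single channel end; the endpoint address, port ID, and channel ID are placeholders, and the import path mirrors the one used by the Go code in this patch.

```go
package main

import (
	"context"
	"fmt"

	"google.golang.org/grpc"

	channeltypes "github.com/cosmos/cosmos-sdk/x/ibc/core/04-channel/types"
)

func main() {
	// Placeholder endpoint; point this at a running node's gRPC address.
	conn, err := grpc.Dial("localhost:9090", grpc.WithInsecure())
	if err != nil {
		panic(err)
	}
	defer conn.Close()

	queryClient := channeltypes.NewQueryClient(conn)
	res, err := queryClient.Channel(context.Background(), &channeltypes.QueryChannelRequest{
		PortId:    "transfer",  // placeholder port identifier
		ChannelId: "channel-0", // placeholder channel identifier
	})
	if err != nil {
		panic(err)
	}
	fmt.Println(res.Channel.State, res.ProofHeight)
}
```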
+ repeated ibcgo.core.channel.v1.IdentifiedChannel channels = 1; + // pagination response + cosmos.base.query.v1beta1.PageResponse pagination = 2; + // query block height + ibcgo.core.client.v1.Height height = 3 [ (gogoproto.nullable) = false ]; +} + +// QueryChannelClientStateRequest is the request type for the Query/ClientState +// RPC method +message QueryChannelClientStateRequest { + // port unique identifier + string port_id = 1; + // channel unique identifier + string channel_id = 2; +} + +// QueryChannelClientStateResponse is the Response type for the +// Query/QueryChannelClientState RPC method +message QueryChannelClientStateResponse { + // client state associated with the channel + ibcgo.core.client.v1.IdentifiedClientState identified_client_state = 1; + // merkle proof of existence + bytes proof = 2; + // height at which the proof was retrieved + ibcgo.core.client.v1.Height proof_height = 3 [ (gogoproto.nullable) = false ]; +} + +// QueryChannelConsensusStateRequest is the request type for the +// Query/ConsensusState RPC method +message QueryChannelConsensusStateRequest { + // port unique identifier + string port_id = 1; + // channel unique identifier + string channel_id = 2; + // revision number of the consensus state + uint64 revision_number = 3; + // revision height of the consensus state + uint64 revision_height = 4; +} + +// QueryChannelClientStateResponse is the Response type for the +// Query/QueryChannelClientState RPC method +message QueryChannelConsensusStateResponse { + // consensus state associated with the channel + google.protobuf.Any consensus_state = 1; + // client ID associated with the consensus state + string client_id = 2; + // merkle proof of existence + bytes proof = 3; + // height at which the proof was retrieved + ibcgo.core.client.v1.Height proof_height = 4 [ (gogoproto.nullable) = false ]; +} + +// QueryPacketCommitmentRequest is the request type for the +// Query/PacketCommitment RPC method +message QueryPacketCommitmentRequest { + // port unique identifier + string port_id = 1; + // channel unique identifier + string channel_id = 2; + // packet sequence + uint64 sequence = 3; +} + +// QueryPacketCommitmentResponse defines the client query response for a packet +// which also includes a proof and the height from which the proof was +// retrieved +message QueryPacketCommitmentResponse { + // packet associated with the request fields + bytes commitment = 1; + // merkle proof of existence + bytes proof = 2; + // height at which the proof was retrieved + ibcgo.core.client.v1.Height proof_height = 3 [ (gogoproto.nullable) = false ]; +} + +// QueryPacketCommitmentsRequest is the request type for the +// Query/QueryPacketCommitments RPC method +message QueryPacketCommitmentsRequest { + // port unique identifier + string port_id = 1; + // channel unique identifier + string channel_id = 2; + // pagination request + cosmos.base.query.v1beta1.PageRequest pagination = 3; +} + +// QueryPacketCommitmentsResponse is the request type for the +// Query/QueryPacketCommitments RPC method +message QueryPacketCommitmentsResponse { + repeated ibcgo.core.channel.v1.PacketState commitments = 1; + // pagination response + cosmos.base.query.v1beta1.PageResponse pagination = 2; + // query block height + ibcgo.core.client.v1.Height height = 3 [ (gogoproto.nullable) = false ]; +} + +// QueryPacketReceiptRequest is the request type for the +// Query/PacketReceipt RPC method +message QueryPacketReceiptRequest { + // port unique identifier + string port_id = 1; + // channel unique 
+ string channel_id = 2;
+ // packet sequence
+ uint64 sequence = 3;
+}
+
+// QueryPacketReceiptResponse defines the client query response for a packet
+// receipt which also includes a proof, and the height from which the proof was
+// retrieved
+message QueryPacketReceiptResponse {
+ // success flag for if receipt exists
+ bool received = 2;
+ // merkle proof of existence
+ bytes proof = 3;
+ // height at which the proof was retrieved
+ ibcgo.core.client.v1.Height proof_height = 4 [ (gogoproto.nullable) = false ];
+}
+
+// QueryPacketAcknowledgementRequest is the request type for the
+// Query/PacketAcknowledgement RPC method
+message QueryPacketAcknowledgementRequest {
+ // port unique identifier
+ string port_id = 1;
+ // channel unique identifier
+ string channel_id = 2;
+ // packet sequence
+ uint64 sequence = 3;
+}
+
+// QueryPacketAcknowledgementResponse defines the client query response for a
+// packet which also includes a proof and the height from which the
+// proof was retrieved
+message QueryPacketAcknowledgementResponse {
+ // packet associated with the request fields
+ bytes acknowledgement = 1;
+ // merkle proof of existence
+ bytes proof = 2;
+ // height at which the proof was retrieved
+ ibcgo.core.client.v1.Height proof_height = 3 [ (gogoproto.nullable) = false ];
+}
+
+// QueryPacketAcknowledgementsRequest is the request type for the
+// Query/QueryPacketAcknowledgements RPC method
+message QueryPacketAcknowledgementsRequest {
+ // port unique identifier
+ string port_id = 1;
+ // channel unique identifier
+ string channel_id = 2;
+ // pagination request
+ cosmos.base.query.v1beta1.PageRequest pagination = 3;
+}
+
+// QueryPacketAcknowledgementsResponse is the response type for the
+// Query/QueryPacketAcknowledgements RPC method
+message QueryPacketAcknowledgementsResponse {
+ repeated ibcgo.core.channel.v1.PacketState acknowledgements = 1;
+ // pagination response
+ cosmos.base.query.v1beta1.PageResponse pagination = 2;
+ // query block height
+ ibcgo.core.client.v1.Height height = 3 [ (gogoproto.nullable) = false ];
+}
+
+// QueryUnreceivedPacketsRequest is the request type for the
+// Query/UnreceivedPackets RPC method
+message QueryUnreceivedPacketsRequest {
+ // port unique identifier
+ string port_id = 1;
+ // channel unique identifier
+ string channel_id = 2;
+ // list of packet sequences
+ repeated uint64 packet_commitment_sequences = 3;
+}
+
+// QueryUnreceivedPacketsResponse is the response type for the
+// Query/UnreceivedPackets RPC method
+message QueryUnreceivedPacketsResponse {
+ // list of unreceived packet sequences
+ repeated uint64 sequences = 1;
+ // query block height
+ ibcgo.core.client.v1.Height height = 2 [ (gogoproto.nullable) = false ];
+}
+
+// QueryUnreceivedAcksRequest is the request type for the
+// Query/UnreceivedAcks RPC method
+message QueryUnreceivedAcksRequest {
+ // port unique identifier
+ string port_id = 1;
+ // channel unique identifier
+ string channel_id = 2;
+ // list of acknowledgement sequences
+ repeated uint64 packet_ack_sequences = 3;
+}
+
+// QueryUnreceivedAcksResponse is the response type for the
+// Query/UnreceivedAcks RPC method
+message QueryUnreceivedAcksResponse {
+ // list of unreceived acknowledgement sequences
+ repeated uint64 sequences = 1;
+ // query block height
+ ibcgo.core.client.v1.Height height = 2 [ (gogoproto.nullable) = false ];
+}
+
+// QueryNextSequenceReceiveRequest is the request type for the
+// Query/NextSequenceReceive RPC method
+message QueryNextSequenceReceiveRequest {
+ // port unique identifier
+ string port_id = 1;
+ // channel unique identifier
+ string channel_id = 2;
+}
+
+// QueryNextSequenceReceiveResponse is the response type for the
+// Query/NextSequenceReceive RPC method
+message QueryNextSequenceReceiveResponse {
+ // next sequence receive number
+ uint64 next_sequence_receive = 1;
+ // merkle proof of existence
+ bytes proof = 2;
+ // height at which the proof was retrieved
+ ibcgo.core.client.v1.Height proof_height = 3 [ (gogoproto.nullable) = false ];
+}
diff --git a/proto/ibcgo/core/channel/v1/tx.proto b/proto/ibcgo/core/channel/v1/tx.proto
new file mode 100644
index 0000000000..290c3a946a
--- /dev/null
+++ b/proto/ibcgo/core/channel/v1/tx.proto
@@ -0,0 +1,239 @@
+syntax = "proto3";
+
+package ibcgo.core.channel.v1;
+
+option go_package = "github.com/cosmos/ibc-go/core/04-channel/types";
+
+import "gogoproto/gogo.proto";
+import "ibcgo/core/client/v1/client.proto";
+import "ibcgo/core/channel/v1/channel.proto";
+
+// Msg defines the ibc/channel Msg service.
+service Msg {
+ // ChannelOpenInit defines a rpc handler method for MsgChannelOpenInit.
+ rpc ChannelOpenInit(MsgChannelOpenInit) returns (MsgChannelOpenInitResponse);
+
+ // ChannelOpenTry defines a rpc handler method for MsgChannelOpenTry.
+ rpc ChannelOpenTry(MsgChannelOpenTry) returns (MsgChannelOpenTryResponse);
+
+ // ChannelOpenAck defines a rpc handler method for MsgChannelOpenAck.
+ rpc ChannelOpenAck(MsgChannelOpenAck) returns (MsgChannelOpenAckResponse);
+
+ // ChannelOpenConfirm defines a rpc handler method for MsgChannelOpenConfirm.
+ rpc ChannelOpenConfirm(MsgChannelOpenConfirm)
+ returns (MsgChannelOpenConfirmResponse);
+
+ // ChannelCloseInit defines a rpc handler method for MsgChannelCloseInit.
+ rpc ChannelCloseInit(MsgChannelCloseInit)
+ returns (MsgChannelCloseInitResponse);
+
+ // ChannelCloseConfirm defines a rpc handler method for
+ // MsgChannelCloseConfirm.
+ rpc ChannelCloseConfirm(MsgChannelCloseConfirm)
+ returns (MsgChannelCloseConfirmResponse);
+
+ // RecvPacket defines a rpc handler method for MsgRecvPacket.
+ rpc RecvPacket(MsgRecvPacket) returns (MsgRecvPacketResponse);
+
+ // Timeout defines a rpc handler method for MsgTimeout.
+ rpc Timeout(MsgTimeout) returns (MsgTimeoutResponse);
+
+ // TimeoutOnClose defines a rpc handler method for MsgTimeoutOnClose.
+ rpc TimeoutOnClose(MsgTimeoutOnClose) returns (MsgTimeoutOnCloseResponse);
+
+ // Acknowledgement defines a rpc handler method for MsgAcknowledgement.
+ rpc Acknowledgement(MsgAcknowledgement) returns (MsgAcknowledgementResponse);
+}
+
+// MsgChannelOpenInit defines an sdk.Msg to initialize a channel handshake. It
+// is called by a relayer on Chain A.
+message MsgChannelOpenInit {
+ option (gogoproto.equal) = false;
+ option (gogoproto.goproto_getters) = false;
+
+ string port_id = 1 [ (gogoproto.moretags) = "yaml:\"port_id\"" ];
+ Channel channel = 2 [ (gogoproto.nullable) = false ];
+ string signer = 3;
+}
+
+// MsgChannelOpenInitResponse defines the Msg/ChannelOpenInit response type.
+message MsgChannelOpenInitResponse {}
+
+// MsgChannelOpenTry defines a msg sent by a Relayer to try to open a channel
+// on Chain B.
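An editorial aside before the MsgChannelOpenTry definition continues: the channel Query service above is served over gRPC (and, through the google.api.http annotations, over REST). Below is a minimal sketch of calling it from Go, assuming the client code generated from this file lives under github.com/cosmos/ibc-go/core/04-channel/types as declared by go_package, and that a node's gRPC endpoint is reachable at the illustrative address used here.

```go
package main

import (
	"context"
	"fmt"
	"log"

	"google.golang.org/grpc"

	channeltypes "github.com/cosmos/ibc-go/core/04-channel/types"
)

func main() {
	// Dial the node's gRPC endpoint (insecure only for local experimentation).
	conn, err := grpc.Dial("localhost:9090", grpc.WithInsecure())
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	// NewQueryClient is generated from the Query service defined above.
	queryClient := channeltypes.NewQueryClient(conn)

	// Fetch a single channel end by port and channel identifier.
	res, err := queryClient.Channel(context.Background(), &channeltypes.QueryChannelRequest{
		PortId:    "transfer",
		ChannelId: "channel-0",
	})
	if err != nil {
		log.Fatal(err)
	}

	// The response also carries the merkle proof and the height it was taken at.
	fmt.Println(res.Channel.State, res.ProofHeight)
}
```

The packet commitment, acknowledgement and unreceived-sequence queries defined above follow the same request/response pattern.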
+message MsgChannelOpenTry { + option (gogoproto.equal) = false; + option (gogoproto.goproto_getters) = false; + + string port_id = 1 [ (gogoproto.moretags) = "yaml:\"port_id\"" ]; + // in the case of crossing hello's, when both chains call OpenInit, we need + // the channel identifier of the previous channel in state INIT + string previous_channel_id = 2 + [ (gogoproto.moretags) = "yaml:\"previous_channel_id\"" ]; + Channel channel = 3 [ (gogoproto.nullable) = false ]; + string counterparty_version = 4 + [ (gogoproto.moretags) = "yaml:\"counterparty_version\"" ]; + bytes proof_init = 5 [ (gogoproto.moretags) = "yaml:\"proof_init\"" ]; + ibcgo.core.client.v1.Height proof_height = 6 [ + (gogoproto.moretags) = "yaml:\"proof_height\"", + (gogoproto.nullable) = false + ]; + string signer = 7; +} + +// MsgChannelOpenTryResponse defines the Msg/ChannelOpenTry response type. +message MsgChannelOpenTryResponse {} + +// MsgChannelOpenAck defines a msg sent by a Relayer to Chain A to acknowledge +// the change of channel state to TRYOPEN on Chain B. +message MsgChannelOpenAck { + option (gogoproto.equal) = false; + option (gogoproto.goproto_getters) = false; + + string port_id = 1 [ (gogoproto.moretags) = "yaml:\"port_id\"" ]; + string channel_id = 2 [ (gogoproto.moretags) = "yaml:\"channel_id\"" ]; + string counterparty_channel_id = 3 + [ (gogoproto.moretags) = "yaml:\"counterparty_channel_id\"" ]; + string counterparty_version = 4 + [ (gogoproto.moretags) = "yaml:\"counterparty_version\"" ]; + bytes proof_try = 5 [ (gogoproto.moretags) = "yaml:\"proof_try\"" ]; + ibcgo.core.client.v1.Height proof_height = 6 [ + (gogoproto.moretags) = "yaml:\"proof_height\"", + (gogoproto.nullable) = false + ]; + string signer = 7; +} + +// MsgChannelOpenAckResponse defines the Msg/ChannelOpenAck response type. +message MsgChannelOpenAckResponse {} + +// MsgChannelOpenConfirm defines a msg sent by a Relayer to Chain B to +// acknowledge the change of channel state to OPEN on Chain A. +message MsgChannelOpenConfirm { + option (gogoproto.equal) = false; + option (gogoproto.goproto_getters) = false; + + string port_id = 1 [ (gogoproto.moretags) = "yaml:\"port_id\"" ]; + string channel_id = 2 [ (gogoproto.moretags) = "yaml:\"channel_id\"" ]; + bytes proof_ack = 3 [ (gogoproto.moretags) = "yaml:\"proof_ack\"" ]; + ibcgo.core.client.v1.Height proof_height = 4 [ + (gogoproto.moretags) = "yaml:\"proof_height\"", + (gogoproto.nullable) = false + ]; + string signer = 5; +} + +// MsgChannelOpenConfirmResponse defines the Msg/ChannelOpenConfirm response +// type. +message MsgChannelOpenConfirmResponse {} + +// MsgChannelCloseInit defines a msg sent by a Relayer to Chain A +// to close a channel with Chain B. +message MsgChannelCloseInit { + option (gogoproto.equal) = false; + option (gogoproto.goproto_getters) = false; + + string port_id = 1 [ (gogoproto.moretags) = "yaml:\"port_id\"" ]; + string channel_id = 2 [ (gogoproto.moretags) = "yaml:\"channel_id\"" ]; + string signer = 3; +} + +// MsgChannelCloseInitResponse defines the Msg/ChannelCloseInit response type. +message MsgChannelCloseInitResponse {} + +// MsgChannelCloseConfirm defines a msg sent by a Relayer to Chain B +// to acknowledge the change of channel state to CLOSED on Chain A. 
+message MsgChannelCloseConfirm { + option (gogoproto.equal) = false; + option (gogoproto.goproto_getters) = false; + + string port_id = 1 [ (gogoproto.moretags) = "yaml:\"port_id\"" ]; + string channel_id = 2 [ (gogoproto.moretags) = "yaml:\"channel_id\"" ]; + bytes proof_init = 3 [ (gogoproto.moretags) = "yaml:\"proof_init\"" ]; + ibcgo.core.client.v1.Height proof_height = 4 [ + (gogoproto.moretags) = "yaml:\"proof_height\"", + (gogoproto.nullable) = false + ]; + string signer = 5; +} + +// MsgChannelCloseConfirmResponse defines the Msg/ChannelCloseConfirm response +// type. +message MsgChannelCloseConfirmResponse {} + +// MsgRecvPacket receives incoming IBC packet +message MsgRecvPacket { + option (gogoproto.equal) = false; + option (gogoproto.goproto_getters) = false; + + Packet packet = 1 [ (gogoproto.nullable) = false ]; + bytes proof_commitment = 2 + [ (gogoproto.moretags) = "yaml:\"proof_commitment\"" ]; + ibcgo.core.client.v1.Height proof_height = 3 [ + (gogoproto.moretags) = "yaml:\"proof_height\"", + (gogoproto.nullable) = false + ]; + string signer = 4; +} + +// MsgRecvPacketResponse defines the Msg/RecvPacket response type. +message MsgRecvPacketResponse {} + +// MsgTimeout receives timed-out packet +message MsgTimeout { + option (gogoproto.equal) = false; + option (gogoproto.goproto_getters) = false; + + Packet packet = 1 [ (gogoproto.nullable) = false ]; + bytes proof_unreceived = 2 + [ (gogoproto.moretags) = "yaml:\"proof_unreceived\"" ]; + ibcgo.core.client.v1.Height proof_height = 3 [ + (gogoproto.moretags) = "yaml:\"proof_height\"", + (gogoproto.nullable) = false + ]; + uint64 next_sequence_recv = 4 + [ (gogoproto.moretags) = "yaml:\"next_sequence_recv\"" ]; + string signer = 5; +} + +// MsgTimeoutResponse defines the Msg/Timeout response type. +message MsgTimeoutResponse {} + +// MsgTimeoutOnClose timed-out packet upon counterparty channel closure. +message MsgTimeoutOnClose { + option (gogoproto.equal) = false; + option (gogoproto.goproto_getters) = false; + + Packet packet = 1 [ (gogoproto.nullable) = false ]; + bytes proof_unreceived = 2 + [ (gogoproto.moretags) = "yaml:\"proof_unreceived\"" ]; + bytes proof_close = 3 [ (gogoproto.moretags) = "yaml:\"proof_close\"" ]; + ibcgo.core.client.v1.Height proof_height = 4 [ + (gogoproto.moretags) = "yaml:\"proof_height\"", + (gogoproto.nullable) = false + ]; + uint64 next_sequence_recv = 5 + [ (gogoproto.moretags) = "yaml:\"next_sequence_recv\"" ]; + string signer = 6; +} + +// MsgTimeoutOnCloseResponse defines the Msg/TimeoutOnClose response type. +message MsgTimeoutOnCloseResponse {} + +// MsgAcknowledgement receives incoming IBC acknowledgement +message MsgAcknowledgement { + option (gogoproto.equal) = false; + option (gogoproto.goproto_getters) = false; + + Packet packet = 1 [ (gogoproto.nullable) = false ]; + bytes acknowledgement = 2; + bytes proof_acked = 3 [ (gogoproto.moretags) = "yaml:\"proof_acked\"" ]; + ibcgo.core.client.v1.Height proof_height = 4 [ + (gogoproto.moretags) = "yaml:\"proof_height\"", + (gogoproto.nullable) = false + ]; + string signer = 5; +} + +// MsgAcknowledgementResponse defines the Msg/Acknowledgement response type. 
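Before the MsgAcknowledgementResponse stub below, a quick illustration of how a relayer would assemble one of these packet messages. This is a hedged sketch assuming the Go types generated under core/04-channel/types and core/02-client/types; the packet contents, proof bytes and signer address are placeholders, not values produced by this repository.

```go
package main

import (
	"fmt"

	clienttypes "github.com/cosmos/ibc-go/core/02-client/types"
	channeltypes "github.com/cosmos/ibc-go/core/04-channel/types"
)

func main() {
	// Packet as committed on the sending chain (illustrative values only).
	packet := channeltypes.Packet{
		Sequence:           1,
		SourcePort:         "transfer",
		SourceChannel:      "channel-0",
		DestinationPort:    "transfer",
		DestinationChannel: "channel-1",
		Data:               []byte(`{"amount":"100"}`),
		TimeoutHeight:      clienttypes.Height{RevisionNumber: 1, RevisionHeight: 1000},
	}

	// proof_commitment is the merkle proof that the packet commitment exists on
	// the counterparty chain, retrieved at proof_height (placeholder bytes here).
	msg := &channeltypes.MsgRecvPacket{
		Packet:          packet,
		ProofCommitment: []byte("proof bytes from a packet commitment query"),
		ProofHeight:     clienttypes.Height{RevisionNumber: 1, RevisionHeight: 1001},
		Signer:          "cosmos1...", // relayer address placeholder
	}
	fmt.Println(msg.Packet.Sequence, len(msg.ProofCommitment))
}
```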
+message MsgAcknowledgementResponse {}
diff --git a/proto/ibcgo/core/client/v1/client.proto b/proto/ibcgo/core/client/v1/client.proto
new file mode 100644
index 0000000000..6e036bed0c
--- /dev/null
+++ b/proto/ibcgo/core/client/v1/client.proto
@@ -0,0 +1,96 @@
+syntax = "proto3";
+
+package ibcgo.core.client.v1;
+
+option go_package = "github.com/cosmos/ibc-go/core/02-client/types";
+
+import "gogoproto/gogo.proto";
+import "google/protobuf/any.proto";
+
+// IdentifiedClientState defines a client state with an additional client
+// identifier field.
+message IdentifiedClientState {
+ // client identifier
+ string client_id = 1 [ (gogoproto.moretags) = "yaml:\"client_id\"" ];
+ // client state
+ google.protobuf.Any client_state = 2
+ [ (gogoproto.moretags) = "yaml:\"client_state\"" ];
+}
+
+// ConsensusStateWithHeight defines a consensus state with an additional height
+// field.
+message ConsensusStateWithHeight {
+ // consensus state height
+ Height height = 1 [ (gogoproto.nullable) = false ];
+ // consensus state
+ google.protobuf.Any consensus_state = 2
+ [ (gogoproto.moretags) = "yaml:\"consensus_state\"" ];
+}
+
+// ClientConsensusStates defines all the stored consensus states for a given
+// client.
+message ClientConsensusStates {
+ // client identifier
+ string client_id = 1 [ (gogoproto.moretags) = "yaml:\"client_id\"" ];
+ // consensus states and their heights associated with the client
+ repeated ConsensusStateWithHeight consensus_states = 2 [
+ (gogoproto.moretags) = "yaml:\"consensus_states\"",
+ (gogoproto.nullable) = false
+ ];
+}
+
+// ClientUpdateProposal is a governance proposal. If it passes, the substitute
+// client's consensus states starting from the 'initial height' are copied over
+// to the subject's client state. The proposal handler may fail if the subject
+// and the substitute do not match in client and chain parameters (with
+// exception to latest height, frozen height, and chain-id). The updated client
+// must also be valid (cannot be expired).
+message ClientUpdateProposal {
+ option (gogoproto.goproto_getters) = false;
+ // the title of the update proposal
+ string title = 1;
+ // the description of the proposal
+ string description = 2;
+ // the client identifier for the client to be updated if the proposal passes
+ string subject_client_id = 3
+ [ (gogoproto.moretags) = "yaml:\"subject_client_id\"" ];
+ // the substitute client identifier for the client standing in for the subject
+ // client
+ string substitute_client_id = 4
+ [ (gogoproto.moretags) = "yaml:\"substitute_client_id\"" ];
+ // the initial height to copy consensus states from the substitute to the
+ // subject
+ Height initial_height = 5 [
+ (gogoproto.moretags) = "yaml:\"initial_height\"",
+ (gogoproto.nullable) = false
+ ];
+}
+
+// Height is a monotonically increasing data type
+// that can be compared against another Height for the purposes of updating and
+// freezing clients.
+//
+// Normally the RevisionHeight is incremented at each height while keeping
+// RevisionNumber the same. However some consensus algorithms may choose to
+// reset the height in certain conditions e.g. hard forks, state-machine
+// breaking changes. In these cases, the RevisionNumber is incremented so that
+// height continues to be monotonically increasing even as the RevisionHeight
+// gets reset
+message Height {
+ option (gogoproto.goproto_getters) = false;
+ option (gogoproto.goproto_stringer) = false;
+
+ // the revision that the client is currently on
+ uint64 revision_number = 1
+ [ (gogoproto.moretags) = "yaml:\"revision_number\"" ];
+ // the height within the given revision
+ uint64 revision_height = 2
+ [ (gogoproto.moretags) = "yaml:\"revision_height\"" ];
+}
+
+// Params defines the set of IBC light client parameters.
+message Params {
+ // allowed_clients defines the list of allowed client state types.
+ repeated string allowed_clients = 1
+ [ (gogoproto.moretags) = "yaml:\"allowed_clients\"" ];
+}
diff --git a/proto/ibcgo/core/client/v1/genesis.proto b/proto/ibcgo/core/client/v1/genesis.proto
new file mode 100644
index 0000000000..fc1c5d2de8
--- /dev/null
+++ b/proto/ibcgo/core/client/v1/genesis.proto
@@ -0,0 +1,56 @@
+syntax = "proto3";
+
+package ibcgo.core.client.v1;
+
+option go_package = "github.com/cosmos/ibc-go/core/02-client/types";
+
+import "ibcgo/core/client/v1/client.proto";
+import "gogoproto/gogo.proto";
+
+// GenesisState defines the ibc client submodule's genesis state.
+message GenesisState {
+ // client states with their corresponding identifiers
+ repeated IdentifiedClientState clients = 1 [
+ (gogoproto.nullable) = false,
+ (gogoproto.castrepeated) = "IdentifiedClientStates"
+ ];
+ // consensus states from each client
+ repeated ClientConsensusStates clients_consensus = 2 [
+ (gogoproto.nullable) = false,
+ (gogoproto.castrepeated) = "ClientsConsensusStates",
+ (gogoproto.moretags) = "yaml:\"clients_consensus\""
+ ];
+ // metadata from each client
+ repeated IdentifiedGenesisMetadata clients_metadata = 3 [
+ (gogoproto.nullable) = false,
+ (gogoproto.moretags) = "yaml:\"clients_metadata\""
+ ];
+ Params params = 4 [ (gogoproto.nullable) = false ];
+ // create localhost on initialization
+ bool create_localhost = 5
+ [ (gogoproto.moretags) = "yaml:\"create_localhost\"" ];
+ // the sequence for the next generated client identifier
+ uint64 next_client_sequence = 6
+ [ (gogoproto.moretags) = "yaml:\"next_client_sequence\"" ];
+}
+
+// GenesisMetadata defines the genesis type for metadata that clients may return
+// with ExportMetadata
+message GenesisMetadata {
+ option (gogoproto.goproto_getters) = false;
+
+ // store key of metadata without clientID-prefix
+ bytes key = 1;
+ // metadata value
+ bytes value = 2;
+}
+
+// IdentifiedGenesisMetadata has the client metadata with the corresponding
+// client id.
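A note on the ordering just described: heights compare first by revision number and only then by revision height, which keeps comparisons meaningful across revision resets. The following self-contained sketch illustrates that rule; the Height struct here simply mirrors the generated type rather than importing it.

```go
package main

import "fmt"

// Height mirrors the proto message above: the revision number takes precedence
// and the revision height breaks ties within the same revision.
type Height struct {
	RevisionNumber uint64
	RevisionHeight uint64
}

// Compare returns -1, 0 or 1 if h is respectively lower than, equal to or
// greater than other, using lexicographic (number, height) ordering.
func (h Height) Compare(other Height) int {
	switch {
	case h.RevisionNumber < other.RevisionNumber:
		return -1
	case h.RevisionNumber > other.RevisionNumber:
		return 1
	case h.RevisionHeight < other.RevisionHeight:
		return -1
	case h.RevisionHeight > other.RevisionHeight:
		return 1
	default:
		return 0
	}
}

func main() {
	// A height in revision 2 is greater than any height in revision 1,
	// even if its RevisionHeight is numerically smaller.
	fmt.Println(Height{2, 5}.Compare(Height{1, 1000})) // 1
	fmt.Println(Height{1, 10}.Compare(Height{1, 10}))  // 0
}
```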
+message IdentifiedGenesisMetadata { + string client_id = 1 [ (gogoproto.moretags) = "yaml:\"client_id\"" ]; + repeated GenesisMetadata client_metadata = 2 [ + (gogoproto.nullable) = false, + (gogoproto.moretags) = "yaml:\"client_metadata\"" + ]; +} diff --git a/proto/ibcgo/core/client/v1/query.proto b/proto/ibcgo/core/client/v1/query.proto new file mode 100644 index 0000000000..fe218af442 --- /dev/null +++ b/proto/ibcgo/core/client/v1/query.proto @@ -0,0 +1,143 @@ +syntax = "proto3"; + +package ibcgo.core.client.v1; + +option go_package = "github.com/cosmos/ibc-go/core/02-client/types"; + +import "cosmos/base/query/v1beta1/pagination.proto"; +import "ibcgo/core/client/v1/client.proto"; +import "google/protobuf/any.proto"; +import "google/api/annotations.proto"; +import "gogoproto/gogo.proto"; + +// Query provides defines the gRPC querier service +service Query { + // ClientState queries an IBC light client. + rpc ClientState(QueryClientStateRequest) returns (QueryClientStateResponse) { + option (google.api.http).get = + "/ibc/core/client/v1/client_states/{client_id}"; + } + + // ClientStates queries all the IBC light clients of a chain. + rpc ClientStates(QueryClientStatesRequest) + returns (QueryClientStatesResponse) { + option (google.api.http).get = "/ibc/core/client/v1/client_states"; + } + + // ConsensusState queries a consensus state associated with a client state at + // a given height. + rpc ConsensusState(QueryConsensusStateRequest) + returns (QueryConsensusStateResponse) { + option (google.api.http).get = "/ibc/core/client/v1/consensus_states/" + "{client_id}/revision/{revision_number}/" + "height/{revision_height}"; + } + + // ConsensusStates queries all the consensus state associated with a given + // client. + rpc ConsensusStates(QueryConsensusStatesRequest) + returns (QueryConsensusStatesResponse) { + option (google.api.http).get = + "/ibc/core/client/v1/consensus_states/{client_id}"; + } + + // ClientParams queries all parameters of the ibc client. + rpc ClientParams(QueryClientParamsRequest) + returns (QueryClientParamsResponse) { + option (google.api.http).get = "/ibc/client/v1/params"; + } +} + +// QueryClientStateRequest is the request type for the Query/ClientState RPC +// method +message QueryClientStateRequest { + // client state unique identifier + string client_id = 1; +} + +// QueryClientStateResponse is the response type for the Query/ClientState RPC +// method. Besides the client state, it includes a proof and the height from +// which the proof was retrieved. +message QueryClientStateResponse { + // client state associated with the request identifier + google.protobuf.Any client_state = 1; + // merkle proof of existence + bytes proof = 2; + // height at which the proof was retrieved + ibcgo.core.client.v1.Height proof_height = 3 [ (gogoproto.nullable) = false ]; +} + +// QueryClientStatesRequest is the request type for the Query/ClientStates RPC +// method +message QueryClientStatesRequest { + // pagination request + cosmos.base.query.v1beta1.PageRequest pagination = 1; +} + +// QueryClientStatesResponse is the response type for the Query/ClientStates RPC +// method. +message QueryClientStatesResponse { + // list of stored ClientStates of the chain. 
+ repeated IdentifiedClientState client_states = 1 [ + (gogoproto.nullable) = false, + (gogoproto.castrepeated) = "IdentifiedClientStates" + ]; + // pagination response + cosmos.base.query.v1beta1.PageResponse pagination = 2; +} + +// QueryConsensusStateRequest is the request type for the Query/ConsensusState +// RPC method. Besides the consensus state, it includes a proof and the height +// from which the proof was retrieved. +message QueryConsensusStateRequest { + // client identifier + string client_id = 1; + // consensus state revision number + uint64 revision_number = 2; + // consensus state revision height + uint64 revision_height = 3; + // latest_height overrrides the height field and queries the latest stored + // ConsensusState + bool latest_height = 4; +} + +// QueryConsensusStateResponse is the response type for the Query/ConsensusState +// RPC method +message QueryConsensusStateResponse { + // consensus state associated with the client identifier at the given height + google.protobuf.Any consensus_state = 1; + // merkle proof of existence + bytes proof = 2; + // height at which the proof was retrieved + ibcgo.core.client.v1.Height proof_height = 3 [ (gogoproto.nullable) = false ]; +} + +// QueryConsensusStatesRequest is the request type for the Query/ConsensusStates +// RPC method. +message QueryConsensusStatesRequest { + // client identifier + string client_id = 1; + // pagination request + cosmos.base.query.v1beta1.PageRequest pagination = 2; +} + +// QueryConsensusStatesResponse is the response type for the +// Query/ConsensusStates RPC method +message QueryConsensusStatesResponse { + // consensus states associated with the identifier + repeated ConsensusStateWithHeight consensus_states = 1 + [ (gogoproto.nullable) = false ]; + // pagination response + cosmos.base.query.v1beta1.PageResponse pagination = 2; +} + +// QueryClientParamsRequest is the request type for the Query/ClientParams RPC +// method. +message QueryClientParamsRequest {} + +// QueryClientParamsResponse is the response type for the Query/ClientParams RPC +// method. +message QueryClientParamsResponse { + // params defines the parameters of the module. + Params params = 1; +} diff --git a/proto/ibcgo/core/client/v1/tx.proto b/proto/ibcgo/core/client/v1/tx.proto new file mode 100644 index 0000000000..722f6b49cf --- /dev/null +++ b/proto/ibcgo/core/client/v1/tx.proto @@ -0,0 +1,107 @@ +syntax = "proto3"; + +package ibcgo.core.client.v1; + +option go_package = "github.com/cosmos/ibc-go/core/02-client/types"; + +import "gogoproto/gogo.proto"; +import "google/protobuf/any.proto"; +import "ibcgo/core/client/v1/client.proto"; + +// Msg defines the ibc/client Msg service. +service Msg { + // CreateClient defines a rpc handler method for MsgCreateClient. + rpc CreateClient(MsgCreateClient) returns (MsgCreateClientResponse); + + // UpdateClient defines a rpc handler method for MsgUpdateClient. + rpc UpdateClient(MsgUpdateClient) returns (MsgUpdateClientResponse); + + // UpgradeClient defines a rpc handler method for MsgUpgradeClient. + rpc UpgradeClient(MsgUpgradeClient) returns (MsgUpgradeClientResponse); + + // SubmitMisbehaviour defines a rpc handler method for MsgSubmitMisbehaviour. 
+ rpc SubmitMisbehaviour(MsgSubmitMisbehaviour) + returns (MsgSubmitMisbehaviourResponse); +} + +// MsgCreateClient defines a message to create an IBC client +message MsgCreateClient { + option (gogoproto.equal) = false; + option (gogoproto.goproto_getters) = false; + + // light client state + google.protobuf.Any client_state = 1 + [ (gogoproto.moretags) = "yaml:\"client_state\"" ]; + // consensus state associated with the client that corresponds to a given + // height. + google.protobuf.Any consensus_state = 2 + [ (gogoproto.moretags) = "yaml:\"consensus_state\"" ]; + // signer address + string signer = 3; +} + +// MsgCreateClientResponse defines the Msg/CreateClient response type. +message MsgCreateClientResponse {} + +// MsgUpdateClient defines an sdk.Msg to update a IBC client state using +// the given header. +message MsgUpdateClient { + option (gogoproto.equal) = false; + option (gogoproto.goproto_getters) = false; + + // client unique identifier + string client_id = 1 [ (gogoproto.moretags) = "yaml:\"client_id\"" ]; + // header to update the light client + google.protobuf.Any header = 2; + // signer address + string signer = 3; +} + +// MsgUpdateClientResponse defines the Msg/UpdateClient response type. +message MsgUpdateClientResponse {} + +// MsgUpgradeClient defines an sdk.Msg to upgrade an IBC client to a new client +// state +message MsgUpgradeClient { + option (gogoproto.equal) = false; + option (gogoproto.goproto_getters) = false; + + // client unique identifier + string client_id = 1 [ (gogoproto.moretags) = "yaml:\"client_id\"" ]; + // upgraded client state + google.protobuf.Any client_state = 2 + [ (gogoproto.moretags) = "yaml:\"client_state\"" ]; + // upgraded consensus state, only contains enough information to serve as a + // basis of trust in update logic + google.protobuf.Any consensus_state = 3 + [ (gogoproto.moretags) = "yaml:\"consensus_state\"" ]; + // proof that old chain committed to new client + bytes proof_upgrade_client = 4 + [ (gogoproto.moretags) = "yaml:\"proof_upgrade_client\"" ]; + // proof that old chain committed to new consensus state + bytes proof_upgrade_consensus_state = 5 + [ (gogoproto.moretags) = "yaml:\"proof_upgrade_consensus_state\"" ]; + // signer address + string signer = 6; +} + +// MsgUpgradeClientResponse defines the Msg/UpgradeClient response type. +message MsgUpgradeClientResponse {} + +// MsgSubmitMisbehaviour defines an sdk.Msg type that submits Evidence for +// light client misbehaviour. +message MsgSubmitMisbehaviour { + option (gogoproto.equal) = false; + option (gogoproto.goproto_getters) = false; + + // client unique identifier + string client_id = 1 [ (gogoproto.moretags) = "yaml:\"client_id\"" ]; + // misbehaviour used for freezing the light client + google.protobuf.Any misbehaviour = 2; + // signer address + string signer = 3; +} + +// MsgSubmitMisbehaviourResponse defines the Msg/SubmitMisbehaviour response +// type. +message MsgSubmitMisbehaviourResponse {} diff --git a/proto/ibcgo/core/commitment/v1/commitment.proto b/proto/ibcgo/core/commitment/v1/commitment.proto new file mode 100644 index 0000000000..373a77ff83 --- /dev/null +++ b/proto/ibcgo/core/commitment/v1/commitment.proto @@ -0,0 +1,39 @@ +syntax = "proto3"; + +package ibcgo.core.commitment.v1; + +option go_package = "github.com/cosmos/ibc-go/core/23-commitment/types"; + +import "gogoproto/gogo.proto"; +import "confio/proofs.proto"; + +// MerkleRoot defines a merkle root hash. +// In the Cosmos SDK, the AppHash of a block header becomes the root. 
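Since MsgCreateClient and the other client messages above carry their client and consensus states as google.protobuf.Any, a concrete state has to be packed before it can be placed in a message. A hedged sketch using the Cosmos SDK's codectypes.NewAnyWithValue helper (the Any mapping to github.com/cosmos/cosmos-sdk/codec/types is the one configured in scripts/protocgen.sh later in this patch); the localhost client state used here is the one defined further down, and all identifiers are illustrative.

```go
package main

import (
	"fmt"
	"log"

	codectypes "github.com/cosmos/cosmos-sdk/codec/types"

	clienttypes "github.com/cosmos/ibc-go/core/02-client/types"
	localhosttypes "github.com/cosmos/ibc-go/light-clients/09-localhost/types"
)

func main() {
	// A concrete client state (the localhost client defined later in this patch),
	// packed into a google.protobuf.Any so it can be carried by MsgCreateClient.
	clientState := &localhosttypes.ClientState{
		ChainId: "test-chain",
		Height:  clienttypes.Height{RevisionNumber: 0, RevisionHeight: 10},
	}

	anyClientState, err := codectypes.NewAnyWithValue(clientState)
	if err != nil {
		log.Fatal(err)
	}

	msg := &clienttypes.MsgCreateClient{
		ClientState: anyClientState,
		// ConsensusState would be packed the same way when the client type needs one.
		Signer: "cosmos1...", // creator address placeholder
	}
	fmt.Println(msg.ClientState.TypeUrl)
}
```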
+message MerkleRoot { + option (gogoproto.goproto_getters) = false; + + bytes hash = 1; +} + +// MerklePrefix is merkle path prefixed to the key. +// The constructed key from the Path and the key will be append(Path.KeyPath, +// append(Path.KeyPrefix, key...)) +message MerklePrefix { + bytes key_prefix = 1 [ (gogoproto.moretags) = "yaml:\"key_prefix\"" ]; +} + +// MerklePath is the path used to verify commitment proofs, which can be an +// arbitrary structured object (defined by a commitment type). +// MerklePath is represented from root-to-leaf +message MerklePath { + option (gogoproto.goproto_stringer) = false; + + repeated string key_path = 1 [ (gogoproto.moretags) = "yaml:\"key_path\"" ]; +} + +// MerkleProof is a wrapper type over a chain of CommitmentProofs. +// It demonstrates membership or non-membership for an element or set of +// elements, verifiable in conjunction with a known commitment root. Proofs +// should be succinct. +// MerkleProofs are ordered from leaf-to-root +message MerkleProof { repeated ics23.CommitmentProof proofs = 1; } diff --git a/proto/ibcgo/core/connection/v1/connection.proto b/proto/ibcgo/core/connection/v1/connection.proto new file mode 100644 index 0000000000..39f3925c55 --- /dev/null +++ b/proto/ibcgo/core/connection/v1/connection.proto @@ -0,0 +1,108 @@ +syntax = "proto3"; + +package ibcgo.core.connection.v1; + +option go_package = "github.com/cosmos/ibc-go/core/03-connection/types"; + +import "gogoproto/gogo.proto"; +import "ibcgo/core/commitment/v1/commitment.proto"; + +// ICS03 - Connection Data Structures as defined in +// https://github.com/cosmos/ics/tree/master/spec/ics-003-connection-semantics#data-structures + +// ConnectionEnd defines a stateful object on a chain connected to another +// separate one. +// NOTE: there must only be 2 defined ConnectionEnds to establish +// a connection between two chains. +message ConnectionEnd { + option (gogoproto.goproto_getters) = false; + // client associated with this connection. + string client_id = 1 [ (gogoproto.moretags) = "yaml:\"client_id\"" ]; + // IBC version which can be utilised to determine encodings or protocols for + // channels or packets utilising this connection. + repeated Version versions = 2; + // current state of the connection end. + State state = 3; + // counterparty chain associated with this connection. + Counterparty counterparty = 4 [ (gogoproto.nullable) = false ]; + // delay period that must pass before a consensus state can be used for + // packet-verification NOTE: delay period logic is only implemented by some + // clients. + uint64 delay_period = 5 [ (gogoproto.moretags) = "yaml:\"delay_period\"" ]; +} + +// IdentifiedConnection defines a connection with additional connection +// identifier field. +message IdentifiedConnection { + option (gogoproto.goproto_getters) = false; + // connection identifier. + string id = 1 [ (gogoproto.moretags) = "yaml:\"id\"" ]; + // client associated with this connection. + string client_id = 2 [ (gogoproto.moretags) = "yaml:\"client_id\"" ]; + // IBC version which can be utilised to determine encodings or protocols for + // channels or packets utilising this connection + repeated Version versions = 3; + // current state of the connection end. + State state = 4; + // counterparty chain associated with this connection. + Counterparty counterparty = 5 [ (gogoproto.nullable) = false ]; + // delay period associated with this connection. 
+ uint64 delay_period = 6 [ (gogoproto.moretags) = "yaml:\"delay_period\"" ]; +} + +// State defines if a connection is in one of the following states: +// INIT, TRYOPEN, OPEN or UNINITIALIZED. +enum State { + option (gogoproto.goproto_enum_prefix) = false; + + // Default State + STATE_UNINITIALIZED_UNSPECIFIED = 0 + [ (gogoproto.enumvalue_customname) = "UNINITIALIZED" ]; + // A connection end has just started the opening handshake. + STATE_INIT = 1 [ (gogoproto.enumvalue_customname) = "INIT" ]; + // A connection end has acknowledged the handshake step on the counterparty + // chain. + STATE_TRYOPEN = 2 [ (gogoproto.enumvalue_customname) = "TRYOPEN" ]; + // A connection end has completed the handshake. + STATE_OPEN = 3 [ (gogoproto.enumvalue_customname) = "OPEN" ]; +} + +// Counterparty defines the counterparty chain associated with a connection end. +message Counterparty { + option (gogoproto.goproto_getters) = false; + + // identifies the client on the counterparty chain associated with a given + // connection. + string client_id = 1 [ (gogoproto.moretags) = "yaml:\"client_id\"" ]; + // identifies the connection end on the counterparty chain associated with a + // given connection. + string connection_id = 2 [ (gogoproto.moretags) = "yaml:\"connection_id\"" ]; + // commitment merkle prefix of the counterparty chain. + ibcgo.core.commitment.v1.MerklePrefix prefix = 3 + [ (gogoproto.nullable) = false ]; +} + +// ClientPaths define all the connection paths for a client state. +message ClientPaths { + // list of connection paths + repeated string paths = 1; +} + +// ConnectionPaths define all the connection paths for a given client state. +message ConnectionPaths { + // client state unique identifier + string client_id = 1 [ (gogoproto.moretags) = "yaml:\"client_id\"" ]; + // list of connection paths + repeated string paths = 2; +} + +// Version defines the versioning scheme used to negotiate the IBC verison in +// the connection handshake. +message Version { + option (gogoproto.goproto_getters) = false; + + // unique version identifier + string identifier = 1; + // list of features compatible with the specified identifier + repeated string features = 2; +} diff --git a/proto/ibcgo/core/connection/v1/genesis.proto b/proto/ibcgo/core/connection/v1/genesis.proto new file mode 100644 index 0000000000..3e693c84d4 --- /dev/null +++ b/proto/ibcgo/core/connection/v1/genesis.proto @@ -0,0 +1,21 @@ +syntax = "proto3"; + +package ibcgo.core.connection.v1; + +option go_package = "github.com/cosmos/ibc-go/core/03-connection/types"; + +import "gogoproto/gogo.proto"; +import "ibcgo/core/connection/v1/connection.proto"; + +// GenesisState defines the ibc connection submodule's genesis state. 
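Relating to the MerklePrefix and MerklePath messages above: a value in the counterparty's store is located by prepending the counterparty's key prefix to the ICS-024 style path of that value. The dependency-free sketch below shows the shape of that construction; the real helper lives in the 23-commitment types, so treat this purely as an illustration.

```go
package main

import "fmt"

// MerklePrefix and MerklePath mirror the proto messages above.
type MerklePrefix struct {
	KeyPrefix []byte
}

type MerklePath struct {
	KeyPath []string
}

// ApplyPrefix builds the full key path committed to in the counterparty store:
// the store prefix (for SDK chains, "ibc") followed by the path of the value.
func ApplyPrefix(prefix MerklePrefix, path string) MerklePath {
	return MerklePath{KeyPath: []string{string(prefix.KeyPrefix), path}}
}

func main() {
	prefix := MerklePrefix{KeyPrefix: []byte("ibc")}
	// Path under which a channel end is stored (ICS-024 style path).
	path := "channelEnds/ports/transfer/channels/channel-0"
	fmt.Println(ApplyPrefix(prefix, path).KeyPath)
}
```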
+message GenesisState { + repeated IdentifiedConnection connections = 1 + [ (gogoproto.nullable) = false ]; + repeated ConnectionPaths client_connection_paths = 2 [ + (gogoproto.nullable) = false, + (gogoproto.moretags) = "yaml:\"client_connection_paths\"" + ]; + // the sequence for the next generated connection identifier + uint64 next_connection_sequence = 3 + [ (gogoproto.moretags) = "yaml:\"next_connection_sequence\"" ]; +} diff --git a/proto/ibcgo/core/connection/v1/query.proto b/proto/ibcgo/core/connection/v1/query.proto new file mode 100644 index 0000000000..c4ff165a5d --- /dev/null +++ b/proto/ibcgo/core/connection/v1/query.proto @@ -0,0 +1,145 @@ +syntax = "proto3"; + +package ibcgo.core.connection.v1; + +option go_package = "github.com/cosmos/ibc-go/core/03-connection/types"; + +import "gogoproto/gogo.proto"; +import "cosmos/base/query/v1beta1/pagination.proto"; +import "ibcgo/core/client/v1/client.proto"; +import "ibcgo/core/connection/v1/connection.proto"; +import "google/api/annotations.proto"; +import "google/protobuf/any.proto"; + +// Query provides defines the gRPC querier service +service Query { + // Connection queries an IBC connection end. + rpc Connection(QueryConnectionRequest) returns (QueryConnectionResponse) { + option (google.api.http).get = + "/ibc/core/connection/v1/connections/{connection_id}"; + } + + // Connections queries all the IBC connections of a chain. + rpc Connections(QueryConnectionsRequest) returns (QueryConnectionsResponse) { + option (google.api.http).get = "/ibc/core/connection/v1/connections"; + } + + // ClientConnections queries the connection paths associated with a client + // state. + rpc ClientConnections(QueryClientConnectionsRequest) + returns (QueryClientConnectionsResponse) { + option (google.api.http).get = + "/ibc/core/connection/v1/client_connections/{client_id}"; + } + + // ConnectionClientState queries the client state associated with the + // connection. + rpc ConnectionClientState(QueryConnectionClientStateRequest) + returns (QueryConnectionClientStateResponse) { + option (google.api.http).get = + "/ibc/core/connection/v1/connections/{connection_id}/client_state"; + } + + // ConnectionConsensusState queries the consensus state associated with the + // connection. + rpc ConnectionConsensusState(QueryConnectionConsensusStateRequest) + returns (QueryConnectionConsensusStateResponse) { + option (google.api.http).get = + "/ibc/core/connection/v1/connections/{connection_id}/consensus_state/" + "revision/{revision_number}/height/{revision_height}"; + } +} + +// QueryConnectionRequest is the request type for the Query/Connection RPC +// method +message QueryConnectionRequest { + // connection unique identifier + string connection_id = 1; +} + +// QueryConnectionResponse is the response type for the Query/Connection RPC +// method. Besides the connection end, it includes a proof and the height from +// which the proof was retrieved. +message QueryConnectionResponse { + // connection associated with the request identifier + ibcgo.core.connection.v1.ConnectionEnd connection = 1; + // merkle proof of existence + bytes proof = 2; + // height at which the proof was retrieved + ibcgo.core.client.v1.Height proof_height = 3 [ (gogoproto.nullable) = false ]; +} + +// QueryConnectionsRequest is the request type for the Query/Connections RPC +// method +message QueryConnectionsRequest { + cosmos.base.query.v1beta1.PageRequest pagination = 1; +} + +// QueryConnectionsResponse is the response type for the Query/Connections RPC +// method. 
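A short sketch of a paginated call to the Connections endpoint defined above, assuming the generated types under core/03-connection/types and the Cosmos SDK pagination types; the endpoint address and page size are illustrative.

```go
package main

import (
	"context"
	"fmt"
	"log"

	"google.golang.org/grpc"

	"github.com/cosmos/cosmos-sdk/types/query"

	connectiontypes "github.com/cosmos/ibc-go/core/03-connection/types"
)

func main() {
	conn, err := grpc.Dial("localhost:9090", grpc.WithInsecure())
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	queryClient := connectiontypes.NewQueryClient(conn)

	// Page through the stored connection ends, 100 at a time.
	res, err := queryClient.Connections(context.Background(), &connectiontypes.QueryConnectionsRequest{
		Pagination: &query.PageRequest{Limit: 100},
	})
	if err != nil {
		log.Fatal(err)
	}

	for _, c := range res.Connections {
		fmt.Println(c.Id, c.State)
	}
	// When set, res.Pagination.NextKey can be passed back in PageRequest.Key
	// to fetch the next page.
}
```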
+message QueryConnectionsResponse { + // list of stored connections of the chain. + repeated ibcgo.core.connection.v1.IdentifiedConnection connections = 1; + // pagination response + cosmos.base.query.v1beta1.PageResponse pagination = 2; + // query block height + ibcgo.core.client.v1.Height height = 3 [ (gogoproto.nullable) = false ]; +} + +// QueryClientConnectionsRequest is the request type for the +// Query/ClientConnections RPC method +message QueryClientConnectionsRequest { + // client identifier associated with a connection + string client_id = 1; +} + +// QueryClientConnectionsResponse is the response type for the +// Query/ClientConnections RPC method +message QueryClientConnectionsResponse { + // slice of all the connection paths associated with a client. + repeated string connection_paths = 1; + // merkle proof of existence + bytes proof = 2; + // height at which the proof was generated + ibcgo.core.client.v1.Height proof_height = 3 [ (gogoproto.nullable) = false ]; +} + +// QueryConnectionClientStateRequest is the request type for the +// Query/ConnectionClientState RPC method +message QueryConnectionClientStateRequest { + // connection identifier + string connection_id = 1 [ (gogoproto.moretags) = "yaml:\"connection_id\"" ]; +} + +// QueryConnectionClientStateResponse is the response type for the +// Query/ConnectionClientState RPC method +message QueryConnectionClientStateResponse { + // client state associated with the channel + ibcgo.core.client.v1.IdentifiedClientState identified_client_state = 1; + // merkle proof of existence + bytes proof = 2; + // height at which the proof was retrieved + ibcgo.core.client.v1.Height proof_height = 3 [ (gogoproto.nullable) = false ]; +} + +// QueryConnectionConsensusStateRequest is the request type for the +// Query/ConnectionConsensusState RPC method +message QueryConnectionConsensusStateRequest { + // connection identifier + string connection_id = 1 [ (gogoproto.moretags) = "yaml:\"connection_id\"" ]; + uint64 revision_number = 2; + uint64 revision_height = 3; +} + +// QueryConnectionConsensusStateResponse is the response type for the +// Query/ConnectionConsensusState RPC method +message QueryConnectionConsensusStateResponse { + // consensus state associated with the channel + google.protobuf.Any consensus_state = 1; + // client ID associated with the consensus state + string client_id = 2; + // merkle proof of existence + bytes proof = 3; + // height at which the proof was retrieved + ibcgo.core.client.v1.Height proof_height = 4 [ (gogoproto.nullable) = false ]; +} diff --git a/proto/ibcgo/core/connection/v1/tx.proto b/proto/ibcgo/core/connection/v1/tx.proto new file mode 100644 index 0000000000..a371633c9e --- /dev/null +++ b/proto/ibcgo/core/connection/v1/tx.proto @@ -0,0 +1,140 @@ +syntax = "proto3"; + +package ibcgo.core.connection.v1; + +option go_package = "github.com/cosmos/ibc-go/core/03-connection/types"; + +import "gogoproto/gogo.proto"; +import "google/protobuf/any.proto"; +import "ibcgo/core/client/v1/client.proto"; +import "ibcgo/core/connection/v1/connection.proto"; + +// Msg defines the ibc/connection Msg service. +service Msg { + // ConnectionOpenInit defines a rpc handler method for MsgConnectionOpenInit. + rpc ConnectionOpenInit(MsgConnectionOpenInit) + returns (MsgConnectionOpenInitResponse); + + // ConnectionOpenTry defines a rpc handler method for MsgConnectionOpenTry. 
+ rpc ConnectionOpenTry(MsgConnectionOpenTry) + returns (MsgConnectionOpenTryResponse); + + // ConnectionOpenAck defines a rpc handler method for MsgConnectionOpenAck. + rpc ConnectionOpenAck(MsgConnectionOpenAck) + returns (MsgConnectionOpenAckResponse); + + // ConnectionOpenConfirm defines a rpc handler method for + // MsgConnectionOpenConfirm. + rpc ConnectionOpenConfirm(MsgConnectionOpenConfirm) + returns (MsgConnectionOpenConfirmResponse); +} + +// MsgConnectionOpenInit defines the msg sent by an account on Chain A to +// initialize a connection with Chain B. +message MsgConnectionOpenInit { + option (gogoproto.equal) = false; + option (gogoproto.goproto_getters) = false; + + string client_id = 1 [ (gogoproto.moretags) = "yaml:\"client_id\"" ]; + Counterparty counterparty = 2 [ (gogoproto.nullable) = false ]; + Version version = 3; + uint64 delay_period = 4 [ (gogoproto.moretags) = "yaml:\"delay_period\"" ]; + string signer = 5; +} + +// MsgConnectionOpenInitResponse defines the Msg/ConnectionOpenInit response +// type. +message MsgConnectionOpenInitResponse {} + +// MsgConnectionOpenTry defines a msg sent by a Relayer to try to open a +// connection on Chain B. +message MsgConnectionOpenTry { + option (gogoproto.equal) = false; + option (gogoproto.goproto_getters) = false; + + string client_id = 1 [ (gogoproto.moretags) = "yaml:\"client_id\"" ]; + // in the case of crossing hello's, when both chains call OpenInit, we need + // the connection identifier of the previous connection in state INIT + string previous_connection_id = 2 + [ (gogoproto.moretags) = "yaml:\"previous_connection_id\"" ]; + google.protobuf.Any client_state = 3 + [ (gogoproto.moretags) = "yaml:\"client_state\"" ]; + Counterparty counterparty = 4 [ (gogoproto.nullable) = false ]; + uint64 delay_period = 5 [ (gogoproto.moretags) = "yaml:\"delay_period\"" ]; + repeated Version counterparty_versions = 6 + [ (gogoproto.moretags) = "yaml:\"counterparty_versions\"" ]; + ibcgo.core.client.v1.Height proof_height = 7 [ + (gogoproto.moretags) = "yaml:\"proof_height\"", + (gogoproto.nullable) = false + ]; + // proof of the initialization the connection on Chain A: `UNITIALIZED -> + // INIT` + bytes proof_init = 8 [ (gogoproto.moretags) = "yaml:\"proof_init\"" ]; + // proof of client state included in message + bytes proof_client = 9 [ (gogoproto.moretags) = "yaml:\"proof_client\"" ]; + // proof of client consensus state + bytes proof_consensus = 10 + [ (gogoproto.moretags) = "yaml:\"proof_consensus\"" ]; + ibcgo.core.client.v1.Height consensus_height = 11 [ + (gogoproto.moretags) = "yaml:\"consensus_height\"", + (gogoproto.nullable) = false + ]; + string signer = 12; +} + +// MsgConnectionOpenTryResponse defines the Msg/ConnectionOpenTry response type. +message MsgConnectionOpenTryResponse {} + +// MsgConnectionOpenAck defines a msg sent by a Relayer to Chain A to +// acknowledge the change of connection state to TRYOPEN on Chain B. 
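To make the handshake wiring concrete, here is a hedged sketch of the first message a relayer builds from these definitions, MsgConnectionOpenInit, assuming the generated Go types; the client identifiers, commitment prefix, version features and signer are placeholders.

```go
package main

import (
	"fmt"

	connectiontypes "github.com/cosmos/ibc-go/core/03-connection/types"
	commitmenttypes "github.com/cosmos/ibc-go/core/23-commitment/types"
)

func main() {
	// Counterparty as seen from chain A: the client it keeps for chain B and
	// chain B's commitment prefix. The connection id is left empty at INIT time.
	counterparty := connectiontypes.Counterparty{
		ClientId: "07-tendermint-0", // placeholder identifier
		Prefix:   commitmenttypes.MerklePrefix{KeyPrefix: []byte("ibc")},
	}

	msg := &connectiontypes.MsgConnectionOpenInit{
		ClientId:     "07-tendermint-1", // chain A's client tracking chain B (placeholder)
		Counterparty: counterparty,
		Version: &connectiontypes.Version{
			Identifier: "1",
			Features:   []string{"ORDER_ORDERED", "ORDER_UNORDERED"},
		},
		DelayPeriod: 0,
		Signer:      "cosmos1...", // relayer address placeholder
	}
	fmt.Println(msg.ClientId, msg.Counterparty.ClientId)
}
```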
+message MsgConnectionOpenAck { + option (gogoproto.equal) = false; + option (gogoproto.goproto_getters) = false; + + string connection_id = 1 [ (gogoproto.moretags) = "yaml:\"connection_id\"" ]; + string counterparty_connection_id = 2 + [ (gogoproto.moretags) = "yaml:\"counterparty_connection_id\"" ]; + Version version = 3; + google.protobuf.Any client_state = 4 + [ (gogoproto.moretags) = "yaml:\"client_state\"" ]; + ibcgo.core.client.v1.Height proof_height = 5 [ + (gogoproto.moretags) = "yaml:\"proof_height\"", + (gogoproto.nullable) = false + ]; + // proof of the initialization the connection on Chain B: `UNITIALIZED -> + // TRYOPEN` + bytes proof_try = 6 [ (gogoproto.moretags) = "yaml:\"proof_try\"" ]; + // proof of client state included in message + bytes proof_client = 7 [ (gogoproto.moretags) = "yaml:\"proof_client\"" ]; + // proof of client consensus state + bytes proof_consensus = 8 + [ (gogoproto.moretags) = "yaml:\"proof_consensus\"" ]; + ibcgo.core.client.v1.Height consensus_height = 9 [ + (gogoproto.moretags) = "yaml:\"consensus_height\"", + (gogoproto.nullable) = false + ]; + string signer = 10; +} + +// MsgConnectionOpenAckResponse defines the Msg/ConnectionOpenAck response type. +message MsgConnectionOpenAckResponse {} + +// MsgConnectionOpenConfirm defines a msg sent by a Relayer to Chain B to +// acknowledge the change of connection state to OPEN on Chain A. +message MsgConnectionOpenConfirm { + option (gogoproto.equal) = false; + option (gogoproto.goproto_getters) = false; + + string connection_id = 1 [ (gogoproto.moretags) = "yaml:\"connection_id\"" ]; + // proof for the change of the connection state on Chain A: `INIT -> OPEN` + bytes proof_ack = 2 [ (gogoproto.moretags) = "yaml:\"proof_ack\"" ]; + ibcgo.core.client.v1.Height proof_height = 3 [ + (gogoproto.moretags) = "yaml:\"proof_height\"", + (gogoproto.nullable) = false + ]; + string signer = 4; +} + +// MsgConnectionOpenConfirmResponse defines the Msg/ConnectionOpenConfirm +// response type. +message MsgConnectionOpenConfirmResponse {} diff --git a/proto/ibcgo/core/types/v1/genesis.proto b/proto/ibcgo/core/types/v1/genesis.proto new file mode 100644 index 0000000000..fd73a2b054 --- /dev/null +++ b/proto/ibcgo/core/types/v1/genesis.proto @@ -0,0 +1,29 @@ +syntax = "proto3"; + +package ibcgo.core.types.v1; + +option go_package = "github.com/cosmos/ibc-go/core/types"; + +import "gogoproto/gogo.proto"; +import "ibcgo/core/client/v1/genesis.proto"; +import "ibcgo/core/connection/v1/genesis.proto"; +import "ibcgo/core/channel/v1/genesis.proto"; + +// GenesisState defines the ibc module's genesis state. 
+message GenesisState { + // ICS002 - Clients genesis state + ibcgo.core.client.v1.GenesisState client_genesis = 1 [ + (gogoproto.nullable) = false, + (gogoproto.moretags) = "yaml:\"client_genesis\"" + ]; + // ICS003 - Connections genesis state + ibcgo.core.connection.v1.GenesisState connection_genesis = 2 [ + (gogoproto.nullable) = false, + (gogoproto.moretags) = "yaml:\"connection_genesis\"" + ]; + // ICS004 - Channel genesis state + ibcgo.core.channel.v1.GenesisState channel_genesis = 3 [ + (gogoproto.nullable) = false, + (gogoproto.moretags) = "yaml:\"channel_genesis\"" + ]; +} diff --git a/proto/ibcgo/lightclients/localhost/v1/localhost.proto b/proto/ibcgo/lightclients/localhost/v1/localhost.proto new file mode 100644 index 0000000000..110a81b77a --- /dev/null +++ b/proto/ibcgo/lightclients/localhost/v1/localhost.proto @@ -0,0 +1,18 @@ +syntax = "proto3"; + +package ibcgo.lightclients.localhost.v1; + +option go_package = "github.com/cosmos/ibc-go/light-clients/09-localhost/types"; + +import "gogoproto/gogo.proto"; +import "ibcgo/core/client/v1/client.proto"; + +// ClientState defines a loopback (localhost) client. It requires (read-only) +// access to keys outside the client prefix. +message ClientState { + option (gogoproto.goproto_getters) = false; + // self chain ID + string chain_id = 1 [ (gogoproto.moretags) = "yaml:\"chain_id\"" ]; + // self latest block height + ibcgo.core.client.v1.Height height = 2 [ (gogoproto.nullable) = false ]; +} diff --git a/proto/ibcgo/lightclients/solomachine/v1/solomachine.proto b/proto/ibcgo/lightclients/solomachine/v1/solomachine.proto new file mode 100644 index 0000000000..d4d228489c --- /dev/null +++ b/proto/ibcgo/lightclients/solomachine/v1/solomachine.proto @@ -0,0 +1,206 @@ +syntax = "proto3"; + +package ibcgo.lightclients.solomachine.v1; + +option go_package = "github.com/cosmos/ibc-go/light-clients/06-solomachine/types"; + +import "ibcgo/core/connection/v1/connection.proto"; +import "ibcgo/core/channel/v1/channel.proto"; +import "gogoproto/gogo.proto"; +import "google/protobuf/any.proto"; + +// ClientState defines a solo machine client that tracks the current consensus +// state and if the client is frozen. +message ClientState { + option (gogoproto.goproto_getters) = false; + // latest sequence of the client state + uint64 sequence = 1; + // frozen sequence of the solo machine + uint64 frozen_sequence = 2 + [ (gogoproto.moretags) = "yaml:\"frozen_sequence\"" ]; + ConsensusState consensus_state = 3 + [ (gogoproto.moretags) = "yaml:\"consensus_state\"" ]; + // when set to true, will allow governance to update a solo machine client. + // The client will be unfrozen if it is frozen. + bool allow_update_after_proposal = 4 + [ (gogoproto.moretags) = "yaml:\"allow_update_after_proposal\"" ]; +} + +// ConsensusState defines a solo machine consensus state. The sequence of a +// consensus state is contained in the "height" key used in storing the +// consensus state. +message ConsensusState { + option (gogoproto.goproto_getters) = false; + // public key of the solo machine + google.protobuf.Any public_key = 1 + [ (gogoproto.moretags) = "yaml:\"public_key\"" ]; + // diversifier allows the same public key to be re-used across different solo + // machine clients (potentially on different chains) without being considered + // misbehaviour. 
+ string diversifier = 2; + uint64 timestamp = 3; +} + +// Header defines a solo machine consensus header +message Header { + option (gogoproto.goproto_getters) = false; + // sequence to update solo machine public key at + uint64 sequence = 1; + uint64 timestamp = 2; + bytes signature = 3; + google.protobuf.Any new_public_key = 4 + [ (gogoproto.moretags) = "yaml:\"new_public_key\"" ]; + string new_diversifier = 5 + [ (gogoproto.moretags) = "yaml:\"new_diversifier\"" ]; +} + +// Misbehaviour defines misbehaviour for a solo machine which consists +// of a sequence and two signatures over different messages at that sequence. +message Misbehaviour { + option (gogoproto.goproto_getters) = false; + string client_id = 1 [ (gogoproto.moretags) = "yaml:\"client_id\"" ]; + uint64 sequence = 2; + SignatureAndData signature_one = 3 + [ (gogoproto.moretags) = "yaml:\"signature_one\"" ]; + SignatureAndData signature_two = 4 + [ (gogoproto.moretags) = "yaml:\"signature_two\"" ]; +} + +// SignatureAndData contains a signature and the data signed over to create that +// signature. +message SignatureAndData { + option (gogoproto.goproto_getters) = false; + bytes signature = 1; + DataType data_type = 2 [ (gogoproto.moretags) = "yaml:\"data_type\"" ]; + bytes data = 3; + uint64 timestamp = 4; +} + +// TimestampedSignatureData contains the signature data and the timestamp of the +// signature. +message TimestampedSignatureData { + option (gogoproto.goproto_getters) = false; + bytes signature_data = 1 [ (gogoproto.moretags) = "yaml:\"signature_data\"" ]; + uint64 timestamp = 2; +} + +// SignBytes defines the signed bytes used for signature verification. +message SignBytes { + option (gogoproto.goproto_getters) = false; + + uint64 sequence = 1; + uint64 timestamp = 2; + string diversifier = 3; + // type of the data used + DataType data_type = 4 [ (gogoproto.moretags) = "yaml:\"data_type\"" ]; + // marshaled data + bytes data = 5; +} + +// DataType defines the type of solo machine proof being created. This is done +// to preserve uniqueness of different data sign byte encodings. 
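The solo machine client proves state with signatures over SignBytes rather than with merkle proofs. Below is a standalone sketch of producing and checking such a signature; the struct mirrors the SignBytes message above instead of importing generated code, and JSON marshaling stands in for the proto encoding the real client uses.

```go
package main

import (
	"crypto/ed25519"
	"crypto/rand"
	"encoding/json"
	"fmt"
	"log"
)

// SignBytes mirrors the proto message above; the real client marshals the
// generated proto type, JSON is used here only to keep the sketch standalone.
type SignBytes struct {
	Sequence    uint64
	Timestamp   uint64
	Diversifier string
	DataType    int32 // e.g. a client state verification value from the DataType enum
	Data        []byte
}

func main() {
	sb := SignBytes{
		Sequence:    7,
		Timestamp:   1614000000,
		Diversifier: "diversifier-1", // placeholder
		DataType:    1,
		Data:        []byte("marshaled client state"),
	}

	bz, err := json.Marshal(sb)
	if err != nil {
		log.Fatal(err)
	}

	// The solo machine operator signs the bytes with its private key; the
	// on-chain client verifies the signature against the registered public key.
	pub, priv, err := ed25519.GenerateKey(rand.Reader)
	if err != nil {
		log.Fatal(err)
	}
	sig := ed25519.Sign(priv, bz)
	fmt.Println(ed25519.Verify(pub, bz, sig))
}
```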
+enum DataType { + option (gogoproto.goproto_enum_prefix) = false; + + // Default State + DATA_TYPE_UNINITIALIZED_UNSPECIFIED = 0 + [ (gogoproto.enumvalue_customname) = "UNSPECIFIED" ]; + // Data type for client state verification + DATA_TYPE_CLIENT_STATE = 1 [ (gogoproto.enumvalue_customname) = "CLIENT" ]; + // Data type for consensus state verification + DATA_TYPE_CONSENSUS_STATE = 2 + [ (gogoproto.enumvalue_customname) = "CONSENSUS" ]; + // Data type for connection state verification + DATA_TYPE_CONNECTION_STATE = 3 + [ (gogoproto.enumvalue_customname) = "CONNECTION" ]; + // Data type for channel state verification + DATA_TYPE_CHANNEL_STATE = 4 [ (gogoproto.enumvalue_customname) = "CHANNEL" ]; + // Data type for packet commitment verification + DATA_TYPE_PACKET_COMMITMENT = 5 + [ (gogoproto.enumvalue_customname) = "PACKETCOMMITMENT" ]; + // Data type for packet acknowledgement verification + DATA_TYPE_PACKET_ACKNOWLEDGEMENT = 6 + [ (gogoproto.enumvalue_customname) = "PACKETACKNOWLEDGEMENT" ]; + // Data type for packet receipt absence verification + DATA_TYPE_PACKET_RECEIPT_ABSENCE = 7 + [ (gogoproto.enumvalue_customname) = "PACKETRECEIPTABSENCE" ]; + // Data type for next sequence recv verification + DATA_TYPE_NEXT_SEQUENCE_RECV = 8 + [ (gogoproto.enumvalue_customname) = "NEXTSEQUENCERECV" ]; + // Data type for header verification + DATA_TYPE_HEADER = 9 [ (gogoproto.enumvalue_customname) = "HEADER" ]; +} + +// HeaderData returns the SignBytes data for update verification. +message HeaderData { + option (gogoproto.goproto_getters) = false; + + // header public key + google.protobuf.Any new_pub_key = 1 + [ (gogoproto.moretags) = "yaml:\"new_pub_key\"" ]; + // header diversifier + string new_diversifier = 2 + [ (gogoproto.moretags) = "yaml:\"new_diversifier\"" ]; +} + +// ClientStateData returns the SignBytes data for client state verification. +message ClientStateData { + option (gogoproto.goproto_getters) = false; + + bytes path = 1; + google.protobuf.Any client_state = 2 + [ (gogoproto.moretags) = "yaml:\"client_state\"" ]; +} + +// ConsensusStateData returns the SignBytes data for consensus state +// verification. +message ConsensusStateData { + option (gogoproto.goproto_getters) = false; + + bytes path = 1; + google.protobuf.Any consensus_state = 2 + [ (gogoproto.moretags) = "yaml:\"consensus_state\"" ]; +} + +// ConnectionStateData returns the SignBytes data for connection state +// verification. +message ConnectionStateData { + option (gogoproto.goproto_getters) = false; + + bytes path = 1; + ibcgo.core.connection.v1.ConnectionEnd connection = 2; +} + +// ChannelStateData returns the SignBytes data for channel state +// verification. +message ChannelStateData { + option (gogoproto.goproto_getters) = false; + + bytes path = 1; + ibcgo.core.channel.v1.Channel channel = 2; +} + +// PacketCommitmentData returns the SignBytes data for packet commitment +// verification. +message PacketCommitmentData { + bytes path = 1; + bytes commitment = 2; +} + +// PacketAcknowledgementData returns the SignBytes data for acknowledgement +// verification. +message PacketAcknowledgementData { + bytes path = 1; + bytes acknowledgement = 2; +} + +// PacketReceiptAbsenceData returns the SignBytes data for +// packet receipt absence verification. +message PacketReceiptAbsenceData { bytes path = 1; } + +// NextSequenceRecvData returns the SignBytes data for verification of the next +// sequence to be received. 
+message NextSequenceRecvData { + bytes path = 1; + uint64 next_seq_recv = 2 [ (gogoproto.moretags) = "yaml:\"next_seq_recv\"" ]; +} diff --git a/proto/ibcgo/lightclients/tendermint/v1/tendermint.proto b/proto/ibcgo/lightclients/tendermint/v1/tendermint.proto new file mode 100644 index 0000000000..d6a408b646 --- /dev/null +++ b/proto/ibcgo/lightclients/tendermint/v1/tendermint.proto @@ -0,0 +1,146 @@ +syntax = "proto3"; + +package ibcgo.lightclients.tendermint.v1; + +option go_package = "github.com/cosmos/ibc-go/light-clients/07-tendermint/types"; + +import "tendermint/types/validator.proto"; +import "tendermint/types/types.proto"; +import "confio/proofs.proto"; +import "google/protobuf/duration.proto"; +import "google/protobuf/timestamp.proto"; +import "ibcgo/core/client/v1/client.proto"; +import "ibcgo/core/commitment/v1/commitment.proto"; +import "gogoproto/gogo.proto"; + +// ClientState from Tendermint tracks the current validator set, latest height, +// and a possible frozen height. +message ClientState { + option (gogoproto.goproto_getters) = false; + + string chain_id = 1; + Fraction trust_level = 2 [ + (gogoproto.nullable) = false, + (gogoproto.moretags) = "yaml:\"trust_level\"" + ]; + // duration of the period since the LastestTimestamp during which the + // submitted headers are valid for upgrade + google.protobuf.Duration trusting_period = 3 [ + (gogoproto.nullable) = false, + (gogoproto.stdduration) = true, + (gogoproto.moretags) = "yaml:\"trusting_period\"" + ]; + // duration of the staking unbonding period + google.protobuf.Duration unbonding_period = 4 [ + (gogoproto.nullable) = false, + (gogoproto.stdduration) = true, + (gogoproto.moretags) = "yaml:\"unbonding_period\"" + ]; + // defines how much new (untrusted) header's Time can drift into the future. + google.protobuf.Duration max_clock_drift = 5 [ + (gogoproto.nullable) = false, + (gogoproto.stdduration) = true, + (gogoproto.moretags) = "yaml:\"max_clock_drift\"" + ]; + // Block height when the client was frozen due to a misbehaviour + ibcgo.core.client.v1.Height frozen_height = 6 [ + (gogoproto.nullable) = false, + (gogoproto.moretags) = "yaml:\"frozen_height\"" + ]; + // Latest height the client was updated to + ibcgo.core.client.v1.Height latest_height = 7 [ + (gogoproto.nullable) = false, + (gogoproto.moretags) = "yaml:\"latest_height\"" + ]; + + // Proof specifications used in verifying counterparty state + repeated ics23.ProofSpec proof_specs = 8 + [ (gogoproto.moretags) = "yaml:\"proof_specs\"" ]; + + // Path at which next upgraded client will be committed. + // Each element corresponds to the key for a single CommitmentProof in the + // chained proof. 
NOTE: ClientState must stored under + // `{upgradePath}/{upgradeHeight}/clientState` ConsensusState must be stored + // under `{upgradepath}/{upgradeHeight}/consensusState` For SDK chains using + // the default upgrade module, upgrade_path should be []string{"upgrade", + // "upgradedIBCState"}` + repeated string upgrade_path = 9 + [ (gogoproto.moretags) = "yaml:\"upgrade_path\"" ]; + + // This flag, when set to true, will allow governance to recover a client + // which has expired + bool allow_update_after_expiry = 10 + [ (gogoproto.moretags) = "yaml:\"allow_update_after_expiry\"" ]; + // This flag, when set to true, will allow governance to unfreeze a client + // whose chain has experienced a misbehaviour event + bool allow_update_after_misbehaviour = 11 + [ (gogoproto.moretags) = "yaml:\"allow_update_after_misbehaviour\"" ]; +} + +// ConsensusState defines the consensus state from Tendermint. +message ConsensusState { + option (gogoproto.goproto_getters) = false; + + // timestamp that corresponds to the block height in which the ConsensusState + // was stored. + google.protobuf.Timestamp timestamp = 1 + [ (gogoproto.nullable) = false, (gogoproto.stdtime) = true ]; + // commitment root (i.e app hash) + ibcgo.core.commitment.v1.MerkleRoot root = 2 [ (gogoproto.nullable) = false ]; + bytes next_validators_hash = 3 [ + (gogoproto.casttype) = + "github.com/tendermint/tendermint/libs/bytes.HexBytes", + (gogoproto.moretags) = "yaml:\"next_validators_hash\"" + ]; +} + +// Misbehaviour is a wrapper over two conflicting Headers +// that implements Misbehaviour interface expected by ICS-02 +message Misbehaviour { + option (gogoproto.goproto_getters) = false; + + string client_id = 1 [ (gogoproto.moretags) = "yaml:\"client_id\"" ]; + Header header_1 = 2 [ + (gogoproto.customname) = "Header1", + (gogoproto.moretags) = "yaml:\"header_1\"" + ]; + Header header_2 = 3 [ + (gogoproto.customname) = "Header2", + (gogoproto.moretags) = "yaml:\"header_2\"" + ]; +} + +// Header defines the Tendermint client consensus Header. +// It encapsulates all the information necessary to update from a trusted +// Tendermint ConsensusState. The inclusion of TrustedHeight and +// TrustedValidators allows this update to process correctly, so long as the +// ConsensusState for the TrustedHeight exists, this removes race conditions +// among relayers The SignedHeader and ValidatorSet are the new untrusted update +// fields for the client. The TrustedHeight is the height of a stored +// ConsensusState on the client that will be used to verify the new untrusted +// header. The Trusted ConsensusState must be within the unbonding period of +// current time in order to correctly verify, and the TrustedValidators must +// hash to TrustedConsensusState.NextValidatorsHash since that is the last +// trusted validator set at the TrustedHeight. +message Header { + .tendermint.types.SignedHeader signed_header = 1 [ + (gogoproto.embed) = true, + (gogoproto.moretags) = "yaml:\"signed_header\"" + ]; + + .tendermint.types.ValidatorSet validator_set = 2 + [ (gogoproto.moretags) = "yaml:\"validator_set\"" ]; + ibcgo.core.client.v1.Height trusted_height = 3 [ + (gogoproto.nullable) = false, + (gogoproto.moretags) = "yaml:\"trusted_height\"" + ]; + .tendermint.types.ValidatorSet trusted_validators = 4 + [ (gogoproto.moretags) = "yaml:\"trusted_validators\"" ]; +} + +// Fraction defines the protobuf message type for tmmath.Fraction that only +// supports positive values. 
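+// As an illustrative note (not normative for this proto definition), the default
+// Tendermint light client trust level of 1/3 would be encoded with numerator = 1
+// and denominator = 3.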
+message Fraction { + uint64 numerator = 1; + uint64 denominator = 2; +} diff --git a/scripts/README.md b/scripts/README.md new file mode 100644 index 0000000000..f213124c8d --- /dev/null +++ b/scripts/README.md @@ -0,0 +1,3 @@ +Generally we should avoid shell scripting and write tests purely in Golang. +However, some libraries are not Goroutine-safe (e.g. app simulations cannot be run safely in parallel), +and OS-native threading may be more efficient for many parallel simulations, so we use shell scripts here. diff --git a/scripts/linkify_changelog.py b/scripts/linkify_changelog.py new file mode 100644 index 0000000000..2492b9a89a --- /dev/null +++ b/scripts/linkify_changelog.py @@ -0,0 +1,15 @@ +import fileinput +import re + +# This script goes through the provided file, and replaces any " \#", +# with the valid mark down formatted link to it. e.g. +# " [\#number](https://github.com/cosmos/cosmos-sdk/issues/) +# Note that if the number is for a PR, github will auto-redirect you when you click the link. +# It is safe to run the script multiple times in succession. +# +# Example: +# +# $ python ./scripts/linkify_changelog.py CHANGELOG.md +for line in fileinput.input(inplace=1): + line = re.sub(r"\s\\#([0-9]+)", r" [\\#\1](https://github.com/cosmos/ibc-go/issues/\1)", line.rstrip()) + print(line) diff --git a/scripts/protoc-swagger-gen.sh b/scripts/protoc-swagger-gen.sh new file mode 100755 index 0000000000..30cf44bcc0 --- /dev/null +++ b/scripts/protoc-swagger-gen.sh @@ -0,0 +1,27 @@ +#!/usr/bin/env bash + +set -eo pipefail + +mkdir -p ./tmp-swagger-gen +proto_dirs=$(find ./proto -path -prune -o -name '*.proto' -print0 | xargs -0 -n1 dirname | sort | uniq) +for dir in $proto_dirs; do + + # generate swagger files (filter query files) + query_file=$(find "${dir}" -maxdepth 1 \( -name 'query.proto' -o -name 'service.proto' \)) + if [[ ! -z "$query_file" ]]; then + buf protoc \ + -I "proto" \ + -I "third_party/proto" \ + "$query_file" \ + --swagger_out=./tmp-swagger-gen \ + --swagger_opt=logtostderr=true --swagger_opt=fqn_for_swagger_name=true --swagger_opt=simple_operation_ids=true + fi +done + +# combine swagger files +# uses nodejs package `swagger-combine`. +# all the individual swagger files need to be configured in `config.json` for merging +# swagger-combine ./client/docs/config.json -o ./client/docs/swagger-ui/swagger.yaml -f yaml --continueOnConflictingPaths true --includeDefinitions true + +# clean swagger files +rm -rf ./tmp-swagger-gen diff --git a/scripts/protocgen.sh b/scripts/protocgen.sh new file mode 100755 index 0000000000..656cff26b4 --- /dev/null +++ b/scripts/protocgen.sh @@ -0,0 +1,40 @@ +#!/usr/bin/env bash + +set -eo pipefail + +protoc_gen_gocosmos() { + if ! grep "github.com/gogo/protobuf => github.com/regen-network/protobuf" go.mod &>/dev/null ; then + echo -e "\tPlease run this command from somewhere inside the ibc-go folder." + return 1 + fi + + go get github.com/regen-network/cosmos-proto/protoc-gen-gocosmos@latest 2>/dev/null +} + +protoc_gen_gocosmos + +proto_dirs=$(find ./proto -path -prune -o -name '*.proto' -print0 | xargs -0 -n1 dirname | sort | uniq) +for dir in $proto_dirs; do + buf protoc \ + -I "proto" \ + -I "third_party/proto" \ + --gocosmos_out=plugins=interfacetype+grpc,\ +Mgoogle/protobuf/any.proto=github.com/cosmos/cosmos-sdk/codec/types:. \ + --grpc-gateway_out=logtostderr=true:. 
\ + $(find "${dir}" -maxdepth 1 -name '*.proto') + +done + +# command to generate docs using protoc-gen-doc +buf protoc \ +-I "proto" \ +-I "third_party/proto" \ +--doc_out=./docs/ibc \ +--doc_opt=./docs/protodoc-markdown.tmpl,proto-docs.md \ +$(find "$(pwd)/proto" -maxdepth 5 -name '*.proto') +go mod tidy + + +# move proto files to the right places +cp -r github.com/cosmos/ibc-go/* ./ +rm -rf github.com diff --git a/testing/chain.go b/testing/chain.go new file mode 100644 index 0000000000..0534066d88 --- /dev/null +++ b/testing/chain.go @@ -0,0 +1,910 @@ +package ibctesting + +import ( + "bytes" + "fmt" + "strconv" + "testing" + "time" + + "github.com/stretchr/testify/require" + abci "github.com/tendermint/tendermint/abci/types" + "github.com/tendermint/tendermint/crypto/tmhash" + tmproto "github.com/tendermint/tendermint/proto/tendermint/types" + tmprotoversion "github.com/tendermint/tendermint/proto/tendermint/version" + tmtypes "github.com/tendermint/tendermint/types" + tmversion "github.com/tendermint/tendermint/version" + + "github.com/cosmos/cosmos-sdk/client" + "github.com/cosmos/cosmos-sdk/codec" + "github.com/cosmos/cosmos-sdk/crypto/keys/secp256k1" + cryptotypes "github.com/cosmos/cosmos-sdk/crypto/types" + "github.com/cosmos/cosmos-sdk/simapp" + sdk "github.com/cosmos/cosmos-sdk/types" + sdkerrors "github.com/cosmos/cosmos-sdk/types/errors" + authtypes "github.com/cosmos/cosmos-sdk/x/auth/types" + banktypes "github.com/cosmos/cosmos-sdk/x/bank/types" + capabilitytypes "github.com/cosmos/cosmos-sdk/x/capability/types" + ibctransfertypes "github.com/cosmos/cosmos-sdk/x/ibc/applications/transfer/types" + clienttypes "github.com/cosmos/cosmos-sdk/x/ibc/core/02-client/types" + connectiontypes "github.com/cosmos/cosmos-sdk/x/ibc/core/03-connection/types" + channeltypes "github.com/cosmos/cosmos-sdk/x/ibc/core/04-channel/types" + commitmenttypes "github.com/cosmos/cosmos-sdk/x/ibc/core/23-commitment/types" + host "github.com/cosmos/cosmos-sdk/x/ibc/core/24-host" + "github.com/cosmos/cosmos-sdk/x/ibc/core/exported" + "github.com/cosmos/cosmos-sdk/x/ibc/core/types" + ibctmtypes "github.com/cosmos/cosmos-sdk/x/ibc/light-clients/07-tendermint/types" + "github.com/cosmos/cosmos-sdk/x/ibc/testing/mock" + "github.com/cosmos/cosmos-sdk/x/staking/teststaking" + stakingtypes "github.com/cosmos/cosmos-sdk/x/staking/types" +) + +const ( + // Default params constants used to create a TM client + TrustingPeriod time.Duration = time.Hour * 24 * 7 * 2 + UnbondingPeriod time.Duration = time.Hour * 24 * 7 * 3 + MaxClockDrift time.Duration = time.Second * 10 + DefaultDelayPeriod uint64 = 0 + + DefaultChannelVersion = ibctransfertypes.Version + InvalidID = "IDisInvalid" + + ConnectionIDPrefix = "conn" + ChannelIDPrefix = "chan" + + TransferPort = ibctransfertypes.ModuleName + MockPort = mock.ModuleName + + // used for testing UpdateClientProposal + Title = "title" + Description = "description" +) + +var ( + DefaultOpenInitVersion *connectiontypes.Version + + // Default params variables used to create a TM client + DefaultTrustLevel ibctmtypes.Fraction = ibctmtypes.DefaultTrustLevel + TestHash = tmhash.Sum([]byte("TESTING HASH")) + TestCoin = sdk.NewCoin(sdk.DefaultBondDenom, sdk.NewInt(100)) + + UpgradePath = []string{"upgrade", "upgradedIBCState"} + + ConnectionVersion = connectiontypes.ExportedVersionsToProto(connectiontypes.GetCompatibleVersions())[0] + + MockAcknowledgement = mock.MockAcknowledgement + MockCommitment = mock.MockCommitment +) + +// TestChain is a testing struct that wraps a simapp 
with the last TM Header, the current ABCI +// header and the validators of the TestChain. It also contains a field called ChainID. This +// is the clientID that *other* chains use to refer to this TestChain. The SenderAccount +// is used for delivering transactions through the application state. +// NOTE: the actual application uses an empty chain-id for ease of testing. +type TestChain struct { + t *testing.T + + App *simapp.SimApp + ChainID string + LastHeader *ibctmtypes.Header // header for last block height committed + CurrentHeader tmproto.Header // header for current block height + QueryServer types.QueryServer + TxConfig client.TxConfig + Codec codec.BinaryMarshaler + + Vals *tmtypes.ValidatorSet + Signers []tmtypes.PrivValidator + + senderPrivKey cryptotypes.PrivKey + SenderAccount authtypes.AccountI + + // IBC specific helpers + ClientIDs []string // ClientID's used on this chain + Connections []*TestConnection // track connectionID's created for this chain +} + +// NewTestChain initializes a new TestChain instance with a single validator set using a +// generated private key. It also creates a sender account to be used for delivering transactions. +// +// The first block height is committed to state in order to allow for client creations on +// counterparty chains. The TestChain will return with a block height starting at 2. +// +// Time management is handled by the Coordinator in order to ensure synchrony between chains. +// Each update of any chain increments the block header time for all chains by 5 seconds. +func NewTestChain(t *testing.T, chainID string) *TestChain { + // generate validator private/public key + privVal := mock.NewPV() + pubKey, err := privVal.GetPubKey() + require.NoError(t, err) + + // create validator set with single validator + validator := tmtypes.NewValidator(pubKey, 1) + valSet := tmtypes.NewValidatorSet([]*tmtypes.Validator{validator}) + signers := []tmtypes.PrivValidator{privVal} + + // generate genesis account + senderPrivKey := secp256k1.GenPrivKey() + acc := authtypes.NewBaseAccount(senderPrivKey.PubKey().Address().Bytes(), senderPrivKey.PubKey(), 0, 0) + balance := banktypes.Balance{ + Address: acc.GetAddress().String(), + Coins: sdk.NewCoins(sdk.NewCoin(sdk.DefaultBondDenom, sdk.NewInt(100000000000000))), + } + + app := simapp.SetupWithGenesisValSet(t, valSet, []authtypes.GenesisAccount{acc}, balance) + + // create current header and call begin block + header := tmproto.Header{ + ChainID: chainID, + Height: 1, + Time: globalStartTime, + } + + txConfig := simapp.MakeTestEncodingConfig().TxConfig + + // create an account to send transactions from + chain := &TestChain{ + t: t, + ChainID: chainID, + App: app, + CurrentHeader: header, + QueryServer: app.IBCKeeper, + TxConfig: txConfig, + Codec: app.AppCodec(), + Vals: valSet, + Signers: signers, + senderPrivKey: senderPrivKey, + SenderAccount: acc, + ClientIDs: make([]string, 0), + Connections: make([]*TestConnection, 0), + } + + cap := chain.App.IBCKeeper.PortKeeper.BindPort(chain.GetContext(), MockPort) + err = chain.App.ScopedIBCMockKeeper.ClaimCapability(chain.GetContext(), cap, host.PortPath(MockPort)) + require.NoError(t, err) + + chain.NextBlock() + + return chain +} + +// GetContext returns the current context for the application. 
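+// As an illustrative sketch (not new API added by this patch), keeper calls in this
+// package are made against this context, e.g.:
+//
+//   clientState, found := chain.App.IBCKeeper.ClientKeeper.GetClientState(chain.GetContext(), clientID)
+//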
+func (chain *TestChain) GetContext() sdk.Context {
+	return chain.App.BaseApp.NewContext(false, chain.CurrentHeader)
+}
+
+// QueryProof performs an abci query with the given key and returns the proto encoded merkle proof
+// for the query and the height at which the proof will succeed on a tendermint verifier.
+func (chain *TestChain) QueryProof(key []byte) ([]byte, clienttypes.Height) {
+	res := chain.App.Query(abci.RequestQuery{
+		Path:   fmt.Sprintf("store/%s/key", host.StoreKey),
+		Height: chain.App.LastBlockHeight() - 1,
+		Data:   key,
+		Prove:  true,
+	})
+
+	merkleProof, err := commitmenttypes.ConvertProofs(res.ProofOps)
+	require.NoError(chain.t, err)
+
+	proof, err := chain.App.AppCodec().MarshalBinaryBare(&merkleProof)
+	require.NoError(chain.t, err)
+
+	revision := clienttypes.ParseChainID(chain.ChainID)
+
+	// proof height + 1 is returned because the proof corresponds to the height at which it
+	// was created in the IAVL tree. Tendermint, and subsequently the clients that rely on it,
+	// use heights 1 above the IAVL tree. Thus we return proof height + 1.
+	return proof, clienttypes.NewHeight(revision, uint64(res.Height)+1)
+}
+
+// QueryUpgradeProof performs an abci query with the given key and returns the proto encoded merkle proof
+// for the query and the height at which the proof will succeed on a tendermint verifier.
+func (chain *TestChain) QueryUpgradeProof(key []byte, height uint64) ([]byte, clienttypes.Height) {
+	res := chain.App.Query(abci.RequestQuery{
+		Path:   "store/upgrade/key",
+		Height: int64(height - 1),
+		Data:   key,
+		Prove:  true,
+	})
+
+	merkleProof, err := commitmenttypes.ConvertProofs(res.ProofOps)
+	require.NoError(chain.t, err)
+
+	proof, err := chain.App.AppCodec().MarshalBinaryBare(&merkleProof)
+	require.NoError(chain.t, err)
+
+	revision := clienttypes.ParseChainID(chain.ChainID)
+
+	// proof height + 1 is returned because the proof corresponds to the height at which it
+	// was created in the IAVL tree. Tendermint, and subsequently the clients that rely on it,
+	// use heights 1 above the IAVL tree. Thus we return proof height + 1.
+	return proof, clienttypes.NewHeight(revision, uint64(res.Height+1))
+}
+
+// QueryClientStateProof performs an abci query for a client state
+// stored with a given clientID and returns the ClientState along with the proof.
+func (chain *TestChain) QueryClientStateProof(clientID string) (exported.ClientState, []byte) {
+	// retrieve client state to provide proof for
+	clientState, found := chain.App.IBCKeeper.ClientKeeper.GetClientState(chain.GetContext(), clientID)
+	require.True(chain.t, found)
+
+	clientKey := host.FullClientStateKey(clientID)
+	proofClient, _ := chain.QueryProof(clientKey)
+
+	return clientState, proofClient
+}
+
+// QueryConsensusStateProof performs an abci query for a consensus state
+// stored on the given clientID. The proof and consensusHeight are returned.
+func (chain *TestChain) QueryConsensusStateProof(clientID string) ([]byte, clienttypes.Height) {
+	clientState := chain.GetClientState(clientID)
+
+	consensusHeight := clientState.GetLatestHeight().(clienttypes.Height)
+	consensusKey := host.FullConsensusStateKey(clientID, consensusHeight)
+	proofConsensus, _ := chain.QueryProof(consensusKey)
+
+	return proofConsensus, consensusHeight
+}
+
+// NextBlock sets the last header to the current header and increments the current header to be
+// at the next block height. It does not update the time as that is handled by the Coordinator.
+// +// CONTRACT: this function must only be called after app.Commit() occurs +func (chain *TestChain) NextBlock() { + // set the last header to the current header + // use nil trusted fields + chain.LastHeader = chain.CurrentTMClientHeader() + + // increment the current header + chain.CurrentHeader = tmproto.Header{ + ChainID: chain.ChainID, + Height: chain.App.LastBlockHeight() + 1, + AppHash: chain.App.LastCommitID().Hash, + // NOTE: the time is increased by the coordinator to maintain time synchrony amongst + // chains. + Time: chain.CurrentHeader.Time, + ValidatorsHash: chain.Vals.Hash(), + NextValidatorsHash: chain.Vals.Hash(), + } + + chain.App.BeginBlock(abci.RequestBeginBlock{Header: chain.CurrentHeader}) + +} + +// sendMsgs delivers a transaction through the application without returning the result. +func (chain *TestChain) sendMsgs(msgs ...sdk.Msg) error { + _, err := chain.SendMsgs(msgs...) + return err +} + +// SendMsgs delivers a transaction through the application. It updates the senders sequence +// number and updates the TestChain's headers. It returns the result and error if one +// occurred. +func (chain *TestChain) SendMsgs(msgs ...sdk.Msg) (*sdk.Result, error) { + _, r, err := simapp.SignCheckDeliver( + chain.t, + chain.TxConfig, + chain.App.BaseApp, + chain.GetContext().BlockHeader(), + msgs, + chain.ChainID, + []uint64{chain.SenderAccount.GetAccountNumber()}, + []uint64{chain.SenderAccount.GetSequence()}, + true, true, chain.senderPrivKey, + ) + if err != nil { + return nil, err + } + + // SignCheckDeliver calls app.Commit() + chain.NextBlock() + + // increment sequence for successful transaction execution + chain.SenderAccount.SetSequence(chain.SenderAccount.GetSequence() + 1) + + return r, nil +} + +// GetClientState retrieves the client state for the provided clientID. The client is +// expected to exist otherwise testing will fail. +func (chain *TestChain) GetClientState(clientID string) exported.ClientState { + clientState, found := chain.App.IBCKeeper.ClientKeeper.GetClientState(chain.GetContext(), clientID) + require.True(chain.t, found) + + return clientState +} + +// GetConsensusState retrieves the consensus state for the provided clientID and height. +// It will return a success boolean depending on if consensus state exists or not. +func (chain *TestChain) GetConsensusState(clientID string, height exported.Height) (exported.ConsensusState, bool) { + return chain.App.IBCKeeper.ClientKeeper.GetClientConsensusState(chain.GetContext(), clientID, height) +} + +// GetValsAtHeight will return the validator set of the chain at a given height. It will return +// a success boolean depending on if the validator set exists or not at that height. +func (chain *TestChain) GetValsAtHeight(height int64) (*tmtypes.ValidatorSet, bool) { + histInfo, ok := chain.App.StakingKeeper.GetHistoricalInfo(chain.GetContext(), height) + if !ok { + return nil, false + } + + valSet := stakingtypes.Validators(histInfo.Valset) + + tmValidators, err := teststaking.ToTmValidators(valSet) + if err != nil { + panic(err) + } + return tmtypes.NewValidatorSet(tmValidators), true +} + +// GetConnection retrieves an IBC Connection for the provided TestConnection. The +// connection is expected to exist otherwise testing will fail. 
+func (chain *TestChain) GetConnection(testConnection *TestConnection) connectiontypes.ConnectionEnd {
+	connection, found := chain.App.IBCKeeper.ConnectionKeeper.GetConnection(chain.GetContext(), testConnection.ID)
+	require.True(chain.t, found)
+
+	return connection
+}
+
+// GetChannel retrieves an IBC Channel for the provided TestChannel. The channel
+// is expected to exist otherwise testing will fail.
+func (chain *TestChain) GetChannel(testChannel TestChannel) channeltypes.Channel {
+	channel, found := chain.App.IBCKeeper.ChannelKeeper.GetChannel(chain.GetContext(), testChannel.PortID, testChannel.ID)
+	require.True(chain.t, found)
+
+	return channel
+}
+
+// GetAcknowledgement retrieves an acknowledgement for the provided packet. If the
+// acknowledgement does not exist then testing will fail.
+func (chain *TestChain) GetAcknowledgement(packet exported.PacketI) []byte {
+	ack, found := chain.App.IBCKeeper.ChannelKeeper.GetPacketAcknowledgement(chain.GetContext(), packet.GetDestPort(), packet.GetDestChannel(), packet.GetSequence())
+	require.True(chain.t, found)
+
+	return ack
+}
+
+// GetPrefix returns the prefix used by a chain in connection creation.
+func (chain *TestChain) GetPrefix() commitmenttypes.MerklePrefix {
+	return commitmenttypes.NewMerklePrefix(chain.App.IBCKeeper.ConnectionKeeper.GetCommitmentPrefix().Bytes())
+}
+
+// NewClientID generates a new clientID in the format <clientType>-<index>,
+// appends it to the chain's ClientIDs and returns it.
+func (chain *TestChain) NewClientID(clientType string) string {
+	clientID := fmt.Sprintf("%s-%s", clientType, strconv.Itoa(len(chain.ClientIDs)))
+	chain.ClientIDs = append(chain.ClientIDs, clientID)
+	return clientID
+}
+
+// AddTestConnection appends a new TestConnection which contains references
+// to the connection id, client id and counterparty client id.
+func (chain *TestChain) AddTestConnection(clientID, counterpartyClientID string) *TestConnection {
+	conn := chain.ConstructNextTestConnection(clientID, counterpartyClientID)
+
+	chain.Connections = append(chain.Connections, conn)
+	return conn
+}
+
+// ConstructNextTestConnection constructs the next test connection to be
+// created given a clientID and counterparty clientID. The connection ID is
+// generated with connectiontypes.FormatConnectionIdentifier, using the number
+// of existing connections as the sequence.
+func (chain *TestChain) ConstructNextTestConnection(clientID, counterpartyClientID string) *TestConnection {
+	connectionID := connectiontypes.FormatConnectionIdentifier(uint64(len(chain.Connections)))
+	return &TestConnection{
+		ID:                   connectionID,
+		ClientID:             clientID,
+		NextChannelVersion:   DefaultChannelVersion,
+		CounterpartyClientID: counterpartyClientID,
+	}
+}
+
+// GetFirstTestConnection returns the first test connection for a given clientID.
+// The connection may or may not exist in the chain state.
+func (chain *TestChain) GetFirstTestConnection(clientID, counterpartyClientID string) *TestConnection {
+	if len(chain.Connections) > 0 {
+		return chain.Connections[0]
+	}
+
+	return chain.ConstructNextTestConnection(clientID, counterpartyClientID)
+}
+
+// AddTestChannel appends a new TestChannel which contains references to the port and channel ID
+// used for channel creation and interaction. See 'NextTestChannel' for channel ID naming format.
+func (chain *TestChain) AddTestChannel(conn *TestConnection, portID string) TestChannel {
+	channel := chain.NextTestChannel(conn, portID)
+	conn.Channels = append(conn.Channels, channel)
+	return channel
+}
+
+// NextTestChannel returns the next test channel to be created on this connection, but does not
+// add it to the list of created channels.
+// This function is expected to be used when the caller has not created the associated
+// channel in app state, but would still like to refer to the non-existent channel,
+// usually to test for its non-existence.
+//
+// The channel ID is generated with channeltypes.FormatChannelIdentifier, using the
+// chain's next channel sequence.
+//
+// The port is passed in by the caller.
+func (chain *TestChain) NextTestChannel(conn *TestConnection, portID string) TestChannel {
+	nextChanSeq := chain.App.IBCKeeper.ChannelKeeper.GetNextChannelSequence(chain.GetContext())
+	channelID := channeltypes.FormatChannelIdentifier(nextChanSeq)
+	return TestChannel{
+		PortID:               portID,
+		ID:                   channelID,
+		ClientID:             conn.ClientID,
+		CounterpartyClientID: conn.CounterpartyClientID,
+		Version:              conn.NextChannelVersion,
+	}
+}
+
+// ConstructMsgCreateClient constructs a message to create a new client state (tendermint or solomachine).
+// NOTE: a solo machine client will be created with an empty diversifier.
+func (chain *TestChain) ConstructMsgCreateClient(counterparty *TestChain, clientID string, clientType string) *clienttypes.MsgCreateClient {
+	var (
+		clientState    exported.ClientState
+		consensusState exported.ConsensusState
+	)
+
+	switch clientType {
+	case exported.Tendermint:
+		height := counterparty.LastHeader.GetHeight().(clienttypes.Height)
+		clientState = ibctmtypes.NewClientState(
+			counterparty.ChainID, DefaultTrustLevel, TrustingPeriod, UnbondingPeriod, MaxClockDrift,
+			height, commitmenttypes.GetSDKSpecs(), UpgradePath, false, false,
+		)
+		consensusState = counterparty.LastHeader.ConsensusState()
+	case exported.Solomachine:
+		solo := NewSolomachine(chain.t, chain.Codec, clientID, "", 1)
+		clientState = solo.ClientState()
+		consensusState = solo.ConsensusState()
+	default:
+		chain.t.Fatalf("unsupported client state type %s", clientType)
+	}
+
+	msg, err := clienttypes.NewMsgCreateClient(
+		clientState, consensusState, chain.SenderAccount.GetAddress(),
+	)
+	require.NoError(chain.t, err)
+	return msg
+}
+
+// CreateTMClient will construct and execute a 07-tendermint MsgCreateClient. A counterparty
+// client will be created on the (target) chain.
+func (chain *TestChain) CreateTMClient(counterparty *TestChain, clientID string) error {
+	// construct MsgCreateClient using counterparty
+	msg := chain.ConstructMsgCreateClient(counterparty, clientID, exported.Tendermint)
+	return chain.sendMsgs(msg)
+}
+
+// UpdateTMClient will construct and execute a 07-tendermint MsgUpdateClient. The counterparty
+// client will be updated on the (target) chain. UpdateTMClient mocks the relayer flow
+// necessary for updating a Tendermint client.
+func (chain *TestChain) UpdateTMClient(counterparty *TestChain, clientID string) error {
+	header, err := chain.ConstructUpdateTMClientHeader(counterparty, clientID)
+	require.NoError(chain.t, err)
+
+	msg, err := clienttypes.NewMsgUpdateClient(
+		clientID, header,
+		chain.SenderAccount.GetAddress(),
+	)
+	require.NoError(chain.t, err)
+
+	return chain.sendMsgs(msg)
+}
+
+// ConstructUpdateTMClientHeader will construct a valid 07-tendermint Header to update the
+// light client on the source chain.
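+//
+// As an illustrative sketch (this simply mirrors UpdateTMClient above, not new API),
+// the returned header is wrapped in a MsgUpdateClient and delivered to the chain:
+//
+//   header, err := chain.ConstructUpdateTMClientHeader(counterparty, clientID)
+//   require.NoError(chain.t, err)
+//   msg, err := clienttypes.NewMsgUpdateClient(clientID, header, chain.SenderAccount.GetAddress())
+//   require.NoError(chain.t, err)
+//   err = chain.sendMsgs(msg)
+//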
+func (chain *TestChain) ConstructUpdateTMClientHeader(counterparty *TestChain, clientID string) (*ibctmtypes.Header, error) { + header := counterparty.LastHeader + // Relayer must query for LatestHeight on client to get TrustedHeight + trustedHeight := chain.GetClientState(clientID).GetLatestHeight().(clienttypes.Height) + var ( + tmTrustedVals *tmtypes.ValidatorSet + ok bool + ) + // Once we get TrustedHeight from client, we must query the validators from the counterparty chain + // If the LatestHeight == LastHeader.Height, then TrustedValidators are current validators + // If LatestHeight < LastHeader.Height, we can query the historical validator set from HistoricalInfo + if trustedHeight == counterparty.LastHeader.GetHeight() { + tmTrustedVals = counterparty.Vals + } else { + // NOTE: We need to get validators from counterparty at height: trustedHeight+1 + // since the last trusted validators for a header at height h + // is the NextValidators at h+1 committed to in header h by + // NextValidatorsHash + tmTrustedVals, ok = counterparty.GetValsAtHeight(int64(trustedHeight.RevisionHeight + 1)) + if !ok { + return nil, sdkerrors.Wrapf(ibctmtypes.ErrInvalidHeaderHeight, "could not retrieve trusted validators at trustedHeight: %d", trustedHeight) + } + } + // inject trusted fields into last header + // for now assume revision number is 0 + header.TrustedHeight = trustedHeight + + trustedVals, err := tmTrustedVals.ToProto() + if err != nil { + return nil, err + } + header.TrustedValidators = trustedVals + + return header, nil + +} + +// ExpireClient fast forwards the chain's block time by the provided amount of time which will +// expire any clients with a trusting period less than or equal to this amount of time. +func (chain *TestChain) ExpireClient(amount time.Duration) { + chain.CurrentHeader.Time = chain.CurrentHeader.Time.Add(amount) +} + +// CurrentTMClientHeader creates a TM header using the current header parameters +// on the chain. The trusted fields in the header are set to nil. +func (chain *TestChain) CurrentTMClientHeader() *ibctmtypes.Header { + return chain.CreateTMClientHeader(chain.ChainID, chain.CurrentHeader.Height, clienttypes.Height{}, chain.CurrentHeader.Time, chain.Vals, nil, chain.Signers) +} + +// CreateTMClientHeader creates a TM header to update the TM client. Args are passed in to allow +// caller flexibility to use params that differ from the chain. 
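+//
+// As an illustrative sketch (height, trustedHeight and now are hypothetical test
+// variables), misbehaviour tests can build two conflicting headers at the same
+// height by varying the timestamp:
+//
+//   header1 := chain.CreateTMClientHeader(chain.ChainID, height, trustedHeight, now, chain.Vals, chain.Vals, chain.Signers)
+//   header2 := chain.CreateTMClientHeader(chain.ChainID, height, trustedHeight, now.Add(time.Minute), chain.Vals, chain.Vals, chain.Signers)
+//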
+func (chain *TestChain) CreateTMClientHeader(chainID string, blockHeight int64, trustedHeight clienttypes.Height, timestamp time.Time, tmValSet, tmTrustedVals *tmtypes.ValidatorSet, signers []tmtypes.PrivValidator) *ibctmtypes.Header { + var ( + valSet *tmproto.ValidatorSet + trustedVals *tmproto.ValidatorSet + ) + require.NotNil(chain.t, tmValSet) + + vsetHash := tmValSet.Hash() + + tmHeader := tmtypes.Header{ + Version: tmprotoversion.Consensus{Block: tmversion.BlockProtocol, App: 2}, + ChainID: chainID, + Height: blockHeight, + Time: timestamp, + LastBlockID: MakeBlockID(make([]byte, tmhash.Size), 10_000, make([]byte, tmhash.Size)), + LastCommitHash: chain.App.LastCommitID().Hash, + DataHash: tmhash.Sum([]byte("data_hash")), + ValidatorsHash: vsetHash, + NextValidatorsHash: vsetHash, + ConsensusHash: tmhash.Sum([]byte("consensus_hash")), + AppHash: chain.CurrentHeader.AppHash, + LastResultsHash: tmhash.Sum([]byte("last_results_hash")), + EvidenceHash: tmhash.Sum([]byte("evidence_hash")), + ProposerAddress: tmValSet.Proposer.Address, //nolint:staticcheck + } + hhash := tmHeader.Hash() + blockID := MakeBlockID(hhash, 3, tmhash.Sum([]byte("part_set"))) + voteSet := tmtypes.NewVoteSet(chainID, blockHeight, 1, tmproto.PrecommitType, tmValSet) + + commit, err := tmtypes.MakeCommit(blockID, blockHeight, 1, voteSet, signers, timestamp) + require.NoError(chain.t, err) + + signedHeader := &tmproto.SignedHeader{ + Header: tmHeader.ToProto(), + Commit: commit.ToProto(), + } + + if tmValSet != nil { + valSet, err = tmValSet.ToProto() + if err != nil { + panic(err) + } + } + + if tmTrustedVals != nil { + trustedVals, err = tmTrustedVals.ToProto() + if err != nil { + panic(err) + } + } + + // The trusted fields may be nil. They may be filled before relaying messages to a client. + // The relayer is responsible for querying client and injecting appropriate trusted fields. + return &ibctmtypes.Header{ + SignedHeader: signedHeader, + ValidatorSet: valSet, + TrustedHeight: trustedHeight, + TrustedValidators: trustedVals, + } +} + +// MakeBlockID copied unimported test functions from tmtypes to use them here +func MakeBlockID(hash []byte, partSetSize uint32, partSetHash []byte) tmtypes.BlockID { + return tmtypes.BlockID{ + Hash: hash, + PartSetHeader: tmtypes.PartSetHeader{ + Total: partSetSize, + Hash: partSetHash, + }, + } +} + +// CreateSortedSignerArray takes two PrivValidators, and the corresponding Validator structs +// (including voting power). It returns a signer array of PrivValidators that matches the +// sorting of ValidatorSet. +// The sorting is first by .VotingPower (descending), with secondary index of .Address (ascending). +func CreateSortedSignerArray(altPrivVal, suitePrivVal tmtypes.PrivValidator, + altVal, suiteVal *tmtypes.Validator) []tmtypes.PrivValidator { + + switch { + case altVal.VotingPower > suiteVal.VotingPower: + return []tmtypes.PrivValidator{altPrivVal, suitePrivVal} + case altVal.VotingPower < suiteVal.VotingPower: + return []tmtypes.PrivValidator{suitePrivVal, altPrivVal} + default: + if bytes.Compare(altVal.Address, suiteVal.Address) == -1 { + return []tmtypes.PrivValidator{altPrivVal, suitePrivVal} + } + return []tmtypes.PrivValidator{suitePrivVal, altPrivVal} + } +} + +// ConnectionOpenInit will construct and execute a MsgConnectionOpenInit. 
+func (chain *TestChain) ConnectionOpenInit( + counterparty *TestChain, + connection, counterpartyConnection *TestConnection, +) error { + msg := connectiontypes.NewMsgConnectionOpenInit( + connection.ClientID, + connection.CounterpartyClientID, + counterparty.GetPrefix(), DefaultOpenInitVersion, DefaultDelayPeriod, + chain.SenderAccount.GetAddress(), + ) + return chain.sendMsgs(msg) +} + +// ConnectionOpenTry will construct and execute a MsgConnectionOpenTry. +func (chain *TestChain) ConnectionOpenTry( + counterparty *TestChain, + connection, counterpartyConnection *TestConnection, +) error { + counterpartyClient, proofClient := counterparty.QueryClientStateProof(counterpartyConnection.ClientID) + + connectionKey := host.ConnectionKey(counterpartyConnection.ID) + proofInit, proofHeight := counterparty.QueryProof(connectionKey) + + proofConsensus, consensusHeight := counterparty.QueryConsensusStateProof(counterpartyConnection.ClientID) + + msg := connectiontypes.NewMsgConnectionOpenTry( + "", connection.ClientID, // does not support handshake continuation + counterpartyConnection.ID, counterpartyConnection.ClientID, + counterpartyClient, counterparty.GetPrefix(), []*connectiontypes.Version{ConnectionVersion}, DefaultDelayPeriod, + proofInit, proofClient, proofConsensus, + proofHeight, consensusHeight, + chain.SenderAccount.GetAddress(), + ) + return chain.sendMsgs(msg) +} + +// ConnectionOpenAck will construct and execute a MsgConnectionOpenAck. +func (chain *TestChain) ConnectionOpenAck( + counterparty *TestChain, + connection, counterpartyConnection *TestConnection, +) error { + counterpartyClient, proofClient := counterparty.QueryClientStateProof(counterpartyConnection.ClientID) + + connectionKey := host.ConnectionKey(counterpartyConnection.ID) + proofTry, proofHeight := counterparty.QueryProof(connectionKey) + + proofConsensus, consensusHeight := counterparty.QueryConsensusStateProof(counterpartyConnection.ClientID) + + msg := connectiontypes.NewMsgConnectionOpenAck( + connection.ID, counterpartyConnection.ID, counterpartyClient, // testing doesn't use flexible selection + proofTry, proofClient, proofConsensus, + proofHeight, consensusHeight, + ConnectionVersion, + chain.SenderAccount.GetAddress(), + ) + return chain.sendMsgs(msg) +} + +// ConnectionOpenConfirm will construct and execute a MsgConnectionOpenConfirm. +func (chain *TestChain) ConnectionOpenConfirm( + counterparty *TestChain, + connection, counterpartyConnection *TestConnection, +) error { + connectionKey := host.ConnectionKey(counterpartyConnection.ID) + proof, height := counterparty.QueryProof(connectionKey) + + msg := connectiontypes.NewMsgConnectionOpenConfirm( + connection.ID, + proof, height, + chain.SenderAccount.GetAddress(), + ) + return chain.sendMsgs(msg) +} + +// CreatePortCapability binds and claims a capability for the given portID if it does not +// already exist. This function will fail testing on any resulting error. +// NOTE: only creation of a capbility for a transfer or mock port is supported +// Other applications must bind to the port in InitGenesis or modify this code. 
+func (chain *TestChain) CreatePortCapability(portID string) { + // check if the portId is already binded, if not bind it + _, ok := chain.App.ScopedIBCKeeper.GetCapability(chain.GetContext(), host.PortPath(portID)) + if !ok { + // create capability using the IBC capability keeper + cap, err := chain.App.ScopedIBCKeeper.NewCapability(chain.GetContext(), host.PortPath(portID)) + require.NoError(chain.t, err) + + switch portID { + case MockPort: + // claim capability using the mock capability keeper + err = chain.App.ScopedIBCMockKeeper.ClaimCapability(chain.GetContext(), cap, host.PortPath(portID)) + require.NoError(chain.t, err) + case TransferPort: + // claim capability using the transfer capability keeper + err = chain.App.ScopedTransferKeeper.ClaimCapability(chain.GetContext(), cap, host.PortPath(portID)) + require.NoError(chain.t, err) + default: + panic(fmt.Sprintf("unsupported ibc testing package port ID %s", portID)) + } + } + + chain.App.Commit() + + chain.NextBlock() +} + +// GetPortCapability returns the port capability for the given portID. The capability must +// exist, otherwise testing will fail. +func (chain *TestChain) GetPortCapability(portID string) *capabilitytypes.Capability { + cap, ok := chain.App.ScopedIBCKeeper.GetCapability(chain.GetContext(), host.PortPath(portID)) + require.True(chain.t, ok) + + return cap +} + +// CreateChannelCapability binds and claims a capability for the given portID and channelID +// if it does not already exist. This function will fail testing on any resulting error. +func (chain *TestChain) CreateChannelCapability(portID, channelID string) { + capName := host.ChannelCapabilityPath(portID, channelID) + // check if the portId is already binded, if not bind it + _, ok := chain.App.ScopedIBCKeeper.GetCapability(chain.GetContext(), capName) + if !ok { + cap, err := chain.App.ScopedIBCKeeper.NewCapability(chain.GetContext(), capName) + require.NoError(chain.t, err) + err = chain.App.ScopedTransferKeeper.ClaimCapability(chain.GetContext(), cap, capName) + require.NoError(chain.t, err) + } + + chain.App.Commit() + + chain.NextBlock() +} + +// GetChannelCapability returns the channel capability for the given portID and channelID. +// The capability must exist, otherwise testing will fail. +func (chain *TestChain) GetChannelCapability(portID, channelID string) *capabilitytypes.Capability { + cap, ok := chain.App.ScopedIBCKeeper.GetCapability(chain.GetContext(), host.ChannelCapabilityPath(portID, channelID)) + require.True(chain.t, ok) + + return cap +} + +// ChanOpenInit will construct and execute a MsgChannelOpenInit. +func (chain *TestChain) ChanOpenInit( + ch, counterparty TestChannel, + order channeltypes.Order, + connectionID string, +) error { + msg := channeltypes.NewMsgChannelOpenInit( + ch.PortID, + ch.Version, order, []string{connectionID}, + counterparty.PortID, + chain.SenderAccount.GetAddress(), + ) + return chain.sendMsgs(msg) +} + +// ChanOpenTry will construct and execute a MsgChannelOpenTry. 
+func (chain *TestChain) ChanOpenTry( + counterparty *TestChain, + ch, counterpartyCh TestChannel, + order channeltypes.Order, + connectionID string, +) error { + proof, height := counterparty.QueryProof(host.ChannelKey(counterpartyCh.PortID, counterpartyCh.ID)) + + msg := channeltypes.NewMsgChannelOpenTry( + ch.PortID, "", // does not support handshake continuation + ch.Version, order, []string{connectionID}, + counterpartyCh.PortID, counterpartyCh.ID, counterpartyCh.Version, + proof, height, + chain.SenderAccount.GetAddress(), + ) + return chain.sendMsgs(msg) +} + +// ChanOpenAck will construct and execute a MsgChannelOpenAck. +func (chain *TestChain) ChanOpenAck( + counterparty *TestChain, + ch, counterpartyCh TestChannel, +) error { + proof, height := counterparty.QueryProof(host.ChannelKey(counterpartyCh.PortID, counterpartyCh.ID)) + + msg := channeltypes.NewMsgChannelOpenAck( + ch.PortID, ch.ID, + counterpartyCh.ID, counterpartyCh.Version, // testing doesn't use flexible selection + proof, height, + chain.SenderAccount.GetAddress(), + ) + return chain.sendMsgs(msg) +} + +// ChanOpenConfirm will construct and execute a MsgChannelOpenConfirm. +func (chain *TestChain) ChanOpenConfirm( + counterparty *TestChain, + ch, counterpartyCh TestChannel, +) error { + proof, height := counterparty.QueryProof(host.ChannelKey(counterpartyCh.PortID, counterpartyCh.ID)) + + msg := channeltypes.NewMsgChannelOpenConfirm( + ch.PortID, ch.ID, + proof, height, + chain.SenderAccount.GetAddress(), + ) + return chain.sendMsgs(msg) +} + +// ChanCloseInit will construct and execute a MsgChannelCloseInit. +// +// NOTE: does not work with ibc-transfer module +func (chain *TestChain) ChanCloseInit( + counterparty *TestChain, + channel TestChannel, +) error { + msg := channeltypes.NewMsgChannelCloseInit( + channel.PortID, channel.ID, + chain.SenderAccount.GetAddress(), + ) + return chain.sendMsgs(msg) +} + +// GetPacketData returns a ibc-transfer marshalled packet to be used for +// callback testing. +func (chain *TestChain) GetPacketData(counterparty *TestChain) []byte { + packet := ibctransfertypes.FungibleTokenPacketData{ + Denom: TestCoin.Denom, + Amount: TestCoin.Amount.Uint64(), + Sender: chain.SenderAccount.GetAddress().String(), + Receiver: counterparty.SenderAccount.GetAddress().String(), + } + + return packet.GetBytes() +} + +// SendPacket simulates sending a packet through the channel keeper. No message needs to be +// passed since this call is made from a module. +func (chain *TestChain) SendPacket( + packet exported.PacketI, +) error { + channelCap := chain.GetChannelCapability(packet.GetSourcePort(), packet.GetSourceChannel()) + + // no need to send message, acting as a module + err := chain.App.IBCKeeper.ChannelKeeper.SendPacket(chain.GetContext(), channelCap, packet) + if err != nil { + return err + } + + // commit changes + chain.App.Commit() + chain.NextBlock() + + return nil +} + +// WriteAcknowledgement simulates writing an acknowledgement to the chain. 
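+// As an illustrative sketch (packet is assumed to have been received on this chain),
+// a callback test can write an acknowledgement directly:
+//
+//   err := chain.WriteAcknowledgement(packet)
+//   require.NoError(chain.t, err)
+//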
+func (chain *TestChain) WriteAcknowledgement(
+	packet exported.PacketI,
+) error {
+	channelCap := chain.GetChannelCapability(packet.GetDestPort(), packet.GetDestChannel())
+
+	// no need to send message, acting as a handler
+	err := chain.App.IBCKeeper.ChannelKeeper.WriteAcknowledgement(chain.GetContext(), channelCap, packet, TestHash)
+	if err != nil {
+		return err
+	}
+
+	// commit changes
+	chain.App.Commit()
+	chain.NextBlock()
+
+	return nil
+}
diff --git a/testing/chain_test.go b/testing/chain_test.go
new file mode 100644
index 0000000000..361a9c4c15
--- /dev/null
+++ b/testing/chain_test.go
@@ -0,0 +1,47 @@
+package ibctesting_test
+
+import (
+	"testing"
+
+	"github.com/stretchr/testify/require"
+	tmtypes "github.com/tendermint/tendermint/types"
+
+	ibctesting "github.com/cosmos/cosmos-sdk/x/ibc/testing"
+	"github.com/cosmos/cosmos-sdk/x/ibc/testing/mock"
+)
+
+func TestCreateSortedSignerArray(t *testing.T) {
+	privVal1 := mock.NewPV()
+	pubKey1, err := privVal1.GetPubKey()
+	require.NoError(t, err)
+
+	privVal2 := mock.NewPV()
+	pubKey2, err := privVal2.GetPubKey()
+	require.NoError(t, err)
+
+	validator1 := tmtypes.NewValidator(pubKey1, 1)
+	validator2 := tmtypes.NewValidator(pubKey2, 2)
+
+	expected := []tmtypes.PrivValidator{privVal2, privVal1}
+
+	actual := ibctesting.CreateSortedSignerArray(privVal1, privVal2, validator1, validator2)
+	require.Equal(t, expected, actual)
+
+	// swap order
+	actual = ibctesting.CreateSortedSignerArray(privVal2, privVal1, validator2, validator1)
+	require.Equal(t, expected, actual)
+
+	// smaller address
+	validator1.Address = []byte{1}
+	validator2.Address = []byte{2}
+	validator2.VotingPower = 1
+
+	expected = []tmtypes.PrivValidator{privVal1, privVal2}
+
+	actual = ibctesting.CreateSortedSignerArray(privVal1, privVal2, validator1, validator2)
+	require.Equal(t, expected, actual)
+
+	// swap order
+	actual = ibctesting.CreateSortedSignerArray(privVal2, privVal1, validator2, validator1)
+	require.Equal(t, expected, actual)
+}
diff --git a/testing/coordinator.go b/testing/coordinator.go
new file mode 100644
index 0000000000..ade28b4df3
--- /dev/null
+++ b/testing/coordinator.go
@@ -0,0 +1,700 @@
+package ibctesting
+
+import (
+	"fmt"
+	"strconv"
+	"testing"
+	"time"
+
+	"github.com/stretchr/testify/require"
+	abci "github.com/tendermint/tendermint/abci/types"
+
+	sdk "github.com/cosmos/cosmos-sdk/types"
+	channeltypes "github.com/cosmos/cosmos-sdk/x/ibc/core/04-channel/types"
+	host "github.com/cosmos/cosmos-sdk/x/ibc/core/24-host"
+	"github.com/cosmos/cosmos-sdk/x/ibc/core/exported"
+)
+
+var (
+	ChainIDPrefix   = "testchain"
+	globalStartTime = time.Date(2020, 1, 2, 0, 0, 0, 0, time.UTC)
+	TimeIncrement   = time.Second * 5
+)
+
+// Coordinator is a testing struct which contains N TestChains. It handles keeping all chains
+// in sync with regard to time.
+type Coordinator struct {
+	t *testing.T
+
+	Chains map[string]*TestChain
+}
+
+// NewCoordinator initializes Coordinator with N TestChains.
+func NewCoordinator(t *testing.T, n int) *Coordinator {
+	chains := make(map[string]*TestChain)
+
+	for i := 0; i < n; i++ {
+		chainID := GetChainID(i)
+		chains[chainID] = NewTestChain(t, chainID)
+	}
+	return &Coordinator{
+		t:      t,
+		Chains: chains,
+	}
+}
+
+// Setup constructs a TM client, connection, and channel on both chains provided. It will
+// fail if any error occurs. The clientIDs, TestConnections, and TestChannels are returned
+// for both chains. The channels created use the mock application module.
+func (coord *Coordinator) Setup(
+	chainA, chainB *TestChain, order channeltypes.Order,
+) (string, string, *TestConnection, *TestConnection, TestChannel, TestChannel) {
+	clientA, clientB, connA, connB := coord.SetupClientConnections(chainA, chainB, exported.Tendermint)
+
+	// channels can also be referenced through the returned connections
+	channelA, channelB := coord.CreateMockChannels(chainA, chainB, connA, connB, order)
+
+	return clientA, clientB, connA, connB, channelA, channelB
+}
+
+// SetupClients is a helper function to create clients on both chains. It assumes the
+// caller does not anticipate any errors.
+func (coord *Coordinator) SetupClients(
+	chainA, chainB *TestChain,
+	clientType string,
+) (string, string) {
+
+	clientA, err := coord.CreateClient(chainA, chainB, clientType)
+	require.NoError(coord.t, err)
+
+	clientB, err := coord.CreateClient(chainB, chainA, clientType)
+	require.NoError(coord.t, err)
+
+	return clientA, clientB
+}
+
+// SetupClientConnections is a helper function to create clients and the appropriate
+// connections on both the source and counterparty chain. It assumes the caller does not
+// anticipate any errors.
+func (coord *Coordinator) SetupClientConnections(
+	chainA, chainB *TestChain,
+	clientType string,
+) (string, string, *TestConnection, *TestConnection) {
+
+	clientA, clientB := coord.SetupClients(chainA, chainB, clientType)
+
+	connA, connB := coord.CreateConnection(chainA, chainB, clientA, clientB)
+
+	return clientA, clientB, connA, connB
+}
+
+// CreateClient creates a counterparty client on the source chain and returns the clientID.
+func (coord *Coordinator) CreateClient(
+	source, counterparty *TestChain,
+	clientType string,
+) (clientID string, err error) {
+	coord.CommitBlock(source, counterparty)
+
+	clientID = source.NewClientID(clientType)
+
+	switch clientType {
+	case exported.Tendermint:
+		err = source.CreateTMClient(counterparty, clientID)
+
+	default:
+		err = fmt.Errorf("client type %s is not supported", clientType)
+	}
+
+	if err != nil {
+		return "", err
+	}
+
+	coord.IncrementTime()
+
+	return clientID, nil
+}
+
+// UpdateClient updates a counterparty client on the source chain.
+func (coord *Coordinator) UpdateClient(
+	source, counterparty *TestChain,
+	clientID string,
+	clientType string,
+) (err error) {
+	coord.CommitBlock(source, counterparty)
+
+	switch clientType {
+	case exported.Tendermint:
+		err = source.UpdateTMClient(counterparty, clientID)
+
+	default:
+		err = fmt.Errorf("client type %s is not supported", clientType)
+	}
+
+	if err != nil {
+		return err
+	}
+
+	coord.IncrementTime()
+
+	return nil
+}
+
+// CreateConnection constructs and executes connection handshake messages in order to create
+// OPEN connections on chainA and chainB. The connection information for chainA and chainB
+// is returned within a TestConnection struct. The function expects the connections to be
+// successfully opened otherwise testing will fail.
+func (coord *Coordinator) CreateConnection(
+	chainA, chainB *TestChain,
+	clientA, clientB string,
+) (*TestConnection, *TestConnection) {
+
+	connA, connB, err := coord.ConnOpenInit(chainA, chainB, clientA, clientB)
+	require.NoError(coord.t, err)
+
+	err = coord.ConnOpenTry(chainB, chainA, connB, connA)
+	require.NoError(coord.t, err)
+
+	err = coord.ConnOpenAck(chainA, chainB, connA, connB)
+	require.NoError(coord.t, err)
+
+	err = coord.ConnOpenConfirm(chainB, chainA, connB, connA)
+	require.NoError(coord.t, err)
+
+	return connA, connB
+}
+
+// CreateMockChannels constructs and executes channel handshake messages to create OPEN
+// channels that use a mock application module that returns nil on all callbacks. This
+// function expects the channels to be successfully opened, otherwise testing will
+// fail.
+func (coord *Coordinator) CreateMockChannels(
+	chainA, chainB *TestChain,
+	connA, connB *TestConnection,
+	order channeltypes.Order,
+) (TestChannel, TestChannel) {
+	return coord.CreateChannel(chainA, chainB, connA, connB, MockPort, MockPort, order)
+}
+
+// CreateTransferChannels constructs and executes channel handshake messages to create OPEN
+// ibc-transfer channels on chainA and chainB. The function expects the channels to be
+// successfully opened otherwise testing will fail.
+func (coord *Coordinator) CreateTransferChannels(
+	chainA, chainB *TestChain,
+	connA, connB *TestConnection,
+	order channeltypes.Order,
+) (TestChannel, TestChannel) {
+	return coord.CreateChannel(chainA, chainB, connA, connB, TransferPort, TransferPort, order)
+}
+
+// CreateChannel constructs and executes channel handshake messages in order to create
+// OPEN channels on chainA and chainB. The function expects the channels to be successfully
+// opened otherwise testing will fail.
+func (coord *Coordinator) CreateChannel(
+	chainA, chainB *TestChain,
+	connA, connB *TestConnection,
+	sourcePortID, counterpartyPortID string,
+	order channeltypes.Order,
+) (TestChannel, TestChannel) {
+
+	channelA, channelB, err := coord.ChanOpenInit(chainA, chainB, connA, connB, sourcePortID, counterpartyPortID, order)
+	require.NoError(coord.t, err)
+
+	err = coord.ChanOpenTry(chainB, chainA, channelB, channelA, connB, order)
+	require.NoError(coord.t, err)
+
+	err = coord.ChanOpenAck(chainA, chainB, channelA, channelB)
+	require.NoError(coord.t, err)
+
+	err = coord.ChanOpenConfirm(chainB, chainA, channelB, channelA)
+	require.NoError(coord.t, err)
+
+	return channelA, channelB
+}
+
+// SendPacket sends a packet through the channel keeper on the source chain and updates the
+// counterparty client for the source chain.
+func (coord *Coordinator) SendPacket(
+	source, counterparty *TestChain,
+	packet exported.PacketI,
+	counterpartyClientID string,
+) error {
+	if err := source.SendPacket(packet); err != nil {
+		return err
+	}
+	coord.IncrementTime()
+
+	// update source client on counterparty connection
+	return coord.UpdateClient(
+		counterparty, source,
+		counterpartyClientID, exported.Tendermint,
+	)
+}
+
+// RecvPacket receives a channel packet on the counterparty chain and updates
+// the client on the source chain representing the counterparty.
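+//
+// As an illustrative sketch (clientA/clientB, packet and ack are assumed to come from
+// the surrounding test), a full relay round trip from chainA to chainB looks like:
+//
+//   require.NoError(t, coord.SendPacket(chainA, chainB, packet, clientB))
+//   require.NoError(t, coord.RecvPacket(chainA, chainB, clientA, packet))
+//   require.NoError(t, coord.AcknowledgePacket(chainA, chainB, clientB, packet, ack))
+//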
+func (coord *Coordinator) RecvPacket( + source, counterparty *TestChain, + sourceClient string, + packet channeltypes.Packet, +) error { + // get proof of packet commitment on source + packetKey := host.PacketCommitmentKey(packet.GetSourcePort(), packet.GetSourceChannel(), packet.GetSequence()) + proof, proofHeight := source.QueryProof(packetKey) + + // Increment time and commit block so that 5 second delay period passes between send and receive + coord.IncrementTime() + coord.CommitBlock(source, counterparty) + + recvMsg := channeltypes.NewMsgRecvPacket(packet, proof, proofHeight, counterparty.SenderAccount.GetAddress()) + + // receive on counterparty and update source client + return coord.SendMsgs(counterparty, source, sourceClient, []sdk.Msg{recvMsg}) +} + +// WriteAcknowledgement writes an acknowledgement to the channel keeper on the source chain and updates the +// counterparty client for the source chain. +func (coord *Coordinator) WriteAcknowledgement( + source, counterparty *TestChain, + packet exported.PacketI, + counterpartyClientID string, +) error { + if err := source.WriteAcknowledgement(packet); err != nil { + return err + } + coord.IncrementTime() + + // update source client on counterparty connection + return coord.UpdateClient( + counterparty, source, + counterpartyClientID, exported.Tendermint, + ) +} + +// AcknowledgePacket acknowledges on the source chain the packet received on +// the counterparty chain and updates the client on the counterparty representing +// the source chain. +// TODO: add a query for the acknowledgement by events +// - https://github.com/cosmos/cosmos-sdk/issues/6509 +func (coord *Coordinator) AcknowledgePacket( + source, counterparty *TestChain, + counterpartyClient string, + packet channeltypes.Packet, ack []byte, +) error { + // get proof of acknowledgement on counterparty + packetKey := host.PacketAcknowledgementKey(packet.GetDestPort(), packet.GetDestChannel(), packet.GetSequence()) + proof, proofHeight := counterparty.QueryProof(packetKey) + + // Increment time and commit block so that 5 second delay period passes between send and receive + coord.IncrementTime() + coord.CommitBlock(source, counterparty) + + ackMsg := channeltypes.NewMsgAcknowledgement(packet, ack, proof, proofHeight, source.SenderAccount.GetAddress()) + return coord.SendMsgs(source, counterparty, counterpartyClient, []sdk.Msg{ackMsg}) +} + +// RelayPacket receives a channel packet on counterparty, queries the ack +// and acknowledges the packet on source. The clients are updated as needed. +func (coord *Coordinator) RelayPacket( + source, counterparty *TestChain, + sourceClient, counterpartyClient string, + packet channeltypes.Packet, ack []byte, +) error { + // Increment time and commit block so that 5 second delay period passes between send and receive + coord.IncrementTime() + coord.CommitBlock(counterparty) + + if err := coord.RecvPacket(source, counterparty, sourceClient, packet); err != nil { + return err + } + + // Increment time and commit block so that 5 second delay period passes between send and receive + coord.IncrementTime() + coord.CommitBlock(source) + + return coord.AcknowledgePacket(source, counterparty, counterpartyClient, packet, ack) +} + +// IncrementTime iterates through all the TestChain's and increments their current header time +// by 5 seconds. +// +// CONTRACT: this function must be called after every commit on any TestChain. 
+func (coord *Coordinator) IncrementTime() {
+	for _, chain := range coord.Chains {
+		chain.CurrentHeader.Time = chain.CurrentHeader.Time.Add(TimeIncrement)
+		chain.App.BeginBlock(abci.RequestBeginBlock{Header: chain.CurrentHeader})
+	}
+}
+
+// IncrementTimeBy iterates through all the TestChains and increments their current header time
+// by the specified duration.
+func (coord *Coordinator) IncrementTimeBy(increment time.Duration) {
+	for _, chain := range coord.Chains {
+		chain.CurrentHeader.Time = chain.CurrentHeader.Time.Add(increment)
+		chain.App.BeginBlock(abci.RequestBeginBlock{Header: chain.CurrentHeader})
+	}
+}
+
+// SendMsg delivers a single provided message to the chain. The counterparty
+// client is updated with the new source consensus state.
+func (coord *Coordinator) SendMsg(source, counterparty *TestChain, counterpartyClientID string, msg sdk.Msg) error {
+	return coord.SendMsgs(source, counterparty, counterpartyClientID, []sdk.Msg{msg})
+}
+
+// SendMsgs delivers the provided messages to the chain. The counterparty
+// client is updated with the new source consensus state.
+func (coord *Coordinator) SendMsgs(source, counterparty *TestChain, counterpartyClientID string, msgs []sdk.Msg) error {
+	if err := source.sendMsgs(msgs...); err != nil {
+		return err
+	}
+
+	coord.IncrementTime()
+
+	// update source client on counterparty connection
+	return coord.UpdateClient(
+		counterparty, source,
+		counterpartyClientID, exported.Tendermint,
+	)
+}
+
+// GetChain returns the TestChain for the given chainID and fails the test if it does
+// not exist.
+func (coord *Coordinator) GetChain(chainID string) *TestChain {
+	chain, found := coord.Chains[chainID]
+	require.True(coord.t, found, fmt.Sprintf("%s chain does not exist", chainID))
+	return chain
+}
+
+// GetChainID returns the chainID used for the provided index.
+func GetChainID(index int) string {
+	return ChainIDPrefix + strconv.Itoa(index)
+}
+
+// CommitBlock commits a block on the provided chains and then increments the global time.
+//
+// CONTRACT: the passed in list of chains must not contain duplicates
+func (coord *Coordinator) CommitBlock(chains ...*TestChain) {
+	for _, chain := range chains {
+		chain.App.Commit()
+		chain.NextBlock()
+	}
+	coord.IncrementTime()
+}
+
+// CommitNBlocks commits n blocks to state and updates the block height by 1 for each commit.
+func (coord *Coordinator) CommitNBlocks(chain *TestChain, n uint64) {
+	for i := uint64(0); i < n; i++ {
+		chain.App.BeginBlock(abci.RequestBeginBlock{Header: chain.CurrentHeader})
+		chain.App.Commit()
+		chain.NextBlock()
+		coord.IncrementTime()
+	}
+}
+
+// ConnOpenInit initializes a connection on the source chain with the state INIT
+// using the OpenInit handshake call.
+//
+// NOTE: The counterparty testing connection will be created even if it is not created in the
+// application state.
+func (coord *Coordinator) ConnOpenInit(
+	source, counterparty *TestChain,
+	clientID, counterpartyClientID string,
+) (*TestConnection, *TestConnection, error) {
+	sourceConnection := source.AddTestConnection(clientID, counterpartyClientID)
+	counterpartyConnection := counterparty.AddTestConnection(counterpartyClientID, clientID)
+
+	// initialize connection on source
+	if err := source.ConnectionOpenInit(counterparty, sourceConnection, counterpartyConnection); err != nil {
+		return sourceConnection, counterpartyConnection, err
+	}
+	coord.IncrementTime()
+
+	// update source client on counterparty connection
+	if err := coord.UpdateClient(
+		counterparty, source,
+		counterpartyClientID, exported.Tendermint,
+	); err != nil {
+		return sourceConnection, counterpartyConnection, err
+	}
+
+	return sourceConnection, counterpartyConnection, nil
+}
+
+// ConnOpenInitOnBothChains initializes a connection on both chains with the state INIT
+// using the OpenInit handshake call.
+func (coord *Coordinator) ConnOpenInitOnBothChains(
+	source, counterparty *TestChain,
+	clientID, counterpartyClientID string,
+) (*TestConnection, *TestConnection, error) {
+	sourceConnection := source.AddTestConnection(clientID, counterpartyClientID)
+	counterpartyConnection := counterparty.AddTestConnection(counterpartyClientID, clientID)
+
+	// initialize connection on source
+	if err := source.ConnectionOpenInit(counterparty, sourceConnection, counterpartyConnection); err != nil {
+		return sourceConnection, counterpartyConnection, err
+	}
+	coord.IncrementTime()
+
+	// initialize connection on counterparty
+	if err := counterparty.ConnectionOpenInit(source, counterpartyConnection, sourceConnection); err != nil {
+		return sourceConnection, counterpartyConnection, err
+	}
+	coord.IncrementTime()
+
+	// update counterparty client on source connection
+	if err := coord.UpdateClient(
+		source, counterparty,
+		clientID, exported.Tendermint,
+	); err != nil {
+		return sourceConnection, counterpartyConnection, err
+	}
+
+	// update source client on counterparty connection
+	if err := coord.UpdateClient(
+		counterparty, source,
+		counterpartyClientID, exported.Tendermint,
+	); err != nil {
+		return sourceConnection, counterpartyConnection, err
+	}
+
+	return sourceConnection, counterpartyConnection, nil
+}
+
+// ConnOpenTry initializes a connection on the source chain with the state TRYOPEN
+// using the OpenTry handshake call.
+func (coord *Coordinator) ConnOpenTry(
+	source, counterparty *TestChain,
+	sourceConnection, counterpartyConnection *TestConnection,
+) error {
+	// initialize TRYOPEN connection on source
+	if err := source.ConnectionOpenTry(counterparty, sourceConnection, counterpartyConnection); err != nil {
+		return err
+	}
+	coord.IncrementTime()
+
+	// update source client on counterparty connection
+	return coord.UpdateClient(
+		counterparty, source,
+		counterpartyConnection.ClientID, exported.Tendermint,
+	)
+}
+
+// ConnOpenAck initializes a connection on the source chain with the state OPEN
+// using the OpenAck handshake call.
+func (coord *Coordinator) ConnOpenAck( + source, counterparty *TestChain, + sourceConnection, counterpartyConnection *TestConnection, +) error { + // set OPEN connection on source using OpenAck + if err := source.ConnectionOpenAck(counterparty, sourceConnection, counterpartyConnection); err != nil { + return err + } + coord.IncrementTime() + + // update source client on counterparty connection + return coord.UpdateClient( + counterparty, source, + counterpartyConnection.ClientID, exported.Tendermint, + ) +} + +// ConnOpenConfirm initializes a connection on the source chain with the state OPEN +// using the OpenConfirm handshake call. +func (coord *Coordinator) ConnOpenConfirm( + source, counterparty *TestChain, + sourceConnection, counterpartyConnection *TestConnection, +) error { + if err := source.ConnectionOpenConfirm(counterparty, sourceConnection, counterpartyConnection); err != nil { + return err + } + coord.IncrementTime() + + // update source client on counterparty connection + return coord.UpdateClient( + counterparty, source, + counterpartyConnection.ClientID, exported.Tendermint, + ) +} + +// ChanOpenInit initializes a channel on the source chain with the state INIT +// using the OpenInit handshake call. +// +// NOTE: The counterparty testing channel will be created even if it is not created in the +// application state. +func (coord *Coordinator) ChanOpenInit( + source, counterparty *TestChain, + connection, counterpartyConnection *TestConnection, + sourcePortID, counterpartyPortID string, + order channeltypes.Order, +) (TestChannel, TestChannel, error) { + sourceChannel := source.AddTestChannel(connection, sourcePortID) + counterpartyChannel := counterparty.AddTestChannel(counterpartyConnection, counterpartyPortID) + + // NOTE: only creation of a capability for a transfer or mock port is supported + // Other applications must bind to the port in InitGenesis or modify this code. + source.CreatePortCapability(sourceChannel.PortID) + coord.IncrementTime() + + // initialize channel on source + if err := source.ChanOpenInit(sourceChannel, counterpartyChannel, order, connection.ID); err != nil { + return sourceChannel, counterpartyChannel, err + } + coord.IncrementTime() + + // update source client on counterparty connection + if err := coord.UpdateClient( + counterparty, source, + counterpartyConnection.ClientID, exported.Tendermint, + ); err != nil { + return sourceChannel, counterpartyChannel, err + } + + return sourceChannel, counterpartyChannel, nil +} + +// ChanOpenInitOnBothChains initializes a channel on the source chain and counterparty chain +// with the state INIT using the OpenInit handshake call. +func (coord *Coordinator) ChanOpenInitOnBothChains( + source, counterparty *TestChain, + connection, counterpartyConnection *TestConnection, + sourcePortID, counterpartyPortID string, + order channeltypes.Order, +) (TestChannel, TestChannel, error) { + sourceChannel := source.AddTestChannel(connection, sourcePortID) + counterpartyChannel := counterparty.AddTestChannel(counterpartyConnection, counterpartyPortID) + + // NOTE: only creation of a capability for a transfer or mock port is supported + // Other applications must bind to the port in InitGenesis or modify this code. 
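+	// (As a sketch of the alternative: a module normally binds its port in InitGenesis
+	// via its port keeper, e.g. cap := k.portKeeper.BindPort(ctx, portID), and then
+	// claims cap with its scoped capability keeper; the exact keeper wiring is
+	// app-specific and not part of this helper.)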
+ source.CreatePortCapability(sourceChannel.PortID) + counterparty.CreatePortCapability(counterpartyChannel.PortID) + coord.IncrementTime() + + // initialize channel on source + if err := source.ChanOpenInit(sourceChannel, counterpartyChannel, order, connection.ID); err != nil { + return sourceChannel, counterpartyChannel, err + } + coord.IncrementTime() + + // initialize channel on counterparty + if err := counterparty.ChanOpenInit(counterpartyChannel, sourceChannel, order, counterpartyConnection.ID); err != nil { + return sourceChannel, counterpartyChannel, err + } + coord.IncrementTime() + + // update counterparty client on source connection + if err := coord.UpdateClient( + source, counterparty, + connection.ClientID, exported.Tendermint, + ); err != nil { + return sourceChannel, counterpartyChannel, err + } + + // update source client on counterparty connection + if err := coord.UpdateClient( + counterparty, source, + counterpartyConnection.ClientID, exported.Tendermint, + ); err != nil { + return sourceChannel, counterpartyChannel, err + } + + return sourceChannel, counterpartyChannel, nil +} + +// ChanOpenTry initializes a channel on the source chain with the state TRYOPEN +// using the OpenTry handshake call. +func (coord *Coordinator) ChanOpenTry( + source, counterparty *TestChain, + sourceChannel, counterpartyChannel TestChannel, + connection *TestConnection, + order channeltypes.Order, +) error { + + // initialize channel on source + if err := source.ChanOpenTry(counterparty, sourceChannel, counterpartyChannel, order, connection.ID); err != nil { + return err + } + coord.IncrementTime() + + // update source client on counterparty connection + return coord.UpdateClient( + counterparty, source, + connection.CounterpartyClientID, exported.Tendermint, + ) +} + +// ChanOpenAck initializes a channel on the source chain with the state OPEN +// using the OpenAck handshake call. +func (coord *Coordinator) ChanOpenAck( + source, counterparty *TestChain, + sourceChannel, counterpartyChannel TestChannel, +) error { + + if err := source.ChanOpenAck(counterparty, sourceChannel, counterpartyChannel); err != nil { + return err + } + coord.IncrementTime() + + // update source client on counterparty connection + return coord.UpdateClient( + counterparty, source, + sourceChannel.CounterpartyClientID, exported.Tendermint, + ) +} + +// ChanOpenConfirm initializes a channel on the source chain with the state OPEN +// using the OpenConfirm handshake call. +func (coord *Coordinator) ChanOpenConfirm( + source, counterparty *TestChain, + sourceChannel, counterpartyChannel TestChannel, +) error { + + if err := source.ChanOpenConfirm(counterparty, sourceChannel, counterpartyChannel); err != nil { + return err + } + coord.IncrementTime() + + // update source client on counterparty connection + return coord.UpdateClient( + counterparty, source, + sourceChannel.CounterpartyClientID, exported.Tendermint, + ) +} + +// ChanCloseInit closes a channel on the source chain resulting in the channels state +// being set to CLOSED. 
+//
+// NOTE: does not work with ibc-transfer module
+func (coord *Coordinator) ChanCloseInit(
+	source, counterparty *TestChain,
+	channel TestChannel,
+) error {
+
+	if err := source.ChanCloseInit(counterparty, channel); err != nil {
+		return err
+	}
+	coord.IncrementTime()
+
+	// update source client on counterparty connection
+	return coord.UpdateClient(
+		counterparty, source,
+		channel.CounterpartyClientID, exported.Tendermint,
+	)
+}
+
+// SetChannelClosed sets a channel state to CLOSED.
+func (coord *Coordinator) SetChannelClosed(
+	source, counterparty *TestChain,
+	testChannel TestChannel,
+) error {
+	channel := source.GetChannel(testChannel)
+
+	channel.State = channeltypes.CLOSED
+	source.App.IBCKeeper.ChannelKeeper.SetChannel(source.GetContext(), testChannel.PortID, testChannel.ID, channel)
+
+	coord.CommitBlock(source)
+
+	// update source client on counterparty connection
+	return coord.UpdateClient(
+		counterparty, source,
+		testChannel.CounterpartyClientID, exported.Tendermint,
+	)
+}
diff --git a/testing/mock/README.md b/testing/mock/README.md
new file mode 100644
index 0000000000..5da403f9c3
--- /dev/null
+++ b/testing/mock/README.md
@@ -0,0 +1,6 @@
+This package is only intended to be used for testing core IBC. In order to maintain secure
+testing, we need to do message passing and execution, which requires connecting an IBC application
+module that fulfills all the callbacks. We cannot connect to ibc-transfer, which does not support
+all channel types, so instead we create a mock application module which does nothing. It simply
+returns nil in all cases so that no error ever occurs. It is intended to be as minimal and lightweight
+as possible and should never import simapp.
diff --git a/testing/mock/doc.go b/testing/mock/doc.go
new file mode 100644
index 0000000000..eaaa42b2ab
--- /dev/null
+++ b/testing/mock/doc.go
@@ -0,0 +1,9 @@
+/*
+This package is only intended to be used for testing core IBC. In order to maintain secure
+testing, we need to do message passing and execution, which requires connecting an IBC application
+module that fulfills all the callbacks. We cannot connect to ibc-transfer, which does not support
+all channel types, so instead we create a mock application module which does nothing. It simply
+returns nil in all cases so that no error ever occurs. It is intended to be as minimal and lightweight
+as possible and should never import simapp.
+*/
+package mock
diff --git a/testing/mock/mock.go b/testing/mock/mock.go
new file mode 100644
index 0000000000..663497aa05
--- /dev/null
+++ b/testing/mock/mock.go
@@ -0,0 +1,188 @@
+package mock
+
+import (
+	"encoding/json"
+
+	"github.com/cosmos/cosmos-sdk/types/module"
+
+	"github.com/grpc-ecosystem/grpc-gateway/runtime"
+
+	"github.com/gorilla/mux"
+	"github.com/spf13/cobra"
+
+	abci "github.com/tendermint/tendermint/abci/types"
+
+	"github.com/cosmos/cosmos-sdk/client"
+	"github.com/cosmos/cosmos-sdk/codec"
+	codectypes "github.com/cosmos/cosmos-sdk/codec/types"
+	sdk "github.com/cosmos/cosmos-sdk/types"
+	capabilitykeeper "github.com/cosmos/cosmos-sdk/x/capability/keeper"
+	capabilitytypes "github.com/cosmos/cosmos-sdk/x/capability/types"
+	channeltypes "github.com/cosmos/cosmos-sdk/x/ibc/core/04-channel/types"
+	host "github.com/cosmos/cosmos-sdk/x/ibc/core/24-host"
+)
+
+const (
+	ModuleName = "mock"
+)
+
+var (
+	MockAcknowledgement = []byte("mock acknowledgement")
+	MockCommitment      = []byte("mock packet commitment")
+)
+
+// AppModuleBasic is the mock AppModuleBasic.
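+//
+// In a test application the mock module is typically wired up next to core IBC; a
+// sketch of the expected wiring (the app/router variable names below are assumptions,
+// not part of this package):
+//
+//	scopedMockKeeper := app.CapabilityKeeper.ScopeToModule(mock.ModuleName)
+//	mockModule := mock.NewAppModule(scopedMockKeeper)
+//	ibcRouter.AddRoute(mock.ModuleName, mockModule)
+//	app.IBCKeeper.SetRouter(ibcRouter)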
+type AppModuleBasic struct{} + +// Name implements AppModuleBasic interface. +func (AppModuleBasic) Name() string { + return ModuleName +} + +// RegisterLegacyAminoCodec implements AppModuleBasic interface. +func (AppModuleBasic) RegisterLegacyAminoCodec(*codec.LegacyAmino) {} + +// RegisterInterfaces implements AppModuleBasic interface. +func (AppModuleBasic) RegisterInterfaces(registry codectypes.InterfaceRegistry) {} + +// DefaultGenesis implements AppModuleBasic interface. +func (AppModuleBasic) DefaultGenesis(cdc codec.JSONMarshaler) json.RawMessage { + return nil +} + +// ValidateGenesis implements the AppModuleBasic interface. +func (AppModuleBasic) ValidateGenesis(codec.JSONMarshaler, client.TxEncodingConfig, json.RawMessage) error { + return nil +} + +// RegisterRESTRoutes implements AppModuleBasic interface. +func (AppModuleBasic) RegisterRESTRoutes(clientCtx client.Context, rtr *mux.Router) {} + +// RegisterGRPCGatewayRoutes implements AppModuleBasic interface. +func (a AppModuleBasic) RegisterGRPCGatewayRoutes(_ client.Context, _ *runtime.ServeMux) {} + +// GetTxCmd implements AppModuleBasic interface. +func (AppModuleBasic) GetTxCmd() *cobra.Command { + return nil +} + +// GetQueryCmd implements AppModuleBasic interface. +func (AppModuleBasic) GetQueryCmd() *cobra.Command { + return nil +} + +// AppModule represents the AppModule for the mock module. +type AppModule struct { + AppModuleBasic + scopedKeeper capabilitykeeper.ScopedKeeper +} + +// NewAppModule returns a mock AppModule instance. +func NewAppModule(sk capabilitykeeper.ScopedKeeper) AppModule { + return AppModule{ + scopedKeeper: sk, + } +} + +// RegisterInvariants implements the AppModule interface. +func (AppModule) RegisterInvariants(ir sdk.InvariantRegistry) {} + +// Route implements the AppModule interface. +func (am AppModule) Route() sdk.Route { + return sdk.NewRoute(ModuleName, nil) +} + +// QuerierRoute implements the AppModule interface. +func (AppModule) QuerierRoute() string { + return "" +} + +// LegacyQuerierHandler implements the AppModule interface. +func (am AppModule) LegacyQuerierHandler(*codec.LegacyAmino) sdk.Querier { + return nil +} + +// RegisterServices implements the AppModule interface. +func (am AppModule) RegisterServices(module.Configurator) {} + +// InitGenesis implements the AppModule interface. +func (am AppModule) InitGenesis(ctx sdk.Context, cdc codec.JSONMarshaler, data json.RawMessage) []abci.ValidatorUpdate { + return []abci.ValidatorUpdate{} +} + +// ExportGenesis implements the AppModule interface. +func (am AppModule) ExportGenesis(ctx sdk.Context, cdc codec.JSONMarshaler) json.RawMessage { + return nil +} + +// BeginBlock implements the AppModule interface +func (am AppModule) BeginBlock(ctx sdk.Context, req abci.RequestBeginBlock) { +} + +// EndBlock implements the AppModule interface +func (am AppModule) EndBlock(ctx sdk.Context, req abci.RequestEndBlock) []abci.ValidatorUpdate { + return []abci.ValidatorUpdate{} +} + +//____________________________________________________________________________ + +// OnChanOpenInit implements the IBCModule interface. 
+func (am AppModule) OnChanOpenInit( + ctx sdk.Context, _ channeltypes.Order, _ []string, portID string, + channelID string, chanCap *capabilitytypes.Capability, _ channeltypes.Counterparty, _ string, +) error { + // Claim channel capability passed back by IBC module + if err := am.scopedKeeper.ClaimCapability(ctx, chanCap, host.ChannelCapabilityPath(portID, channelID)); err != nil { + return err + } + + return nil +} + +// OnChanOpenTry implements the IBCModule interface. +func (am AppModule) OnChanOpenTry( + ctx sdk.Context, _ channeltypes.Order, _ []string, portID string, + channelID string, chanCap *capabilitytypes.Capability, _ channeltypes.Counterparty, _, _ string, +) error { + // Claim channel capability passed back by IBC module + if err := am.scopedKeeper.ClaimCapability(ctx, chanCap, host.ChannelCapabilityPath(portID, channelID)); err != nil { + return err + } + + return nil +} + +// OnChanOpenAck implements the IBCModule interface. +func (am AppModule) OnChanOpenAck(sdk.Context, string, string, string) error { + return nil +} + +// OnChanOpenConfirm implements the IBCModule interface. +func (am AppModule) OnChanOpenConfirm(sdk.Context, string, string) error { + return nil +} + +// OnChanCloseInit implements the IBCModule interface. +func (am AppModule) OnChanCloseInit(sdk.Context, string, string) error { + return nil +} + +// OnChanCloseConfirm implements the IBCModule interface. +func (am AppModule) OnChanCloseConfirm(sdk.Context, string, string) error { + return nil +} + +// OnRecvPacket implements the IBCModule interface. +func (am AppModule) OnRecvPacket(sdk.Context, channeltypes.Packet) (*sdk.Result, []byte, error) { + return nil, MockAcknowledgement, nil +} + +// OnAcknowledgementPacket implements the IBCModule interface. +func (am AppModule) OnAcknowledgementPacket(sdk.Context, channeltypes.Packet, []byte) (*sdk.Result, error) { + return nil, nil +} + +// OnTimeoutPacket implements the IBCModule interface. +func (am AppModule) OnTimeoutPacket(sdk.Context, channeltypes.Packet) (*sdk.Result, error) { + return nil, nil +} diff --git a/testing/mock/privval.go b/testing/mock/privval.go new file mode 100644 index 0000000000..fe46659b3d --- /dev/null +++ b/testing/mock/privval.go @@ -0,0 +1,50 @@ +package mock + +import ( + "github.com/tendermint/tendermint/crypto" + tmproto "github.com/tendermint/tendermint/proto/tendermint/types" + tmtypes "github.com/tendermint/tendermint/types" + + cryptocodec "github.com/cosmos/cosmos-sdk/crypto/codec" + "github.com/cosmos/cosmos-sdk/crypto/keys/ed25519" + cryptotypes "github.com/cosmos/cosmos-sdk/crypto/types" +) + +var _ tmtypes.PrivValidator = PV{} + +// MockPV implements PrivValidator without any safety or persistence. +// Only use it for testing. 
+type PV struct { + PrivKey cryptotypes.PrivKey +} + +func NewPV() PV { + return PV{ed25519.GenPrivKey()} +} + +// GetPubKey implements PrivValidator interface +func (pv PV) GetPubKey() (crypto.PubKey, error) { + return cryptocodec.ToTmPubKeyInterface(pv.PrivKey.PubKey()) +} + +// SignVote implements PrivValidator interface +func (pv PV) SignVote(chainID string, vote *tmproto.Vote) error { + signBytes := tmtypes.VoteSignBytes(chainID, vote) + sig, err := pv.PrivKey.Sign(signBytes) + if err != nil { + return err + } + vote.Signature = sig + return nil +} + +// SignProposal implements PrivValidator interface +func (pv PV) SignProposal(chainID string, proposal *tmproto.Proposal) error { + signBytes := tmtypes.ProposalSignBytes(chainID, proposal) + sig, err := pv.PrivKey.Sign(signBytes) + if err != nil { + return err + } + proposal.Signature = sig + return nil +} diff --git a/testing/mock/privval_test.go b/testing/mock/privval_test.go new file mode 100644 index 0000000000..b9f0487a36 --- /dev/null +++ b/testing/mock/privval_test.go @@ -0,0 +1,44 @@ +package mock_test + +import ( + "testing" + + "github.com/stretchr/testify/require" + tmproto "github.com/tendermint/tendermint/proto/tendermint/types" + tmtypes "github.com/tendermint/tendermint/types" + + "github.com/cosmos/cosmos-sdk/x/ibc/testing/mock" +) + +const chainID = "testChain" + +func TestGetPubKey(t *testing.T) { + pv := mock.NewPV() + pk, err := pv.GetPubKey() + require.NoError(t, err) + require.Equal(t, "ed25519", pk.Type()) +} + +func TestSignVote(t *testing.T) { + pv := mock.NewPV() + pk, _ := pv.GetPubKey() + + vote := &tmproto.Vote{Height: 2} + pv.SignVote(chainID, vote) + + msg := tmtypes.VoteSignBytes(chainID, vote) + ok := pk.VerifySignature(msg, vote.Signature) + require.True(t, ok) +} + +func TestSignProposal(t *testing.T) { + pv := mock.NewPV() + pk, _ := pv.GetPubKey() + + proposal := &tmproto.Proposal{Round: 2} + pv.SignProposal(chainID, proposal) + + msg := tmtypes.ProposalSignBytes(chainID, proposal) + ok := pk.VerifySignature(msg, proposal.Signature) + require.True(t, ok) +} diff --git a/testing/solomachine.go b/testing/solomachine.go new file mode 100644 index 0000000000..bee6378597 --- /dev/null +++ b/testing/solomachine.go @@ -0,0 +1,321 @@ +package ibctesting + +import ( + "testing" + + "github.com/stretchr/testify/require" + + "github.com/cosmos/cosmos-sdk/codec" + codectypes "github.com/cosmos/cosmos-sdk/codec/types" + kmultisig "github.com/cosmos/cosmos-sdk/crypto/keys/multisig" + "github.com/cosmos/cosmos-sdk/crypto/keys/secp256k1" + cryptotypes "github.com/cosmos/cosmos-sdk/crypto/types" + "github.com/cosmos/cosmos-sdk/crypto/types/multisig" + "github.com/cosmos/cosmos-sdk/types/tx/signing" + clienttypes "github.com/cosmos/cosmos-sdk/x/ibc/core/02-client/types" + commitmenttypes "github.com/cosmos/cosmos-sdk/x/ibc/core/23-commitment/types" + host "github.com/cosmos/cosmos-sdk/x/ibc/core/24-host" + "github.com/cosmos/cosmos-sdk/x/ibc/core/exported" + solomachinetypes "github.com/cosmos/cosmos-sdk/x/ibc/light-clients/06-solomachine/types" +) + +var prefix = commitmenttypes.NewMerklePrefix([]byte("ibc")) + +// Solomachine is a testing helper used to simulate a counterparty +// solo machine client. 
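+//
+// A typical test constructs one with NewSolomachine and then drives the client with
+// the helpers below (a sketch; cdc is an assumed codec.BinaryMarshaler from the test
+// setup):
+//
+//	solo := ibctesting.NewSolomachine(t, cdc, "solomachineclient", "diversifier", 1)
+//	clientState := solo.ClientState()       // initial client state at sequence 1
+//	consensusState := solo.ConsensusState() // matching consensus state
+//	header := solo.CreateHeader()           // rotates keys and bumps the sequence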
+type Solomachine struct { + t *testing.T + + cdc codec.BinaryMarshaler + ClientID string + PrivateKeys []cryptotypes.PrivKey // keys used for signing + PublicKeys []cryptotypes.PubKey // keys used for generating solo machine pub key + PublicKey cryptotypes.PubKey // key used for verification + Sequence uint64 + Time uint64 + Diversifier string +} + +// NewSolomachine returns a new solomachine instance with an `nKeys` amount of +// generated private/public key pairs and a sequence starting at 1. If nKeys +// is greater than 1 then a multisig public key is used. +func NewSolomachine(t *testing.T, cdc codec.BinaryMarshaler, clientID, diversifier string, nKeys uint64) *Solomachine { + privKeys, pubKeys, pk := GenerateKeys(t, nKeys) + + return &Solomachine{ + t: t, + cdc: cdc, + ClientID: clientID, + PrivateKeys: privKeys, + PublicKeys: pubKeys, + PublicKey: pk, + Sequence: 1, + Time: 10, + Diversifier: diversifier, + } +} + +// GenerateKeys generates a new set of secp256k1 private keys and public keys. +// If the number of keys is greater than one then the public key returned represents +// a multisig public key. The private keys are used for signing, the public +// keys are used for generating the public key and the public key is used for +// solo machine verification. The usage of secp256k1 is entirely arbitrary. +// The key type can be swapped for any key type supported by the PublicKey +// interface, if needed. The same is true for the amino based Multisignature +// public key. +func GenerateKeys(t *testing.T, n uint64) ([]cryptotypes.PrivKey, []cryptotypes.PubKey, cryptotypes.PubKey) { + require.NotEqual(t, uint64(0), n, "generation of zero keys is not allowed") + + privKeys := make([]cryptotypes.PrivKey, n) + pubKeys := make([]cryptotypes.PubKey, n) + for i := uint64(0); i < n; i++ { + privKeys[i] = secp256k1.GenPrivKey() + pubKeys[i] = privKeys[i].PubKey() + } + + var pk cryptotypes.PubKey + if len(privKeys) > 1 { + // generate multi sig pk + pk = kmultisig.NewLegacyAminoPubKey(int(n), pubKeys) + } else { + pk = privKeys[0].PubKey() + } + + return privKeys, pubKeys, pk +} + +// ClientState returns a new solo machine ClientState instance. Default usage does not allow update +// after governance proposal +func (solo *Solomachine) ClientState() *solomachinetypes.ClientState { + return solomachinetypes.NewClientState(solo.Sequence, solo.ConsensusState(), false) +} + +// ConsensusState returns a new solo machine ConsensusState instance +func (solo *Solomachine) ConsensusState() *solomachinetypes.ConsensusState { + publicKey, err := codectypes.NewAnyWithValue(solo.PublicKey) + require.NoError(solo.t, err) + + return &solomachinetypes.ConsensusState{ + PublicKey: publicKey, + Diversifier: solo.Diversifier, + Timestamp: solo.Time, + } +} + +// GetHeight returns an exported.Height with Sequence as RevisionHeight +func (solo *Solomachine) GetHeight() exported.Height { + return clienttypes.NewHeight(0, solo.Sequence) +} + +// CreateHeader generates a new private/public key pair and creates the +// necessary signature to construct a valid solo machine header. 
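+//
+// The returned header is what a test would feed into a client update, e.g. (a
+// sketch; the MsgUpdateClient constructor lives in 02-client types and signer is
+// an assumed sdk.AccAddress):
+//
+//	header := solo.CreateHeader()
+//	msg, err := clienttypes.NewMsgUpdateClient(solo.ClientID, header, signer)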
+func (solo *Solomachine) CreateHeader() *solomachinetypes.Header { + // generate new private keys and signature for header + newPrivKeys, newPubKeys, newPubKey := GenerateKeys(solo.t, uint64(len(solo.PrivateKeys))) + + publicKey, err := codectypes.NewAnyWithValue(newPubKey) + require.NoError(solo.t, err) + + data := &solomachinetypes.HeaderData{ + NewPubKey: publicKey, + NewDiversifier: solo.Diversifier, + } + + dataBz, err := solo.cdc.MarshalBinaryBare(data) + require.NoError(solo.t, err) + + signBytes := &solomachinetypes.SignBytes{ + Sequence: solo.Sequence, + Timestamp: solo.Time, + Diversifier: solo.Diversifier, + DataType: solomachinetypes.HEADER, + Data: dataBz, + } + + bz, err := solo.cdc.MarshalBinaryBare(signBytes) + require.NoError(solo.t, err) + + sig := solo.GenerateSignature(bz) + + header := &solomachinetypes.Header{ + Sequence: solo.Sequence, + Timestamp: solo.Time, + Signature: sig, + NewPublicKey: publicKey, + NewDiversifier: solo.Diversifier, + } + + // assumes successful header update + solo.Sequence++ + solo.PrivateKeys = newPrivKeys + solo.PublicKeys = newPubKeys + solo.PublicKey = newPubKey + + return header +} + +// CreateMisbehaviour constructs testing misbehaviour for the solo machine client +// by signing over two different data bytes at the same sequence. +func (solo *Solomachine) CreateMisbehaviour() *solomachinetypes.Misbehaviour { + path := solo.GetClientStatePath("counterparty") + dataOne, err := solomachinetypes.ClientStateDataBytes(solo.cdc, path, solo.ClientState()) + require.NoError(solo.t, err) + + path = solo.GetConsensusStatePath("counterparty", clienttypes.NewHeight(0, 1)) + dataTwo, err := solomachinetypes.ConsensusStateDataBytes(solo.cdc, path, solo.ConsensusState()) + require.NoError(solo.t, err) + + signBytes := &solomachinetypes.SignBytes{ + Sequence: solo.Sequence, + Timestamp: solo.Time, + Diversifier: solo.Diversifier, + DataType: solomachinetypes.CLIENT, + Data: dataOne, + } + + bz, err := solo.cdc.MarshalBinaryBare(signBytes) + require.NoError(solo.t, err) + + sig := solo.GenerateSignature(bz) + signatureOne := solomachinetypes.SignatureAndData{ + Signature: sig, + DataType: solomachinetypes.CLIENT, + Data: dataOne, + Timestamp: solo.Time, + } + + // misbehaviour signaturess can have different timestamps + solo.Time++ + + signBytes = &solomachinetypes.SignBytes{ + Sequence: solo.Sequence, + Timestamp: solo.Time, + Diversifier: solo.Diversifier, + DataType: solomachinetypes.CONSENSUS, + Data: dataTwo, + } + + bz, err = solo.cdc.MarshalBinaryBare(signBytes) + require.NoError(solo.t, err) + + sig = solo.GenerateSignature(bz) + signatureTwo := solomachinetypes.SignatureAndData{ + Signature: sig, + DataType: solomachinetypes.CONSENSUS, + Data: dataTwo, + Timestamp: solo.Time, + } + + return &solomachinetypes.Misbehaviour{ + ClientId: solo.ClientID, + Sequence: solo.Sequence, + SignatureOne: &signatureOne, + SignatureTwo: &signatureTwo, + } +} + +// GenerateSignature uses the stored private keys to generate a signature +// over the sign bytes with each key. If the amount of keys is greater than +// 1 then a multisig data type is returned. 
+func (solo *Solomachine) GenerateSignature(signBytes []byte) []byte { + sigs := make([]signing.SignatureData, len(solo.PrivateKeys)) + for i, key := range solo.PrivateKeys { + sig, err := key.Sign(signBytes) + require.NoError(solo.t, err) + + sigs[i] = &signing.SingleSignatureData{ + Signature: sig, + } + } + + var sigData signing.SignatureData + if len(sigs) == 1 { + // single public key + sigData = sigs[0] + } else { + // generate multi signature data + multiSigData := multisig.NewMultisig(len(sigs)) + for i, sig := range sigs { + multisig.AddSignature(multiSigData, sig, i) + } + + sigData = multiSigData + } + + protoSigData := signing.SignatureDataToProto(sigData) + bz, err := solo.cdc.MarshalBinaryBare(protoSigData) + require.NoError(solo.t, err) + + return bz +} + +// GetClientStatePath returns the commitment path for the client state. +func (solo *Solomachine) GetClientStatePath(counterpartyClientIdentifier string) commitmenttypes.MerklePath { + path, err := commitmenttypes.ApplyPrefix(prefix, commitmenttypes.NewMerklePath(host.FullClientStatePath(counterpartyClientIdentifier))) + require.NoError(solo.t, err) + + return path +} + +// GetConsensusStatePath returns the commitment path for the consensus state. +func (solo *Solomachine) GetConsensusStatePath(counterpartyClientIdentifier string, consensusHeight exported.Height) commitmenttypes.MerklePath { + path, err := commitmenttypes.ApplyPrefix(prefix, commitmenttypes.NewMerklePath(host.FullConsensusStatePath(counterpartyClientIdentifier, consensusHeight))) + require.NoError(solo.t, err) + + return path +} + +// GetConnectionStatePath returns the commitment path for the connection state. +func (solo *Solomachine) GetConnectionStatePath(connID string) commitmenttypes.MerklePath { + connectionPath := commitmenttypes.NewMerklePath(host.ConnectionPath(connID)) + path, err := commitmenttypes.ApplyPrefix(prefix, connectionPath) + require.NoError(solo.t, err) + + return path +} + +// GetChannelStatePath returns the commitment path for that channel state. +func (solo *Solomachine) GetChannelStatePath(portID, channelID string) commitmenttypes.MerklePath { + channelPath := commitmenttypes.NewMerklePath(host.ChannelPath(portID, channelID)) + path, err := commitmenttypes.ApplyPrefix(prefix, channelPath) + require.NoError(solo.t, err) + + return path +} + +// GetPacketCommitmentPath returns the commitment path for a packet commitment. +func (solo *Solomachine) GetPacketCommitmentPath(portID, channelID string) commitmenttypes.MerklePath { + commitmentPath := commitmenttypes.NewMerklePath(host.PacketCommitmentPath(portID, channelID, solo.Sequence)) + path, err := commitmenttypes.ApplyPrefix(prefix, commitmentPath) + require.NoError(solo.t, err) + + return path +} + +// GetPacketAcknowledgementPath returns the commitment path for a packet acknowledgement. +func (solo *Solomachine) GetPacketAcknowledgementPath(portID, channelID string) commitmenttypes.MerklePath { + ackPath := commitmenttypes.NewMerklePath(host.PacketAcknowledgementPath(portID, channelID, solo.Sequence)) + path, err := commitmenttypes.ApplyPrefix(prefix, ackPath) + require.NoError(solo.t, err) + + return path +} + +// GetPacketReceiptPath returns the commitment path for a packet receipt +// and an absent receipts. 
+func (solo *Solomachine) GetPacketReceiptPath(portID, channelID string) commitmenttypes.MerklePath { + receiptPath := commitmenttypes.NewMerklePath(host.PacketReceiptPath(portID, channelID, solo.Sequence)) + path, err := commitmenttypes.ApplyPrefix(prefix, receiptPath) + require.NoError(solo.t, err) + + return path +} + +// GetNextSequenceRecvPath returns the commitment path for the next sequence recv counter. +func (solo *Solomachine) GetNextSequenceRecvPath(portID, channelID string) commitmenttypes.MerklePath { + nextSequenceRecvPath := commitmenttypes.NewMerklePath(host.NextSequenceRecvPath(portID, channelID)) + path, err := commitmenttypes.ApplyPrefix(prefix, nextSequenceRecvPath) + require.NoError(solo.t, err) + + return path +} diff --git a/testing/types.go b/testing/types.go new file mode 100644 index 0000000000..16cda6216b --- /dev/null +++ b/testing/types.go @@ -0,0 +1,44 @@ +package ibctesting + +import ( + channeltypes "github.com/cosmos/cosmos-sdk/x/ibc/core/04-channel/types" +) + +// TestConnection is a testing helper struct to keep track of the connectionID, source clientID, +// counterparty clientID, and the next channel version used in creating and interacting with a +// connection. +type TestConnection struct { + ID string + ClientID string + CounterpartyClientID string + NextChannelVersion string + Channels []TestChannel +} + +// FirstOrNextTestChannel returns the first test channel if it exists, otherwise it +// returns the next test channel to be created. This function is expected to be used +// when the caller does not know if the channel has or has not been created in app +// state, but would still like to refer to it to test existence or non-existence. +func (conn *TestConnection) FirstOrNextTestChannel(portID string) TestChannel { + if len(conn.Channels) > 0 { + return conn.Channels[0] + } + return TestChannel{ + PortID: portID, + ID: channeltypes.FormatChannelIdentifier(0), + ClientID: conn.ClientID, + CounterpartyClientID: conn.CounterpartyClientID, + Version: conn.NextChannelVersion, + } +} + +// TestChannel is a testing helper struct to keep track of the portID and channelID +// used in creating and interacting with a channel. The clientID and counterparty +// client ID are also tracked to cut down on querying and argument passing. +type TestChannel struct { + PortID string + ID string + ClientID string + CounterpartyClientID string + Version string +} diff --git a/third_party/proto/confio/proofs.proto b/third_party/proto/confio/proofs.proto new file mode 100644 index 0000000000..da43503ecb --- /dev/null +++ b/third_party/proto/confio/proofs.proto @@ -0,0 +1,234 @@ +syntax = "proto3"; + +package ics23; +option go_package = "github.com/confio/ics23/go"; + +enum HashOp { + // NO_HASH is the default if no data passed. Note this is an illegal argument some places. + NO_HASH = 0; + SHA256 = 1; + SHA512 = 2; + KECCAK = 3; + RIPEMD160 = 4; + BITCOIN = 5; // ripemd160(sha256(x)) +} + +/** +LengthOp defines how to process the key and value of the LeafOp +to include length information. After encoding the length with the given +algorithm, the length will be prepended to the key and value bytes. 
+(Each one with it's own encoded length) +*/ +enum LengthOp { + // NO_PREFIX don't include any length info + NO_PREFIX = 0; + // VAR_PROTO uses protobuf (and go-amino) varint encoding of the length + VAR_PROTO = 1; + // VAR_RLP uses rlp int encoding of the length + VAR_RLP = 2; + // FIXED32_BIG uses big-endian encoding of the length as a 32 bit integer + FIXED32_BIG = 3; + // FIXED32_LITTLE uses little-endian encoding of the length as a 32 bit integer + FIXED32_LITTLE = 4; + // FIXED64_BIG uses big-endian encoding of the length as a 64 bit integer + FIXED64_BIG = 5; + // FIXED64_LITTLE uses little-endian encoding of the length as a 64 bit integer + FIXED64_LITTLE = 6; + // REQUIRE_32_BYTES is like NONE, but will fail if the input is not exactly 32 bytes (sha256 output) + REQUIRE_32_BYTES = 7; + // REQUIRE_64_BYTES is like NONE, but will fail if the input is not exactly 64 bytes (sha512 output) + REQUIRE_64_BYTES = 8; +} + +/** +ExistenceProof takes a key and a value and a set of steps to perform on it. +The result of peforming all these steps will provide a "root hash", which can +be compared to the value in a header. + +Since it is computationally infeasible to produce a hash collission for any of the used +cryptographic hash functions, if someone can provide a series of operations to transform +a given key and value into a root hash that matches some trusted root, these key and values +must be in the referenced merkle tree. + +The only possible issue is maliablity in LeafOp, such as providing extra prefix data, +which should be controlled by a spec. Eg. with lengthOp as NONE, + prefix = FOO, key = BAR, value = CHOICE +and + prefix = F, key = OOBAR, value = CHOICE +would produce the same value. + +With LengthOp this is tricker but not impossible. Which is why the "leafPrefixEqual" field +in the ProofSpec is valuable to prevent this mutability. And why all trees should +length-prefix the data before hashing it. +*/ +message ExistenceProof { + bytes key = 1; + bytes value = 2; + LeafOp leaf = 3; + repeated InnerOp path = 4; +} + +/* +NonExistenceProof takes a proof of two neighbors, one left of the desired key, +one right of the desired key. If both proofs are valid AND they are neighbors, +then there is no valid proof for the given key. +*/ +message NonExistenceProof { + bytes key = 1; // TODO: remove this as unnecessary??? we prove a range + ExistenceProof left = 2; + ExistenceProof right = 3; +} + +/* +CommitmentProof is either an ExistenceProof or a NonExistenceProof, or a Batch of such messages +*/ +message CommitmentProof { + oneof proof { + ExistenceProof exist = 1; + NonExistenceProof nonexist = 2; + BatchProof batch = 3; + CompressedBatchProof compressed = 4; + } +} + +/** +LeafOp represents the raw key-value data we wish to prove, and +must be flexible to represent the internal transformation from +the original key-value pairs into the basis hash, for many existing +merkle trees. + +key and value are passed in. 
So that the signature of this operation is: + leafOp(key, value) -> output + +To process this, first prehash the keys and values if needed (ANY means no hash in this case): + hkey = prehashKey(key) + hvalue = prehashValue(value) + +Then combine the bytes, and hash it + output = hash(prefix || length(hkey) || hkey || length(hvalue) || hvalue) +*/ +message LeafOp { + HashOp hash = 1; + HashOp prehash_key = 2; + HashOp prehash_value = 3; + LengthOp length = 4; + // prefix is a fixed bytes that may optionally be included at the beginning to differentiate + // a leaf node from an inner node. + bytes prefix = 5; +} + +/** +InnerOp represents a merkle-proof step that is not a leaf. +It represents concatenating two children and hashing them to provide the next result. + +The result of the previous step is passed in, so the signature of this op is: + innerOp(child) -> output + +The result of applying InnerOp should be: + output = op.hash(op.prefix || child || op.suffix) + + where the || operator is concatenation of binary data, +and child is the result of hashing all the tree below this step. + +Any special data, like prepending child with the length, or prepending the entire operation with +some value to differentiate from leaf nodes, should be included in prefix and suffix. +If either of prefix or suffix is empty, we just treat it as an empty string +*/ +message InnerOp { + HashOp hash = 1; + bytes prefix = 2; + bytes suffix = 3; +} + + +/** +ProofSpec defines what the expected parameters are for a given proof type. +This can be stored in the client and used to validate any incoming proofs. + + verify(ProofSpec, Proof) -> Proof | Error + +As demonstrated in tests, if we don't fix the algorithm used to calculate the +LeafHash for a given tree, there are many possible key-value pairs that can +generate a given hash (by interpretting the preimage differently). +We need this for proper security, requires client knows a priori what +tree format server uses. But not in code, rather a configuration object. +*/ +message ProofSpec { + // any field in the ExistenceProof must be the same as in this spec. + // except Prefix, which is just the first bytes of prefix (spec can be longer) + LeafOp leaf_spec = 1; + InnerSpec inner_spec = 2; + // max_depth (if > 0) is the maximum number of InnerOps allowed (mainly for fixed-depth tries) + int32 max_depth = 3; + // min_depth (if > 0) is the minimum number of InnerOps allowed (mainly for fixed-depth tries) + int32 min_depth = 4; +} + +/* +InnerSpec contains all store-specific structure info to determine if two proofs from a +given store are neighbors. + +This enables: + + isLeftMost(spec: InnerSpec, op: InnerOp) + isRightMost(spec: InnerSpec, op: InnerOp) + isLeftNeighbor(spec: InnerSpec, left: InnerOp, right: InnerOp) +*/ +message InnerSpec { + // Child order is the ordering of the children node, must count from 0 + // iavl tree is [0, 1] (left then right) + // merk is [0, 2, 1] (left, right, here) + repeated int32 child_order = 1; + int32 child_size = 2; + int32 min_prefix_length = 3; + int32 max_prefix_length = 4; + // empty child is the prehash image that is used when one child is nil (eg. 
20 bytes of 0) + bytes empty_child = 5; + // hash is the algorithm that must be used for each InnerOp + HashOp hash = 6; +} + +/* +BatchProof is a group of multiple proof types than can be compressed +*/ +message BatchProof { + repeated BatchEntry entries = 1; +} + +// Use BatchEntry not CommitmentProof, to avoid recursion +message BatchEntry { + oneof proof { + ExistenceProof exist = 1; + NonExistenceProof nonexist = 2; + } +} + + +/****** all items here are compressed forms *******/ + +message CompressedBatchProof { + repeated CompressedBatchEntry entries = 1; + repeated InnerOp lookup_inners = 2; +} + +// Use BatchEntry not CommitmentProof, to avoid recursion +message CompressedBatchEntry { + oneof proof { + CompressedExistenceProof exist = 1; + CompressedNonExistenceProof nonexist = 2; + } +} + +message CompressedExistenceProof { + bytes key = 1; + bytes value = 2; + LeafOp leaf = 3; + // these are indexes into the lookup_inners table in CompressedBatchProof + repeated int32 path = 4; +} + +message CompressedNonExistenceProof { + bytes key = 1; // TODO: remove this as unnecessary??? we prove a range + CompressedExistenceProof left = 2; + CompressedExistenceProof right = 3; +} diff --git a/third_party/proto/cosmos/base/query/v1beta1/pagination.proto b/third_party/proto/cosmos/base/query/v1beta1/pagination.proto new file mode 100644 index 0000000000..2a8cbccedd --- /dev/null +++ b/third_party/proto/cosmos/base/query/v1beta1/pagination.proto @@ -0,0 +1,50 @@ +syntax = "proto3"; +package cosmos.base.query.v1beta1; + +option go_package = "github.com/cosmos/cosmos-sdk/types/query"; + +// PageRequest is to be embedded in gRPC request messages for efficient +// pagination. Ex: +// +// message SomeRequest { +// Foo some_parameter = 1; +// PageRequest pagination = 2; +// } +message PageRequest { + // key is a value returned in PageResponse.next_key to begin + // querying the next page most efficiently. Only one of offset or key + // should be set. + bytes key = 1; + + // offset is a numeric offset that can be used when key is unavailable. + // It is less efficient than using key. Only one of offset or key should + // be set. + uint64 offset = 2; + + // limit is the total number of results to be returned in the result page. + // If left empty it will default to a value to be set by each app. + uint64 limit = 3; + + // count_total is set to true to indicate that the result set should include + // a count of the total number of items available for pagination in UIs. + // count_total is only respected when offset is used. It is ignored when key + // is set. + bool count_total = 4; +} + +// PageResponse is to be embedded in gRPC response messages where the +// corresponding request message has used PageRequest. 
+// +// message SomeResponse { +// repeated Bar results = 1; +// PageResponse page = 2; +// } +message PageResponse { + // next_key is the key to be passed to PageRequest.key to + // query the next page most efficiently + bytes next_key = 1; + + // total is total number of results available if PageRequest.count_total + // was set, its value is undefined otherwise + uint64 total = 2; +} diff --git a/third_party/proto/cosmos/base/v1beta1/coin.proto b/third_party/proto/cosmos/base/v1beta1/coin.proto new file mode 100644 index 0000000000..fab75284b7 --- /dev/null +++ b/third_party/proto/cosmos/base/v1beta1/coin.proto @@ -0,0 +1,40 @@ +syntax = "proto3"; +package cosmos.base.v1beta1; + +import "gogoproto/gogo.proto"; + +option go_package = "github.com/cosmos/cosmos-sdk/types"; +option (gogoproto.goproto_stringer_all) = false; +option (gogoproto.stringer_all) = false; + +// Coin defines a token with a denomination and an amount. +// +// NOTE: The amount field is an Int which implements the custom method +// signatures required by gogoproto. +message Coin { + option (gogoproto.equal) = true; + + string denom = 1; + string amount = 2 [(gogoproto.customtype) = "Int", (gogoproto.nullable) = false]; +} + +// DecCoin defines a token with a denomination and a decimal amount. +// +// NOTE: The amount field is an Dec which implements the custom method +// signatures required by gogoproto. +message DecCoin { + option (gogoproto.equal) = true; + + string denom = 1; + string amount = 2 [(gogoproto.customtype) = "Dec", (gogoproto.nullable) = false]; +} + +// IntProto defines a Protobuf wrapper around an Int object. +message IntProto { + string int = 1 [(gogoproto.customtype) = "Int", (gogoproto.nullable) = false]; +} + +// DecProto defines a Protobuf wrapper around a Dec object. +message DecProto { + string dec = 1 [(gogoproto.customtype) = "Dec", (gogoproto.nullable) = false]; +} diff --git a/third_party/proto/gogoproto/gogo.proto b/third_party/proto/gogoproto/gogo.proto new file mode 100644 index 0000000000..49e78f99fe --- /dev/null +++ b/third_party/proto/gogoproto/gogo.proto @@ -0,0 +1,145 @@ +// Protocol Buffers for Go with Gadgets +// +// Copyright (c) 2013, The GoGo Authors. All rights reserved. +// http://github.com/gogo/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +syntax = "proto2"; +package gogoproto; + +import "google/protobuf/descriptor.proto"; + +option java_package = "com.google.protobuf"; +option java_outer_classname = "GoGoProtos"; +option go_package = "github.com/gogo/protobuf/gogoproto"; + +extend google.protobuf.EnumOptions { + optional bool goproto_enum_prefix = 62001; + optional bool goproto_enum_stringer = 62021; + optional bool enum_stringer = 62022; + optional string enum_customname = 62023; + optional bool enumdecl = 62024; +} + +extend google.protobuf.EnumValueOptions { + optional string enumvalue_customname = 66001; +} + +extend google.protobuf.FileOptions { + optional bool goproto_getters_all = 63001; + optional bool goproto_enum_prefix_all = 63002; + optional bool goproto_stringer_all = 63003; + optional bool verbose_equal_all = 63004; + optional bool face_all = 63005; + optional bool gostring_all = 63006; + optional bool populate_all = 63007; + optional bool stringer_all = 63008; + optional bool onlyone_all = 63009; + + optional bool equal_all = 63013; + optional bool description_all = 63014; + optional bool testgen_all = 63015; + optional bool benchgen_all = 63016; + optional bool marshaler_all = 63017; + optional bool unmarshaler_all = 63018; + optional bool stable_marshaler_all = 63019; + + optional bool sizer_all = 63020; + + optional bool goproto_enum_stringer_all = 63021; + optional bool enum_stringer_all = 63022; + + optional bool unsafe_marshaler_all = 63023; + optional bool unsafe_unmarshaler_all = 63024; + + optional bool goproto_extensions_map_all = 63025; + optional bool goproto_unrecognized_all = 63026; + optional bool gogoproto_import = 63027; + optional bool protosizer_all = 63028; + optional bool compare_all = 63029; + optional bool typedecl_all = 63030; + optional bool enumdecl_all = 63031; + + optional bool goproto_registration = 63032; + optional bool messagename_all = 63033; + + optional bool goproto_sizecache_all = 63034; + optional bool goproto_unkeyed_all = 63035; +} + +extend google.protobuf.MessageOptions { + optional bool goproto_getters = 64001; + optional bool goproto_stringer = 64003; + optional bool verbose_equal = 64004; + optional bool face = 64005; + optional bool gostring = 64006; + optional bool populate = 64007; + optional bool stringer = 67008; + optional bool onlyone = 64009; + + optional bool equal = 64013; + optional bool description = 64014; + optional bool testgen = 64015; + optional bool benchgen = 64016; + optional bool marshaler = 64017; + optional bool unmarshaler = 64018; + optional bool stable_marshaler = 64019; + + optional bool sizer = 64020; + + optional bool unsafe_marshaler = 64023; + optional bool unsafe_unmarshaler = 64024; + + optional bool goproto_extensions_map = 64025; + optional bool goproto_unrecognized = 64026; + + optional bool protosizer = 64028; + optional bool compare = 64029; + + optional bool typedecl = 64030; + + optional bool messagename = 64033; + + optional bool goproto_sizecache = 64034; + optional bool goproto_unkeyed = 
64035; +} + +extend google.protobuf.FieldOptions { + optional bool nullable = 65001; + optional bool embed = 65002; + optional string customtype = 65003; + optional string customname = 65004; + optional string jsontag = 65005; + optional string moretags = 65006; + optional string casttype = 65007; + optional string castkey = 65008; + optional string castvalue = 65009; + + optional bool stdtime = 65010; + optional bool stdduration = 65011; + optional bool wktpointer = 65012; + + optional string castrepeated = 65013; +} diff --git a/third_party/proto/google/api/annotations.proto b/third_party/proto/google/api/annotations.proto new file mode 100644 index 0000000000..85c361b47f --- /dev/null +++ b/third_party/proto/google/api/annotations.proto @@ -0,0 +1,31 @@ +// Copyright (c) 2015, Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +syntax = "proto3"; + +package google.api; + +import "google/api/http.proto"; +import "google/protobuf/descriptor.proto"; + +option go_package = "google.golang.org/genproto/googleapis/api/annotations;annotations"; +option java_multiple_files = true; +option java_outer_classname = "AnnotationsProto"; +option java_package = "com.google.api"; +option objc_class_prefix = "GAPI"; + +extend google.protobuf.MethodOptions { + // See `HttpRule`. + HttpRule http = 72295728; +} diff --git a/third_party/proto/google/api/http.proto b/third_party/proto/google/api/http.proto new file mode 100644 index 0000000000..2bd3a19bfa --- /dev/null +++ b/third_party/proto/google/api/http.proto @@ -0,0 +1,318 @@ +// Copyright 2018 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +syntax = "proto3"; + +package google.api; + +option cc_enable_arenas = true; +option go_package = "google.golang.org/genproto/googleapis/api/annotations;annotations"; +option java_multiple_files = true; +option java_outer_classname = "HttpProto"; +option java_package = "com.google.api"; +option objc_class_prefix = "GAPI"; + + +// Defines the HTTP configuration for an API service. It contains a list of +// [HttpRule][google.api.HttpRule], each specifying the mapping of an RPC method +// to one or more HTTP REST API methods. +message Http { + // A list of HTTP configuration rules that apply to individual API methods. + // + // **NOTE:** All service configuration rules follow "last one wins" order. 
+ repeated HttpRule rules = 1; + + // When set to true, URL path parmeters will be fully URI-decoded except in + // cases of single segment matches in reserved expansion, where "%2F" will be + // left encoded. + // + // The default behavior is to not decode RFC 6570 reserved characters in multi + // segment matches. + bool fully_decode_reserved_expansion = 2; +} + +// `HttpRule` defines the mapping of an RPC method to one or more HTTP +// REST API methods. The mapping specifies how different portions of the RPC +// request message are mapped to URL path, URL query parameters, and +// HTTP request body. The mapping is typically specified as an +// `google.api.http` annotation on the RPC method, +// see "google/api/annotations.proto" for details. +// +// The mapping consists of a field specifying the path template and +// method kind. The path template can refer to fields in the request +// message, as in the example below which describes a REST GET +// operation on a resource collection of messages: +// +// +// service Messaging { +// rpc GetMessage(GetMessageRequest) returns (Message) { +// option (google.api.http).get = "/v1/messages/{message_id}/{sub.subfield}"; +// } +// } +// message GetMessageRequest { +// message SubMessage { +// string subfield = 1; +// } +// string message_id = 1; // mapped to the URL +// SubMessage sub = 2; // `sub.subfield` is url-mapped +// } +// message Message { +// string text = 1; // content of the resource +// } +// +// The same http annotation can alternatively be expressed inside the +// `GRPC API Configuration` YAML file. +// +// http: +// rules: +// - selector: .Messaging.GetMessage +// get: /v1/messages/{message_id}/{sub.subfield} +// +// This definition enables an automatic, bidrectional mapping of HTTP +// JSON to RPC. Example: +// +// HTTP | RPC +// -----|----- +// `GET /v1/messages/123456/foo` | `GetMessage(message_id: "123456" sub: SubMessage(subfield: "foo"))` +// +// In general, not only fields but also field paths can be referenced +// from a path pattern. Fields mapped to the path pattern cannot be +// repeated and must have a primitive (non-message) type. +// +// Any fields in the request message which are not bound by the path +// pattern automatically become (optional) HTTP query +// parameters. Assume the following definition of the request message: +// +// +// service Messaging { +// rpc GetMessage(GetMessageRequest) returns (Message) { +// option (google.api.http).get = "/v1/messages/{message_id}"; +// } +// } +// message GetMessageRequest { +// message SubMessage { +// string subfield = 1; +// } +// string message_id = 1; // mapped to the URL +// int64 revision = 2; // becomes a parameter +// SubMessage sub = 3; // `sub.subfield` becomes a parameter +// } +// +// +// This enables a HTTP JSON to RPC mapping as below: +// +// HTTP | RPC +// -----|----- +// `GET /v1/messages/123456?revision=2&sub.subfield=foo` | `GetMessage(message_id: "123456" revision: 2 sub: SubMessage(subfield: "foo"))` +// +// Note that fields which are mapped to HTTP parameters must have a +// primitive type or a repeated primitive type. Message types are not +// allowed. In the case of a repeated type, the parameter can be +// repeated in the URL, as in `...?param=A¶m=B`. +// +// For HTTP method kinds which allow a request body, the `body` field +// specifies the mapping. 
Consider a REST update method on the +// message resource collection: +// +// +// service Messaging { +// rpc UpdateMessage(UpdateMessageRequest) returns (Message) { +// option (google.api.http) = { +// put: "/v1/messages/{message_id}" +// body: "message" +// }; +// } +// } +// message UpdateMessageRequest { +// string message_id = 1; // mapped to the URL +// Message message = 2; // mapped to the body +// } +// +// +// The following HTTP JSON to RPC mapping is enabled, where the +// representation of the JSON in the request body is determined by +// protos JSON encoding: +// +// HTTP | RPC +// -----|----- +// `PUT /v1/messages/123456 { "text": "Hi!" }` | `UpdateMessage(message_id: "123456" message { text: "Hi!" })` +// +// The special name `*` can be used in the body mapping to define that +// every field not bound by the path template should be mapped to the +// request body. This enables the following alternative definition of +// the update method: +// +// service Messaging { +// rpc UpdateMessage(Message) returns (Message) { +// option (google.api.http) = { +// put: "/v1/messages/{message_id}" +// body: "*" +// }; +// } +// } +// message Message { +// string message_id = 1; +// string text = 2; +// } +// +// +// The following HTTP JSON to RPC mapping is enabled: +// +// HTTP | RPC +// -----|----- +// `PUT /v1/messages/123456 { "text": "Hi!" }` | `UpdateMessage(message_id: "123456" text: "Hi!")` +// +// Note that when using `*` in the body mapping, it is not possible to +// have HTTP parameters, as all fields not bound by the path end in +// the body. This makes this option more rarely used in practice of +// defining REST APIs. The common usage of `*` is in custom methods +// which don't use the URL at all for transferring data. +// +// It is possible to define multiple HTTP methods for one RPC by using +// the `additional_bindings` option. Example: +// +// service Messaging { +// rpc GetMessage(GetMessageRequest) returns (Message) { +// option (google.api.http) = { +// get: "/v1/messages/{message_id}" +// additional_bindings { +// get: "/v1/users/{user_id}/messages/{message_id}" +// } +// }; +// } +// } +// message GetMessageRequest { +// string message_id = 1; +// string user_id = 2; +// } +// +// +// This enables the following two alternative HTTP JSON to RPC +// mappings: +// +// HTTP | RPC +// -----|----- +// `GET /v1/messages/123456` | `GetMessage(message_id: "123456")` +// `GET /v1/users/me/messages/123456` | `GetMessage(user_id: "me" message_id: "123456")` +// +// # Rules for HTTP mapping +// +// The rules for mapping HTTP path, query parameters, and body fields +// to the request message are as follows: +// +// 1. The `body` field specifies either `*` or a field path, or is +// omitted. If omitted, it indicates there is no HTTP request body. +// 2. Leaf fields (recursive expansion of nested messages in the +// request) can be classified into three types: +// (a) Matched in the URL template. +// (b) Covered by body (if body is `*`, everything except (a) fields; +// else everything under the body field) +// (c) All other fields. +// 3. URL query parameters found in the HTTP request are mapped to (c) fields. +// 4. Any body sent with an HTTP request can contain only (b) fields. +// +// The syntax of the path template is as follows: +// +// Template = "/" Segments [ Verb ] ; +// Segments = Segment { "/" Segment } ; +// Segment = "*" | "**" | LITERAL | Variable ; +// Variable = "{" FieldPath [ "=" Segments ] "}" ; +// FieldPath = IDENT { "." 
IDENT } ; +// Verb = ":" LITERAL ; +// +// The syntax `*` matches a single path segment. The syntax `**` matches zero +// or more path segments, which must be the last part of the path except the +// `Verb`. The syntax `LITERAL` matches literal text in the path. +// +// The syntax `Variable` matches part of the URL path as specified by its +// template. A variable template must not contain other variables. If a variable +// matches a single path segment, its template may be omitted, e.g. `{var}` +// is equivalent to `{var=*}`. +// +// If a variable contains exactly one path segment, such as `"{var}"` or +// `"{var=*}"`, when such a variable is expanded into a URL path, all characters +// except `[-_.~0-9a-zA-Z]` are percent-encoded. Such variables show up in the +// Discovery Document as `{var}`. +// +// If a variable contains one or more path segments, such as `"{var=foo/*}"` +// or `"{var=**}"`, when such a variable is expanded into a URL path, all +// characters except `[-_.~/0-9a-zA-Z]` are percent-encoded. Such variables +// show up in the Discovery Document as `{+var}`. +// +// NOTE: While the single segment variable matches the semantics of +// [RFC 6570](https://tools.ietf.org/html/rfc6570) Section 3.2.2 +// Simple String Expansion, the multi segment variable **does not** match +// RFC 6570 Reserved Expansion. The reason is that the Reserved Expansion +// does not expand special characters like `?` and `#`, which would lead +// to invalid URLs. +// +// NOTE: the field paths in variables and in the `body` must not refer to +// repeated fields or map fields. +message HttpRule { + // Selects methods to which this rule applies. + // + // Refer to [selector][google.api.DocumentationRule.selector] for syntax details. + string selector = 1; + + // Determines the URL pattern is matched by this rules. This pattern can be + // used with any of the {get|put|post|delete|patch} methods. A custom method + // can be defined using the 'custom' field. + oneof pattern { + // Used for listing and getting information about resources. + string get = 2; + + // Used for updating a resource. + string put = 3; + + // Used for creating a resource. + string post = 4; + + // Used for deleting a resource. + string delete = 5; + + // Used for updating a resource. + string patch = 6; + + // The custom pattern is used for specifying an HTTP method that is not + // included in the `pattern` field, such as HEAD, or "*" to leave the + // HTTP method unspecified for this rule. The wild-card rule is useful + // for services that provide content to Web (HTML) clients. + CustomHttpPattern custom = 8; + } + + // The name of the request field whose value is mapped to the HTTP body, or + // `*` for mapping all fields not captured by the path pattern to the HTTP + // body. NOTE: the referred field must not be a repeated field and must be + // present at the top-level of request message type. + string body = 7; + + // Optional. The name of the response field whose value is mapped to the HTTP + // body of response. Other response fields are ignored. When + // not set, the response message will be used as HTTP body of response. + string response_body = 12; + + // Additional HTTP bindings for the selector. Nested bindings must + // not contain an `additional_bindings` field themselves (that is, + // the nesting may only be one level deep). + repeated HttpRule additional_bindings = 11; +} + +// A custom pattern is used for defining custom HTTP verb. +message CustomHttpPattern { + // The name of this custom HTTP verb. 
+ string kind = 1; + + // The path matched by this custom verb. + string path = 2; +} diff --git a/third_party/proto/google/protobuf/any.proto b/third_party/proto/google/protobuf/any.proto new file mode 100644 index 0000000000..1431810ea4 --- /dev/null +++ b/third_party/proto/google/protobuf/any.proto @@ -0,0 +1,161 @@ +// Protocol Buffers - Google's data interchange format +// Copyright 2008 Google Inc. All rights reserved. +// https://developers.google.com/protocol-buffers/ +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +syntax = "proto3"; + +package google.protobuf; + +import "gogoproto/gogo.proto"; + +option csharp_namespace = "Google.Protobuf.WellKnownTypes"; +option go_package = "types"; +option java_package = "com.google.protobuf"; +option java_outer_classname = "AnyProto"; +option java_multiple_files = true; +option objc_class_prefix = "GPB"; + +// `Any` contains an arbitrary serialized protocol buffer message along with a +// URL that describes the type of the serialized message. +// +// Protobuf library provides support to pack/unpack Any values in the form +// of utility functions or additional generated methods of the Any type. +// +// Example 1: Pack and unpack a message in C++. +// +// Foo foo = ...; +// Any any; +// any.PackFrom(foo); +// ... +// if (any.UnpackTo(&foo)) { +// ... +// } +// +// Example 2: Pack and unpack a message in Java. +// +// Foo foo = ...; +// Any any = Any.pack(foo); +// ... +// if (any.is(Foo.class)) { +// foo = any.unpack(Foo.class); +// } +// +// Example 3: Pack and unpack a message in Python. +// +// foo = Foo(...) +// any = Any() +// any.Pack(foo) +// ... +// if any.Is(Foo.DESCRIPTOR): +// any.Unpack(foo) +// ... +// +// Example 4: Pack and unpack a message in Go +// +// foo := &pb.Foo{...} +// any, err := ptypes.MarshalAny(foo) +// ... +// foo := &pb.Foo{} +// if err := ptypes.UnmarshalAny(any, foo); err != nil { +// ... 
+// } +// +// The pack methods provided by protobuf library will by default use +// 'type.googleapis.com/full.type.name' as the type URL and the unpack +// methods only use the fully qualified type name after the last '/' +// in the type URL, for example "foo.bar.com/x/y.z" will yield type +// name "y.z". +// +// +// JSON +// ==== +// The JSON representation of an `Any` value uses the regular +// representation of the deserialized, embedded message, with an +// additional field `@type` which contains the type URL. Example: +// +// package google.profile; +// message Person { +// string first_name = 1; +// string last_name = 2; +// } +// +// { +// "@type": "type.googleapis.com/google.profile.Person", +// "firstName": , +// "lastName": +// } +// +// If the embedded message type is well-known and has a custom JSON +// representation, that representation will be embedded adding a field +// `value` which holds the custom JSON in addition to the `@type` +// field. Example (for message [google.protobuf.Duration][]): +// +// { +// "@type": "type.googleapis.com/google.protobuf.Duration", +// "value": "1.212s" +// } +// +message Any { + // A URL/resource name that uniquely identifies the type of the serialized + // protocol buffer message. This string must contain at least + // one "/" character. The last segment of the URL's path must represent + // the fully qualified name of the type (as in + // `path/google.protobuf.Duration`). The name should be in a canonical form + // (e.g., leading "." is not accepted). + // + // In practice, teams usually precompile into the binary all types that they + // expect it to use in the context of Any. However, for URLs which use the + // scheme `http`, `https`, or no scheme, one can optionally set up a type + // server that maps type URLs to message definitions as follows: + // + // * If no scheme is provided, `https` is assumed. + // * An HTTP GET on the URL must yield a [google.protobuf.Type][] + // value in binary format, or produce an error. + // * Applications are allowed to cache lookup results based on the + // URL, or have them precompiled into a binary to avoid any + // lookup. Therefore, binary compatibility needs to be preserved + // on changes to types. (Use versioned type names to manage + // breaking changes.) + // + // Note: this functionality is not currently available in the official + // protobuf release, and it is not used for type URLs beginning with + // type.googleapis.com. + // + // Schemes other than `http`, `https` (or the empty scheme) might be + // used with implementation specific semantics. + // + string type_url = 1; + + // Must be a valid serialized protocol buffer of the above specified type. 
+ bytes value = 2; + + option (gogoproto.typedecl) = false; +} + +option (gogoproto.goproto_registration) = false; diff --git a/third_party/proto/tendermint/crypto/keys.proto b/third_party/proto/tendermint/crypto/keys.proto new file mode 100644 index 0000000000..16fd7adf3e --- /dev/null +++ b/third_party/proto/tendermint/crypto/keys.proto @@ -0,0 +1,17 @@ +syntax = "proto3"; +package tendermint.crypto; + +option go_package = "github.com/tendermint/tendermint/proto/tendermint/crypto"; + +import "gogoproto/gogo.proto"; + +// PublicKey defines the keys available for use with Tendermint Validators +message PublicKey { + option (gogoproto.compare) = true; + option (gogoproto.equal) = true; + + oneof sum { + bytes ed25519 = 1; + bytes secp256k1 = 2; + } +} diff --git a/third_party/proto/tendermint/crypto/proof.proto b/third_party/proto/tendermint/crypto/proof.proto new file mode 100644 index 0000000000..975df76853 --- /dev/null +++ b/third_party/proto/tendermint/crypto/proof.proto @@ -0,0 +1,41 @@ +syntax = "proto3"; +package tendermint.crypto; + +option go_package = "github.com/tendermint/tendermint/proto/tendermint/crypto"; + +import "gogoproto/gogo.proto"; + +message Proof { + int64 total = 1; + int64 index = 2; + bytes leaf_hash = 3; + repeated bytes aunts = 4; +} + +message ValueOp { + // Encoded in ProofOp.Key. + bytes key = 1; + + // To encode in ProofOp.Data + Proof proof = 2; +} + +message DominoOp { + string key = 1; + string input = 2; + string output = 3; +} + +// ProofOp defines an operation used for calculating Merkle root +// The data could be arbitrary format, providing nessecary data +// for example neighbouring node hash +message ProofOp { + string type = 1; + bytes key = 2; + bytes data = 3; +} + +// ProofOps is Merkle proof defined by the list of ProofOps +message ProofOps { + repeated ProofOp ops = 1 [(gogoproto.nullable) = false]; +} diff --git a/third_party/proto/tendermint/libs/bits/types.proto b/third_party/proto/tendermint/libs/bits/types.proto new file mode 100644 index 0000000000..3111d113a5 --- /dev/null +++ b/third_party/proto/tendermint/libs/bits/types.proto @@ -0,0 +1,9 @@ +syntax = "proto3"; +package tendermint.libs.bits; + +option go_package = "github.com/tendermint/tendermint/proto/tendermint/libs/bits"; + +message BitArray { + int64 bits = 1; + repeated uint64 elems = 2; +} diff --git a/third_party/proto/tendermint/types/types.proto b/third_party/proto/tendermint/types/types.proto new file mode 100644 index 0000000000..7f7ea74cac --- /dev/null +++ b/third_party/proto/tendermint/types/types.proto @@ -0,0 +1,157 @@ +syntax = "proto3"; +package tendermint.types; + +option go_package = "github.com/tendermint/tendermint/proto/tendermint/types"; + +import "gogoproto/gogo.proto"; +import "google/protobuf/timestamp.proto"; +import "tendermint/crypto/proof.proto"; +import "tendermint/version/types.proto"; +import "tendermint/types/validator.proto"; + +// BlockIdFlag indicates which BlcokID the signature is for +enum BlockIDFlag { + option (gogoproto.goproto_enum_stringer) = true; + option (gogoproto.goproto_enum_prefix) = false; + + BLOCK_ID_FLAG_UNKNOWN = 0 [(gogoproto.enumvalue_customname) = "BlockIDFlagUnknown"]; + BLOCK_ID_FLAG_ABSENT = 1 [(gogoproto.enumvalue_customname) = "BlockIDFlagAbsent"]; + BLOCK_ID_FLAG_COMMIT = 2 [(gogoproto.enumvalue_customname) = "BlockIDFlagCommit"]; + BLOCK_ID_FLAG_NIL = 3 [(gogoproto.enumvalue_customname) = "BlockIDFlagNil"]; +} + +// SignedMsgType is a type of signed message in the consensus. 
+enum SignedMsgType { + option (gogoproto.goproto_enum_stringer) = true; + option (gogoproto.goproto_enum_prefix) = false; + + SIGNED_MSG_TYPE_UNKNOWN = 0 [(gogoproto.enumvalue_customname) = "UnknownType"]; + // Votes + SIGNED_MSG_TYPE_PREVOTE = 1 [(gogoproto.enumvalue_customname) = "PrevoteType"]; + SIGNED_MSG_TYPE_PRECOMMIT = 2 [(gogoproto.enumvalue_customname) = "PrecommitType"]; + + // Proposals + SIGNED_MSG_TYPE_PROPOSAL = 32 [(gogoproto.enumvalue_customname) = "ProposalType"]; +} + +// PartsetHeader +message PartSetHeader { + uint32 total = 1; + bytes hash = 2; +} + +message Part { + uint32 index = 1; + bytes bytes = 2; + tendermint.crypto.Proof proof = 3 [(gogoproto.nullable) = false]; +} + +// BlockID +message BlockID { + bytes hash = 1; + PartSetHeader part_set_header = 2 [(gogoproto.nullable) = false]; +} + +// -------------------------------- + +// Header defines the structure of a Tendermint block header. +message Header { + // basic block info + tendermint.version.Consensus version = 1 [(gogoproto.nullable) = false]; + string chain_id = 2 [(gogoproto.customname) = "ChainID"]; + int64 height = 3; + google.protobuf.Timestamp time = 4 [(gogoproto.nullable) = false, (gogoproto.stdtime) = true]; + + // prev block info + BlockID last_block_id = 5 [(gogoproto.nullable) = false]; + + // hashes of block data + bytes last_commit_hash = 6; // commit from validators from the last block + bytes data_hash = 7; // transactions + + // hashes from the app output from the prev block + bytes validators_hash = 8; // validators for the current block + bytes next_validators_hash = 9; // validators for the next block + bytes consensus_hash = 10; // consensus params for current block + bytes app_hash = 11; // state after txs from the previous block + bytes last_results_hash = 12; // root hash of all results from the txs from the previous block + + // consensus info + bytes evidence_hash = 13; // evidence included in the block + bytes proposer_address = 14; // original proposer of the block +} + +// Data contains the set of transactions included in the block +message Data { + // Txs that will be applied by state @ block.Height+1. + // NOTE: not all txs here are valid. We're just agreeing on the order first. + // This means that block.AppHash does not include these txs. + repeated bytes txs = 1; +} + +// Vote represents a prevote, precommit, or commit vote from validators for +// consensus. +message Vote { + SignedMsgType type = 1; + int64 height = 2; + int32 round = 3; + BlockID block_id = 4 + [(gogoproto.nullable) = false, (gogoproto.customname) = "BlockID"]; // zero if vote is nil. + google.protobuf.Timestamp timestamp = 5 + [(gogoproto.nullable) = false, (gogoproto.stdtime) = true]; + bytes validator_address = 6; + int32 validator_index = 7; + bytes signature = 8; +} + +// Commit contains the evidence that a block was committed by a set of validators. +message Commit { + int64 height = 1; + int32 round = 2; + BlockID block_id = 3 [(gogoproto.nullable) = false, (gogoproto.customname) = "BlockID"]; + repeated CommitSig signatures = 4 [(gogoproto.nullable) = false]; +} + +// CommitSig is a part of the Vote included in a Commit. 
+message CommitSig { + BlockIDFlag block_id_flag = 1; + bytes validator_address = 2; + google.protobuf.Timestamp timestamp = 3 + [(gogoproto.nullable) = false, (gogoproto.stdtime) = true]; + bytes signature = 4; +} + +message Proposal { + SignedMsgType type = 1; + int64 height = 2; + int32 round = 3; + int32 pol_round = 4; + BlockID block_id = 5 [(gogoproto.customname) = "BlockID", (gogoproto.nullable) = false]; + google.protobuf.Timestamp timestamp = 6 + [(gogoproto.nullable) = false, (gogoproto.stdtime) = true]; + bytes signature = 7; +} + +message SignedHeader { + Header header = 1; + Commit commit = 2; +} + +message LightBlock { + SignedHeader signed_header = 1; + tendermint.types.ValidatorSet validator_set = 2; +} + +message BlockMeta { + BlockID block_id = 1 [(gogoproto.customname) = "BlockID", (gogoproto.nullable) = false]; + int64 block_size = 2; + Header header = 3 [(gogoproto.nullable) = false]; + int64 num_txs = 4; +} + +// TxProof represents a Merkle proof of the presence of a transaction in the Merkle tree. +message TxProof { + bytes root_hash = 1; + bytes data = 2; + tendermint.crypto.Proof proof = 3; +} diff --git a/third_party/proto/tendermint/types/validator.proto b/third_party/proto/tendermint/types/validator.proto new file mode 100644 index 0000000000..49860b96d6 --- /dev/null +++ b/third_party/proto/tendermint/types/validator.proto @@ -0,0 +1,25 @@ +syntax = "proto3"; +package tendermint.types; + +option go_package = "github.com/tendermint/tendermint/proto/tendermint/types"; + +import "gogoproto/gogo.proto"; +import "tendermint/crypto/keys.proto"; + +message ValidatorSet { + repeated Validator validators = 1; + Validator proposer = 2; + int64 total_voting_power = 3; +} + +message Validator { + bytes address = 1; + tendermint.crypto.PublicKey pub_key = 2 [(gogoproto.nullable) = false]; + int64 voting_power = 3; + int64 proposer_priority = 4; +} + +message SimpleValidator { + tendermint.crypto.PublicKey pub_key = 1; + int64 voting_power = 2; +} diff --git a/third_party/proto/tendermint/version/types.proto b/third_party/proto/tendermint/version/types.proto new file mode 100644 index 0000000000..6061868bd4 --- /dev/null +++ b/third_party/proto/tendermint/version/types.proto @@ -0,0 +1,24 @@ +syntax = "proto3"; +package tendermint.version; + +option go_package = "github.com/tendermint/tendermint/proto/tendermint/version"; + +import "gogoproto/gogo.proto"; + +// App includes the protocol and software version for the application. +// This information is included in ResponseInfo. The App.Protocol can be +// updated in ResponseEndBlock. +message App { + uint64 protocol = 1; + string software = 2; +} + +// Consensus captures the consensus rules for processing a block in the blockchain, +// including all blockchain data structures and the rules of the application's +// state transition machine. +message Consensus { + option (gogoproto.equal) = true; + + uint64 block = 1; + uint64 app = 2; +}