diff --git a/.eslintignore b/.eslintignore index 8ebf77f1a..7361783f2 100644 --- a/.eslintignore +++ b/.eslintignore @@ -15,5 +15,5 @@ **/client/src/components/Header/*.spec.js **/client/src/components/App/*.spec.js **/client/wdio.conf.js -**/client/test/*.js +**/client/e2e-test/*.js diff --git a/.eslintrc.json b/.eslintrc.json index b2e8524ee..746301325 100644 --- a/.eslintrc.json +++ b/.eslintrc.json @@ -212,6 +212,7 @@ "adminpw", "adminroutes", "aff", + "anchorpeer", "Analytics", "api", "appconfig", @@ -342,6 +343,8 @@ "ssl", "stacktrace", "statedb", + "stdout", + "stderr", "superagent", "svg", "tbody", @@ -425,7 +428,15 @@ "csurf", "_csrf-hl-expl", "Uint8", - "Uint8Array" + "Uint8Array", + "yml", + "gui", + "src", + "cwd", + "ordererorg1", + "samplecc", + "testorgschannel0", + "mynetwork" ], "skipIfMatch": ["http://[^s]*"], "skipWordIfMatch": ["^foobar.*$"], diff --git a/app/platform/fabric/e2e-test/.gitignore b/app/platform/fabric/e2e-test/.gitignore deleted file mode 100644 index 7f6283318..000000000 --- a/app/platform/fabric/e2e-test/.gitignore +++ /dev/null @@ -1,3 +0,0 @@ -*.pyc -feature/configs -*combehavesyschanAnchor.tx diff --git a/app/platform/fabric/e2e-test/README.md b/app/platform/fabric/e2e-test/README.md index 6e00a1b98..8bfd33c14 100644 --- a/app/platform/fabric/e2e-test/README.md +++ b/app/platform/fabric/e2e-test/README.md @@ -1,256 +1,108 @@ -# Test example for behave (BDD, Gherkin syntax) - -```behave -Feature: Bootstrapping Hyperledger Explorer - As a user I want to be able to bootstrap Hyperledger Explorer - - Scenario Outline: : Bring up explorer and send requests to the basic REST API functions successfully - Given I have a bootstrapped fabric network of type - Given the NETWORK_PROFILE environment variable is solo-tls-disabled - - When an admin sets up a channel named "mychannel" - When an admin deploys chaincode at path "github.com/hyperledger/fabric/examples/chaincode/go/example02/cmd" with args ["init","a","1000","b","2000"] with 
name "mycc" on channel "mychannel" - When a user invokes on the channel "mychannel" using chaincode named "mycc" with args ["invoke","a","b","10"] - When I wait "3" seconds - When a user queries on the channel "mychannel" using chaincode named "mycc" with args ["query","a"] - Then a user receives a success response of 990 - - When I start explorer - Then the logs on explorer.mynetwork.com contains "Please open web browser to access :" within 20 seconds - - # Need to wait enough until completing process a new BlockEvent - Given I wait "20" seconds - Given I set base URL to "http://localhost:8090" - When I make a GET request to "auth/networklist" - Then the response status code should equal 200 - Then the response structure should equal "networklistResp" - Then JSON at path ".networkList" should equal [[ "first-network", {} ]] -``` - -# Setup - - -## Download tools & pull fabric images - -``` -$ curl -sSL http://bit.ly/2ysbOFE | bash -s -- 1.4.4 1.4.4 0.4.18 -s -``` - -## Prepare Explorer / Explorer-DB image - -### Build images - -``` -$ cd /some/where/blockchain-explorer -$ ./build_docker_image.sh -``` +# Ginkgo based end-to-end test for REST API -### Pull images +We need to test each REST API on actual fabric network automatically. To achieve this requirement efficiently, following to the same way with fabric-test repository is good option for us. -``` -$ docker pull hyperledger/explorer -$ docker pull hyperledger/explorer-db -``` +Fabric-test provides a collection of utilities used to test the core Hyperledger Fabric projects. In Explorer, we use `Fabric Network Operator` and `Performance Traffic Engine (PTE)` to manipulate fabric network from our test suite written by golang (for API e2e-test) and node.js (for GUI e2e-test). The tool currently offers golang package and CLI. -## Install python & pip +# Prerequisites -### For Linux (Ubuntu) +* Go 1.11.0 or above +* The following packages: + * github.com/onsi/ginkgo/ginkgo + * github.com/onsi/gomega/... 
+ * gopkg.in/yaml.v2 +* Fabric binaries downloaded in $PATH +* docker/docker-compose -``` -$ apt-get install python python-pip -``` +# Setup -### For macOS +## Download fabric-test repository and sync up the sub modules - macOS comes with Python so there's a chance pip is already installed on your machine, verify the version -``` -$ python --version -$ pip --version ``` +go get -d github.com/hyperledger/fabric-test +cd $GOPATH/src/github.com/hyperledger/fabric-test +git checkout release-1.4 +git submodule update --init --recursive +git submodule foreach git checkout release-1.4 - -## Setup virtualenv - -### For Linux (Ubuntu) - -``` -$ apt-get install virtualenv -$ cd /some/where/blockchain-explorer/app/platform/fabric/e2e-test/feature -$ virtualenv e2e-test -$ source e2e-test/bin/activate -(e2e-test) $ ``` -### For macOS +## Install the latest stable fabric-client node package into PTE tool directory ``` -$ pip install virtualenv -$ cd /some/where/blockchain-explorer/app/platform/fabric/e2e-test/feature -$ virtualenv e2e-test -$ source e2e-test/bin/activate -(e2e-test) $ +cd $GOPATH/src/github.com/hyperledger/fabric-test/tools/PTE +npm install fabric-client@1.4.5 +npm install fabric-ca-client@1.4.5 ``` -## Install required packages - -``` -# At /some/where/blockchain-explorer/app/platform/fabric/e2e-test/feature on virtual env -(e2e-test) $ pip install -r requirement.txt -``` +## Create symboric link to PTE tool -# Run test scenarios +We need to keep some directory layouts to work together correctly each component of tools provided by fabric-test. 
``` -# At /some/where/blockchain-explorer/app/platform/fabric/e2e-test/feature on virtual env -(e2e-test) $ behave ./explorer.feature +cd /some/where/blockchain-explorer/app/platform/fabric/e2e-test +ln -s $GOPATH/src/github.com/hyperledger/fabric-test/tools/PTE ./PTE ``` -## Optional: Run test with npm +# Running test suite ``` -$ cd /some/where/blockchain-explorer -$ npm install # To install npm-run-all package -$ npm run e2e-test +cd /some/where/blockchain-explorer/app/platform/fabric/e2e-test +ginkgo -v ``` # Tips -* To enable stdout while running scenarios - ``` - (e2e-test) $ behave --no-capture ./explorer.feature - ``` - -* To execute only a certain scenario - ``` - # Specify with line number - (e2e-test) $ behave ./explorer.feature:111 - ``` - or - ``` - # Specify with tag - (e2e-test) $ behave --tags=@basic ./explorer.feature - ``` - -* To preserve the test runtime environment without clean up when finishing test - ```diff - --- a/app/platform/fabric/e2e-test/feature/explorer.feature - +++ b/app/platform/fabric/e2e-test/feature/explorer.feature - @@ -145,7 +149,7 @@ Scenario: [balance-transfer] Register a new user and enroll successfully - Then the response parameter "status" should equal 200 - - @basic - -# @doNotDecompose - +@doNotDecompose - Scenario: [first-network] Not supported to register a new user and enroll - Given I start first-network - Given the NETWORK_PROFILE environment variable is first-network - ``` - -# How to upgrade fabric-test environment - -All files copied from the original fabric-test repository have not been modified. When upgrading the baseline of fabric-test, you only need to override them. 
- -``` -$ git clone --recurse-submodules https://github.com/hyperledger/fabric-test.git -b release-1.4 -$ cd fabric-test -$ git checkout --recurse-submodules 64a5e04 # Choose a certain commit hash to be used for this upgrade -$ find fabric/examples/chaincode fabric-samples/chaincode chaincodes/ feature/ -type f | zip fabric-test_64a5e04.zip -@ -$ cd /some/where/blockchain-explorer/app/platform/fabric/e2e-test -$ unzip -o /some/where/fabric-test_64a5e04.zip -``` - -## Added files for e2e-test environment - -To add e2e-test support to explorer, we have added the following files over the original files from fabric-test repository. - -``` -app/platform/fabric/e2e-test/ - .gitignore - README.md - feature/ - explorer.feature - explorer_gui_e2e.feature - requirement.txt - docker-compose/ - docker-compose-explorer.yaml - docker-compose-kafka-sd.yml - explorer-configs/ - steps/ - explorer_impl.py - json_responses.py -``` +* You can easily debug test code written by golang with using delve or VSCode debug functionality. # Project Structure -Feature files are intended to locate in `/app/platform/fabric/e2e-test/feature` folder. Corresponding steps are located in `/app/platform/fabric/e2e-test/feature/steps`. -Overall project structure is as follows: ``` -app/platform/fabric/e2e-test/chaincodes/ // hyperledger/fabric-test -app/platform/fabric/e2e-test/fabric/ // hyperledger/fabric-test -app/platform/fabric/e2e-test/fabric-samples/ // hyperledger/fabric-test -app/platform/fabric/e2e-test/fabric-sdk-java/ // hyperledger/fabric-test -app/platform/fabric/e2e-test/feature/ // hyperledger/fabric-test - -+-- requirement.txt // store python requirements - -+-- environment.py // contains common actions related to scenarios (e.g. 
clearing headers after running each feature file) - -+-- explorer.feature // feature files (Test Scenarios) - -+-- explorer_gui_e2e.feature // feature files for GUI e2e test scenario - -+-- configs/ - - +-- {UUID}/ // crypto and channel artifacts dyanamically generated everytime running the scenarios - - +-- configtx.yaml // contains common steps definitions - - +-- crypto.yaml // contains common steps definitions - - +-- fabric-ca-server-config.yaml // contains common steps definitions - -+-- docker-compose/ - - +-- docker-compose-*.yml // definition of containers to support a variety of test network topology - - +-- docker-compose-explorer.yaml // definition of containers to bring up Hyperledger Explorer / Explorer DB - - +-- docker-compose-kafka-sd.yml // definition of containers to add configurations for service discovery to fabric network - -+-- explorer-configs/ // Configuration and Profile for each scenario - // You can specify which environments should be in use on each scenario by defining NETWORK_PROFILE env variable - - +-- config-${NETWORK_PROFILE}.json // Configuration of Explorer for each network - - +-- connection-profile/ // Profiles for each network - - +-- ${NETWORK_PROFILE}.json +runTestSuite.sh + : Script to setup env and run test suite - +-- chaincode/ +configs/config_multi.json +configs/config_single.json +configs/connection-profile/org1-network.json +configs/connection-profile/org2-network.json + : Configuration for Explorer used within the test suite -+-- steps/ +docker-compose.yaml + : Docker compose file to bring up Explorer reside with fabric network managed by Operator tool - +-- *_impl.py // Existing steps for fabric-test repository environment to manipulating fabric network and asserting status +specs/apitest-network-spec.yml + : Configuration of fabric network. 
Used when bring up fabric network - +-- *_util.py // Utility functions for fabric-test repository environment +specs/apitest-input-multiprofile.yml +specs/apitest-input-singleprofile.yml +specs/apitest-input-singleprofile_addnewch.yml + : Configuration for interacting to fabric network. Used when take actions like creating channel, joining to channel, etc. - +-- explorer_impl.py // New added steps for the e2e test of Hyperledger Explorer +specs/apitest_suite_test.go +specs/apitest_test.go + : Test suite - +-- json_responses.py // response data structures described in Trafaret format +specs/genchannelartifacts.sh +specs/runexplorer.sh +specs/stopexplorer.sh + : Scritps executed via test suite -app/platform/fabric/e2e-test/README.md +specs/templates/configtx.yaml +specs/templates/crypto-config.yaml +specs/templates/docker/docker-compose.yaml + : Template file following to yaml.v2 package format. Used to generate artifacts for fabric network automatically ``` -Mainly we'll update `explorer.feature`, `steps/explorer_impl.py` and `steps/json_responses.py` to cover more scenarios. +Mainly we'll update `specs/apitest_test.go` to cover more scenarios. 
# Link -* https://behave.readthedocs.io/en/latest/index.html -* https://github.com/hyperledger/fabric-test/tree/release-1.4/feature - The Explorer e2e test environment is based on the fabric-test env -* https://github.com/stanfy/behave-rest - This package is used to test REST API call in the BDD +* https://github.com/hyperledger/fabric-test + * https://github.com/hyperledger/fabric-test/tree/master/tools/operator + * https://github.com/hyperledger/fabric-test/tree/master/tools/PTE +* http://onsi.github.io/ginkgo/ +* http://onsi.github.io/gomega/ \ No newline at end of file diff --git a/app/platform/fabric/e2e-test/chaincodes/auctionapp/art.go b/app/platform/fabric/e2e-test/chaincodes/auctionapp/art.go deleted file mode 100644 index 9d0252561..000000000 --- a/app/platform/fabric/e2e-test/chaincodes/auctionapp/art.go +++ /dev/null @@ -1,2600 +0,0 @@ -/* -Copyright IT People Corp. 2017 All Rights Reserved. - -SPDX-License-Identifier: Apache-2.0 - -*/ - -/////////////////////////////////////////////////////////////////////// -// Author : IT People - Mohan Venkataraman - Auction API for v1.0 -// Purpose: Explore the Hyperledger/fabric and understand -// how to write an chain code, application/chain code boundaries -// The code is not the best as it has just hammered out in a day or two -// Feedback and updates are appreciated -/////////////////////////////////////////////////////////////////////// - -package main - -import ( - "encoding/json" - "errors" - "fmt" - "io" - "net/http" - "os" - "runtime" - "strconv" - "time" - - "github.com/hyperledger/fabric/core/chaincode/shim" - pb "github.com/hyperledger/fabric/protos/peer" -) - -/////////////////////////////////////////////////////////////////////////////////////// -// This creates a record of the Asset (Inventory) -// Includes Description, title, certificate of authenticity or image whatever..idea is to checkin a image and store it -// in encrypted form -// Example: -// Item { 113869, "Flower Urn on a Patio", "Liz 
Jardine", "10102007", "Original", "Floral", "Acrylic", "15 x 15 in", "sample_9.png","$600", "My Gallery } -/////////////////////////////////////////////////////////////////////////////////////// - -type ItemObject struct { - ItemID string - RecType string - ItemDesc string - ItemDetail string // Could included details such as who created the Art work if item is a Painting - ItemDate string - ItemType string - ItemSubject string - ItemMedia string - ItemSize string - ItemPicFN string - ItemImage []byte // This has to be generated AES encrypted using the file name - AES_Key []byte // This is generated by the AES Algorithms - ItemImageType string // should be used to regenerate the appropriate image type - ItemBasePrice string // Reserve Price at Auction must be greater than this price - CurrentOwnerID string // This is validated for a user registered record - TimeStamp string // This is the time stamp -} - -//////////////////////////////////////////////////////////////////////////////// -// Has an item entry every time the item changes hands -//////////////////////////////////////////////////////////////////////////////// -type ItemLog struct { - ItemID string // PRIMARY KEY - Status string // SECONDARY KEY - OnAuc, OnSale, NA - AuctionedBy string // SECONDARY KEY - Auction House ID if applicable - RecType string // ITEMHIS - ItemDesc string - CurrentOwner string - Date string // Date when status changed -} - -///////////////////////////////////////////////////////////// -// Create Buyer, Seller , Auction House, Authenticator -// Could establish valid UserTypes - -// AH (Auction House) -// TR (Buyer or Seller) -// AP (Appraiser) -// IN (Insurance) -// BK (bank) -// SH (Shipper) -///////////////////////////////////////////////////////////// -type UserObject struct { - UserID string - RecType string // Type = USER - Name string - UserType string // Auction House (AH), Bank (BK), Buyer or Seller (TR), Shipper (SH), Appraiser (AP) - Address string - Phone string - Email 
string - Bank string - AccountNo string - RoutingNo string - Timestamp string -} - -///////////////////////////////////////////////////////////////////////////// -// Register a request for participating in an auction -// Usually posted by a seller who owns a piece of ITEM -// The Auction house will determine when to open the item for Auction -// The Auction House may conduct an appraisal and genuineness of the item -///////////////////////////////////////////////////////////////////////////// - -type AuctionRequest struct { - AuctionID string - RecType string // AUCREQ - ItemID string - AuctionHouseID string // ID of the Auction House managing the auction - SellerID string // ID Of Seller - to verified against the Item CurrentOwnerId - RequestDate string // Date on which Auction Request was filed - ReservePrice string // reserver price > previous purchase price - BuyItNowPrice string // 0 (Zero) if not applicable else specify price - Status string // INIT, OPEN, CLOSED (To be Updated by Trgger Auction) - OpenDate string // Date on which auction will occur (To be Updated by Trigger Auction) - CloseDate string // Date and time when Auction will close (To be Updated by Trigger Auction) - TimeStamp string // The transaction Date and Time -} - -///////////////////////////////////////////////////////////// -// POST the transaction after the Auction Completes -// Post an Auction Transaction -// Post an Updated Item Object -// Once an auction request is opened for auctions, a timer is kicked -// off and bids are accepted. 
When the timer expires, the highest bid -// is selected and converted into a Transaction -// This transaction is a simple view -///////////////////////////////////////////////////////////// - -type ItemTransaction struct { - AuctionID string - RecType string // POSTTRAN - ItemID string - TransType string // Sale, Buy, Commission - UserId string // Buyer or Seller ID - TransDate string // Date of Settlement (Buyer or Seller) - HammerTime string // Time of hammer strike - SOLD - HammerPrice string // Total Settlement price - Details string // Details about the Transaction -} - -//////////////////////////////////////////////////////////////// -// This is a Bid. Bids are accepted only if an auction is OPEN -//////////////////////////////////////////////////////////////// - -type Bid struct { - AuctionID string - RecType string // BID - BidNo string - ItemID string - BuyerID string // ID Of Buyer - to be verified against the Item CurrentOwnerId - BidPrice string // BidPrice > Previous Bid - BidTime string // Time the bid was received -} - -////////////////////////////////////////////////////////////// -// Invoke Functions based on Function name -// The function name gets resolved to one of the following calls -// during an invoke -// -////////////////////////////////////////////////////////////// -func InvokeFunction(fname string) func(stub shim.ChaincodeStubInterface, function string, args []string) pb.Response { - InvokeFunc := map[string]func(stub shim.ChaincodeStubInterface, function string, args []string) pb.Response{ - "iPostItem": PostItem, - "iPostUser": PostUser, - "iPostAuctionRequest": PostAuctionRequest, - "iPostTransaction": PostTransaction, - "iPostBid": PostBid, - "iOpenAuctionForBids": OpenAuctionForBids, - "iBuyItNow": BuyItNow, - "iTransferItem": TransferItem, - "iCloseAuction": CloseAuction, - "iCloseOpenAuctions": CloseOpenAuctions, - "iDownloadImages": DownloadImages, - } - return InvokeFunc[fname] -} - 
-////////////////////////////////////////////////////////////// -// Query Functions based on Function name -// -////////////////////////////////////////////////////////////// -func QueryFunction(fname string) func(stub shim.ChaincodeStubInterface, function string, args []string) pb.Response { - QueryFunc := map[string]func(stub shim.ChaincodeStubInterface, function string, args []string) pb.Response{ - "qGetItem": GetItem, - "qGetUser": GetUser, - "qGetAuctionRequest": GetAuctionRequest, - "qGetTransaction": GetTransaction, - "qGetBid": GetBid, - "qGetLastBid": GetLastBid, - "qGetHighestBid": GetHighestBid, - "qGetNoOfBidsReceived": GetNoOfBidsReceived, - "qGetListOfBids": GetListOfBids, - "qGetItemLog": GetItemLog, - "qGetItemListByCat": GetItemListByCat, - "qGetUserListByCat": GetUserListByCat, - "qGetListOfInitAucs": GetListOfInitAucs, - "qGetListOfOpenAucs": GetListOfOpenAucs, - "qValidateItemOwnership": ValidateItemOwnership, - } - return QueryFunc[fname] -} - -///////////////////////////////////////////////////////////////////////////////////////////////////// -// We are storing the PictureMap as a Key/Value pair to download the images on the container. 
-// This was done to run the Daily/Weeky Test Cases from CLI -///////////////////////////////////////////////////////////////////////////////////////////////////// - -//func GetPictureUrl(picname string) string { -var PictureMap = map[string]string{ - "art1.png": "https://raw.githubusercontent.com/ITPeople-Blockchain/auction/v0.6/art/artchaincode/art1.png", - "art2.png": "https://raw.githubusercontent.com/ITPeople-Blockchain/auction/v0.6/art/artchaincode/art2.png", - "art3.png": "https://raw.githubusercontent.com/ITPeople-Blockchain/auction/v0.6/art/artchaincode/art3.png", - "art4.png": "https://raw.githubusercontent.com/ITPeople-Blockchain/auction/v0.6/art/artchaincode/art4.png", - "art5.png": "https://raw.githubusercontent.com/ITPeople-Blockchain/auction/v0.6/art/artchaincode/art5.png", - "art6.png": "https://raw.githubusercontent.com/ITPeople-Blockchain/auction/v0.6/art/artchaincode/art6.png", - "art7.png": "https://raw.githubusercontent.com/ITPeople-Blockchain/auction/v0.6/art/artchaincode/art7.png", - "item-001.jpg": "https://raw.githubusercontent.com/ITPeople-Blockchain/auction/v0.6/art/artchaincode/item-001.jpg", - "item-002.jpg": "https://raw.githubusercontent.com/ITPeople-Blockchain/auction/v0.6/art/artchaincode/item-002.jpg", - "item-003.jpg": "https://raw.githubusercontent.com/ITPeople-Blockchain/auction/v0.6/art/artchaincode/item-003.jpg", - "item-004.jpg": "https://raw.githubusercontent.com/ITPeople-Blockchain/auction/v0.6/art/artchaincode/item-004.jpg", - "item-005.jpg": "https://raw.githubusercontent.com/ITPeople-Blockchain/auction/v0.6/art/artchaincode/item-005.jpg", - "item-006.jpg": "https://raw.githubusercontent.com/ITPeople-Blockchain/auction/v0.6/art/artchaincode/item-006.jpg", - "item-007.jpg": "https://raw.githubusercontent.com/ITPeople-Blockchain/auction/v0.6/art/artchaincode/item-007.jpg", - "item-008.jpg": "https://raw.githubusercontent.com/ITPeople-Blockchain/auction/v0.6/art/artchaincode/item-008.jpg", - "people.gif": 
"https://raw.githubusercontent.com/ITPeople-Blockchain/auction/v0.6/art/artchaincode/people.gif", - "mad-fb.jpg": "https://raw.githubusercontent.com/ITPeople-Blockchain/auction/v0.6/art/artchaincode/mad-fb.gif", - "sample.png": "https://raw.githubusercontent.com/ITPeople-Blockchain/auction/v0.6/art/artchaincode/sample.png", -} - -type SimpleChaincode struct { -} - -//////////////////////////////////////////////////////////////////////////////// -// Chain Code Kick-off Main function -//////////////////////////////////////////////////////////////////////////////// -func main() { - - // maximize CPU usage for maximum performance - runtime.GOMAXPROCS(runtime.NumCPU()) - fmt.Println("Starting Item Auction Application chaincode BlueMix ver 21 Dated 2016-07-02 09.45.00: ") - - //ccPath = fmt.Sprintf("%s/src/github.com/hyperledger/fabric/auction/art/artchaincode/", gopath) - // Start the shim -- running the fabric - err := shim.Start(new(SimpleChaincode)) - if err != nil { - fmt.Println("Error starting Item Fun Application chaincode: %s", err) - } - -} - -///////////////////////////////////////////////////////////////////////////////////////////////////// -// We are storing the PictureMap as a Key/Value pair to download the images on the container. 
-// This was done to run the Daily/Weeky Test Cases from CLI -///////////////////////////////////////////////////////////////////////////////////////////////////// - -func downloadFile(filepath string, url string) (err error) { - - // Create the file - out, err := os.Create(filepath) - if err != nil { - return err - } - defer out.Close() - - // Get the data - resp, err := http.Get(url) - if err != nil { - return err - } - defer resp.Body.Close() - - // Writer the body to file - _, err = io.Copy(out, resp.Body) - if err != nil { - return err - } - - return nil -} - -//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// -// SimpleChaincode - Init Chaincode implementation - The following sequence of transactions can be used to test the Chaincode -//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// - -func (t *SimpleChaincode) Init(stub shim.ChaincodeStubInterface) pb.Response { - - // TODO - Include all initialization to be complete before Invoke and Query - // Uses aucTables to delete tables if they exist and re-create them - - //myLogger.Info("[Trade and Auction Application] Init") - fmt.Println("[Trade and Auction Application] Init") - fmt.Println("\nInit() Initialization Complete ") - return shim.Success(nil) -} - -//////////////////////////////////////////////////////////////// -// SimpleChaincode - INVOKE Chaincode implementation -// User Can Invoke -// - Register a user using PostUser -// - Register an item using PostItem -// - The Owner of the item (User) can request that the item be put on auction using PostAuctionRequest -// - The Auction House can request that the auction request be Opened for bids using OpenAuctionForBids -// - One the auction is OPEN, registered buyers (Buyers) can send in bids vis PostBid -// - No bid is accepted when the status of the auction request is INIT or CLOSED -// 
- Either manually or by OpenAuctionRequest, the auction can be closed using CloseAuction -// - The CloseAuction creates a transaction and invokes PostTransaction -//////////////////////////////////////////////////////////////// - -func (t *SimpleChaincode) Invoke(stub shim.ChaincodeStubInterface) pb.Response { - - function, args := stub.GetFunctionAndParameters() - fmt.Println("==========================================================") - fmt.Println("BEGIN Function ====> ", function) - if function[0:1] == "i" { - fmt.Println("==========================================================") - return t.invoke(stub, function, args) - } - - if function[0:1] == "q" { - fmt.Println("==========================================================") - return t.query(stub, function, args) - } - - fmt.Println("==========================================================") - - return shim.Error("Invoke: Invalid Function Name - function names begin with a q or i") - -} - -//////////////////////////////////////////////////////////////// -// SimpleChaincode - INVOKE Chaincode implementation -// User Can Invoke -// - Register a user using PostUser -// - Register an item using PostItem -// - The Owner of the item (User) can request that the item be put on auction using PostAuctionRequest -// - The Auction House can request that the auction request be Opened for bids using OpenAuctionForBids -// - One the auction is OPEN, registered buyers (Buyers) can send in bids vis PostBid -// - No bid is accepted when the status of the auction request is INIT or CLOSED -// - Either manually or by OpenAuctionRequest, the auction can be closed using CloseAuction -// - The CloseAuction creates a transaction and invokes PostTransaction -//////////////////////////////////////////////////////////////// - -func (t *SimpleChaincode) invoke(stub shim.ChaincodeStubInterface, function string, args []string) pb.Response { - - 
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// - // Check Type of Transaction and apply business rules - // before adding record to the block chain - // In this version, the assumption is that args[1] specifies recType for all defined structs - // Newer structs - the recType can be positioned anywhere and ChkReqType will check for recType - // example: - // ./peer chaincode invoke -l golang -n mycc -c '{"Function": "PostBid", "Args":["1111", "BID", "1", "1000", "300", "1200"]}' - ////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// - - if ChkRecType(args) == true { - - InvokeRequest := InvokeFunction(function) - if InvokeRequest != nil { - response := InvokeRequest(stub, function, args) - return (response) - } - } else { - fmt.Println("Invoke() Invalid recType : ", args, "\n") - error_str := "Invoke() : Invalid recType : " + args[0] - return shim.Error(error_str) - } - - return shim.Success(nil) -} - -////////////////////////////////////////////////////////////////////////////////////////// -// SimpleChaincode - query Chaincode implementation -// Client Can Query -// Sample Data -// ./peer chaincode query -l golang -n mycc -c '{"Function": "GetUser", "Args": ["4000"]}' -// ./peer chaincode query -l golang -n mycc -c '{"Function": "GetItem", "Args": ["2000"]}' -////////////////////////////////////////////////////////////////////////////////////////// - -func (t *SimpleChaincode) query(stub shim.ChaincodeStubInterface, function string, args []string) pb.Response { - - // var buff []byte - var response pb.Response - fmt.Println("Query() : ID Extracted and Type = ", args[0]) - fmt.Println("Query() : Args supplied : ", args) - - if len(args) < 1 { - fmt.Println("Query() : Include at least 1 arguments Key ") - return shim.Error("Query() : Expecting Transaction type and Key value for query") - } - - QueryRequest := 
QueryFunction(function) - if QueryRequest != nil { - response = QueryRequest(stub, function, args) - } else { - fmt.Println("Query() Invalid function call : ", function) - response_str := "Query() : Invalid function call : " + function - return shim.Error(response_str) - } - - if response.Status != shim.OK { - fmt.Println("Query() Object not found : ", args[0]) - response_str := "Query() : Object not found : " + args[0] - return shim.Error(response_str) - } - return response -} - -////////////////////////////////////////////////////////////////////////////////////////// -// Download Images into Peer -////////////////////////////////////////////////////////////////////////////////////////// -func DownloadImages(stub shim.ChaincodeStubInterface, function string, args []string) pb.Response { - - fmt.Println("[Trade and Auction Application] DownloadImages") - var err error - for k, v := range PictureMap { - fmt.Printf("\n Downloading Image '%s' from URL: %s", k, v) - err = downloadFile(k, v) - if err != nil { - fmt.Println(err) - return shim.Error("Invoke: Invalid Function Name - function names begin with a q or i") - } - } - fmt.Println("\nDownloadImages() Complete ") - return shim.Success(nil) -} - -////////////////////////////////////////////////////////////////////////////////////////// -// Retrieve User Information -// example: -// ./peer chaincode query -l golang -n mycc -c '{"Function": "GetUser", "Args": ["100"]}' -// -////////////////////////////////////////////////////////////////////////////////////////// -func GetUser(stub shim.ChaincodeStubInterface, function string, args []string) pb.Response { - - var err error - - // Get the Object and Display it - Avalbytes, err := QueryObject(stub, "User", args) - if err != nil { - fmt.Println("GetUser() : Failed to Query Object ") - jsonResp := "{\"Error\":\"Failed to get Object Data for " + args[0] + "\"}" - return shim.Error(jsonResp) - } - - if Avalbytes == nil { - fmt.Println("GetUser() : Incomplete Query Object 
") - jsonResp := "{\"Error\":\"Incomplete information about the key for " + args[0] + "\"}" - return shim.Error(jsonResp) - } - - fmt.Println("GetUser() : Response : Successful -") - return shim.Success(Avalbytes) -} - -///////////////////////////////////////////////////////////////////////////////////////// -// Query callback representing the query of a chaincode -// Retrieve a Item by Item ID -// QueryObjectWithProcessingFunction takes a post processing function as argument -// peer chaincode query -l golang -n mycc -c '{"Args": ["qGetItem", "1000"]} -// -///////////////////////////////////////////////////////////////////////////////////////// -func GetItem(stub shim.ChaincodeStubInterface, function string, args []string) pb.Response { - - var err error - - // Get the Objects and Display it - Avalbytes, err := QueryObjectWithProcessingFunction(stub, "Item", args, ProcessQueryResult) - if err != nil { - fmt.Println("GetItem() : Failed to Query Object ") - jsonResp := "{\"Error\":\"Failed to get Object Data for " + args[0] + "\"}" - return shim.Error(jsonResp) - } - - if Avalbytes == nil { - fmt.Println("GetItem() : Incomplete Query Object ") - jsonResp := "{\"Error\":\"Incomplete information about the key for " + args[0] + "\"}" - return shim.Error(jsonResp) - } - - // Masking ItemImage binary data - itemObj, _ := JSONtoAR(Avalbytes) - itemObj.ItemImage = []byte{} - Avalbytes, _ = ARtoJSON(itemObj) - - fmt.Println("GetItem() : Response : Successful ") - return shim.Success(Avalbytes) -} - -///////////////////////////////////////////////////////////////////////////////////////// -// Validates The Ownership of an Asset using ItemID, OwnerID, and HashKey -// -// peer chaincode query -l golang -n mycc -c '{"Function": "ValidateItemOwnership", "Args": ["1000", "100", "tGEBaZuKUBmwTjzNEyd+nr/fPUASuVJAZ1u7gha5fJg="]}' -// -///////////////////////////////////////////////////////////////////////////////////////// -func ValidateItemOwnership(stub 
shim.ChaincodeStubInterface, function string, args []string) pb.Response { - - var err error - - if len(args) < 3 { - fmt.Println("ValidateItemOwnership() : Requires 3 arguments Item#, Owner# and Key ") - return shim.Error("ValidateItemOwnership() : Requires 3 arguments Item#, Owner# and Key") - } - - // Get the Object Information - Avalbytes, err := QueryObject(stub, "Item", []string{args[0]}) - if err != nil { - fmt.Println("ValidateItemOwnership() : Failed to Query Object ") - jsonResp := "{\"Error\":\"Failed to get Object Data for " + args[0] + "\"}" - return shim.Error(jsonResp) - } - - if Avalbytes == nil { - fmt.Println("ValidateItemOwnership() : Incomplete Query Object ") - jsonResp := "{\"Error\":\"Incomplete information about the key for " + args[0] + "\"}" - return shim.Error(jsonResp) - } - - myItem, err := JSONtoAR(Avalbytes) - if err != nil { - fmt.Println("ValidateItemOwnership() : Failed to Query Object ") - jsonResp := "{\"Error\":\"Failed to get Object Data for " + args[0] + "\"}" - return shim.Error(jsonResp) - } - - myKey := GetKeyValue(Avalbytes, "AES_Key") - fmt.Println("Key String := ", myKey) - - if myKey != args[2] { - fmt.Println("ValidateItemOwnership() : Key does not match supplied key ", args[2], " - ", myKey) - jsonResp := "{\"Error\":\"ValidateItemOwnership() : Key does not match asset owner supplied key " + args[0] + "\"}" - return shim.Error(jsonResp) - } - - if myItem.CurrentOwnerID != args[1] { - fmt.Println("ValidateItemOwnership() : ValidateItemOwnership() : Owner-Id does not match supplied ID ", args[1]) - jsonResp := "{\"Error\":\"ValidateItemOwnership() : Owner-Id does not match supplied ID " + args[0] + "\"}" - return shim.Error(jsonResp) - } - - fmt.Print("ValidateItemOwnership() : Response : Successful - \n") - return shim.Success(Avalbytes) -} - -///////////////////////////////////////////////////////////////////////////////////////////////////// -// Retrieve Auction Information -// This query runs against the 
AuctionTable -// ./peer chaincode query -l golang -n mycc -c '{"Function": "GetAuctionRequest", "Args": ["1111"]}' -// There are two other tables just for query purposes - AucInitTable, AucOpenTable -// -///////////////////////////////////////////////////////////////////////////////////////////////////// -func GetAuctionRequest(stub shim.ChaincodeStubInterface, function string, args []string) pb.Response { - - var err error - - // Get the Objects and Display it - Avalbytes, err := QueryObject(stub, "Auction", args) - if err != nil { - fmt.Println("GetAuctionRequest() : Failed to Query Object ") - jsonResp := "{\"Error\":\"Failed to get Object Data for " + args[0] + "\"}" - return shim.Error(jsonResp) - } - - if Avalbytes == nil { - fmt.Println("GetAuctionRequest() : Incomplete Query Object ") - jsonResp := "{\"Error\":\"Incomplete information about the key for " + args[0] + "\"}" - return shim.Error(jsonResp) - } - - fmt.Println("GetAuctionRequest() : Response : Successful - \n") - return shim.Success(Avalbytes) -} - -/////////////////////////////////////////////////////////////////////////////////////////////////// -// Retrieve a Bid based on two keys - AucID, BidNo -// A Bid has two Keys - The Auction Request Number and Bid Number -// ./peer chaincode query -l golang -n mycc -c '{"Function": "GetLastBid", "Args": ["1111", "1"]}' -// -/////////////////////////////////////////////////////////////////////////////////////////////////// -func GetBid(stub shim.ChaincodeStubInterface, function string, args []string) pb.Response { - - var err error - - // Check there are 2 Arguments provided as per the struct - two are computed - // See example - if len(args) < 2 { - fmt.Println("GetBid(): Incorrect number of arguments. Expecting 2 ") - fmt.Println("GetBid(): ./peer chaincode query -l golang -n mycc -c '{\"Function\": \"GetBid\", \"Args\": [\"1111\",\"6\"]}'") - return shim.Error("GetBid(): Incorrect number of arguments. 
Expecting 2 ") - } - - // Get the Objects and Display it - Avalbytes, err := QueryObject(stub, "Bid", args) - if err != nil { - fmt.Println("GetBid() : Failed to Query Object ") - jsonResp := "{\"Error\":\"Failed to get Object Data for " + args[0] + "\"}" - return shim.Error(jsonResp) - } - - if Avalbytes == nil { - fmt.Println("GetBid() : Incomplete Query Object ") - jsonResp := "{\"Error\":\"Incomplete information about the key for " + args[0] + "\"}" - return shim.Error(jsonResp) - } - - fmt.Println("GetBid() : Response : Successful -") - return shim.Success(Avalbytes) -} - -/////////////////////////////////////////////////////////////////////////////////////////////////// -// Retrieve Auction Closeout Information. When an Auction closes -// The highest bid is retrieved and converted to a Transaction -// ./peer chaincode query -l golang -n mycc -c '{"Function": "GetTransaction", "Args": ["1111"]}' -// -/////////////////////////////////////////////////////////////////////////////////////////////////// -func GetTransaction(stub shim.ChaincodeStubInterface, function string, args []string) pb.Response { - - //var err error - - // Get the Objects and Display it - Avalbytes, err := QueryObject(stub, "Trans", args) - if Avalbytes == nil { - fmt.Println("GetTransaction() : Incomplete Query Object ") - jsonResp := "{\"Error\":\"Incomplete information about the key for " + args[0] + "\"}" - return shim.Error(jsonResp) - } - - if err != nil { - fmt.Println("GetTransaction() : Failed to Query Object ") - jsonResp := "{\"Error\":\"Failed to get Object Data for " + args[0] + "\"}" - return shim.Error(jsonResp) - } - - fmt.Println("GetTransaction() : Response : Successful") - return shim.Success(Avalbytes) -} - -/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// -// Create a User Object. 
The first step is to have users -// registered -// There are different types of users - Traders (TRD), Auction Houses (AH) -// Shippers (SHP), Insurance Companies (INS), Banks (BNK) -// While this version of the chain code does not enforce strict validation -// the business process recommends validating each persona for the service -// they provide or their participation on the auction blockchain, future enhancements will do that -// ./peer chaincode invoke -l golang -n mycc -c '{"Function": "PostUser", "Args":["100", "USER", "Ashley Hart", "TRD", "Morrisville Parkway, #216, Morrisville, NC 27560", "9198063535", "ashley@itpeople.com", "SUNTRUST", "00017102345", "0234678"]}' -/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// - -func PostUser(stub shim.ChaincodeStubInterface, function string, args []string) pb.Response { - - record, err := CreateUserObject(args[0:]) // - if err != nil { - return shim.Error(err.Error()) - } - buff, err := UsertoJSON(record) // - - if err != nil { - error_str := "PostuserObject() : Failed Cannot create object buffer for write : " + args[1] - fmt.Println(error_str) - return shim.Error(error_str) - } else { - // Update the ledger with the Buffer Data - // err = stub.PutState(args[0], buff) - keys := []string{args[0]} - err = UpdateObject(stub, "User", keys, buff) - if err != nil { - fmt.Println("PostUser() : write error while inserting record") - return shim.Error("PostUser() : write error while inserting record : Error - " + err.Error()) - } - - // Post Entry into UserCat- i.e. 
User Category Table - keys = []string{"2016", args[3], args[0]} - err = UpdateObject(stub, "UserCat", keys, buff) - if err != nil { - error_str := "PostUser() : write error while inserting recordinto UserCat" - fmt.Println(error_str) - return shim.Error(error_str) - } - } - - return shim.Success(buff) -} - -func CreateUserObject(args []string) (UserObject, error) { - - var err error - var aUser UserObject - - // Check there are 11 Arguments - if len(args) != 11 { - fmt.Println("CreateUserObject(): Incorrect number of arguments. Expecting 11 ") - return aUser, errors.New("CreateUserObject() : Incorrect number of arguments. Expecting 11 ") - } - - // Validate UserID is an integer - - _, err = strconv.Atoi(args[0]) - if err != nil { - return aUser, errors.New("CreateUserObject() : User ID should be an integer") - } - - aUser = UserObject{args[0], args[1], args[2], args[3], args[4], args[5], args[6], args[7], args[8], args[9], args[10]} - fmt.Println("CreateUserObject() : User Object : ", aUser) - - return aUser, nil -} - -///////////////////////////////////////////////////////////////////////////////////////////////////////////// -// Create a master Object of the Item -// Since the Owner Changes hands, a record has to be written for each -// Transaction with the updated Encryption Key of the new owner -// Example -//./peer chaincode invoke -l golang -n mycc -c '{"Function": "PostItem", "Args":["1000", "ARTINV", "Shadows by Asppen", "Asppen Messer", "20140202", "Original", "Landscape" , "Canvas", "15 x 15 in", "sample_7.png","$600", "100", "2016-02-02 03:000:00"]}' -///////////////////////////////////////////////////////////////////////////////////////////////////////////// - -func PostItem(stub shim.ChaincodeStubInterface, function string, args []string) pb.Response { - - itemObject, err := CreateItemObject(args[0:]) - if err != nil { - fmt.Println("PostItem(): Cannot create item object \n") - return shim.Error("PostItem(): Cannot create item object") - } - - // Check 
if the Owner ID specified is registered and valid - response := ValidateMember(stub, itemObject.CurrentOwnerID) - ownerInfo := response.Payload - fmt.Println("Owner information ", ownerInfo, itemObject.CurrentOwnerID) - if response.Status != shim.OK { - error_str := "PostItem() : Failed Owner information not found for " + itemObject.CurrentOwnerID - fmt.Println(error_str) - return shim.Error(error_str) - } - - // Convert Item Object to JSON - buff, err := ARtoJSON(itemObject) // - if err != nil { - error_str := "PostItem() : Failed Cannot create object buffer for write : " + args[1] - fmt.Println(error_str) - return shim.Error(error_str) - } else { - // Update the ledger with the Buffer Data - // err = stub.PutState(args[0], buff) - keys := []string{args[0]} - err = UpdateObject(stub, "Item", keys, buff) - if err != nil { - fmt.Println("PostItem() : write error while inserting record\n") - return shim.Error("PostItem() : write error while inserting record : " + err.Error()) - } - - // Put an entry into the Item History Table - response := PostItemLog(stub, itemObject, "INITIAL", "DEFAULT", args[12]) - if response.Status != shim.OK { - fmt.Println("PostItemLog() : write error while inserting record\n") - return shim.Error("PostItemLog() : write error while inserting record : Error : " + err.Error()) - } - - // Post Entry into ItemCatTable - i.e. 
Item Category Table - // The first key 2016 is a dummy (band aid) key to extract all values - keys = []string{"2016", args[6], args[0]} - err = UpdateObject(stub, "ItemCat", keys, buff) - if err != nil { - fmt.Println("PostItem() : Write error while inserting record into ItemCat \n") - return shim.Error("PostItem() : Write error while inserting record into ItemCat : Error : " + err.Error()) - } - } - - secret_key, _ := json.Marshal(itemObject.AES_Key) - fmt.Println(string(secret_key)) - return shim.Success(secret_key) -} - -func CreateItemObject(args []string) (ItemObject, error) { - - var err error - var myItem ItemObject - - // Check there are 13 Arguments provided as per the struct - two are computed - if len(args) != 13 { - fmt.Println("CreateItemObject(): Incorrect number of arguments. Expecting 13 ") - return myItem, errors.New("CreateItemObject(): Incorrect number of arguments. Expecting 13 ") - } - - // Validate ItemID is an integer - - _, err = strconv.Atoi(args[0]) - if err != nil { - fmt.Println("CreateItemObject(): ART ID should be an integer create failed! 
") - return myItem, errors.New("CreateItemObject(): ART ID should be an integer create failed!") - } - - // Validate Picture File exists based on the name provided - // Looks for file in current directory of application and must be fixed for other locations - - // Validate Picture File exists based on the name provided - // Looks for file in current directory of application and must be fixed for other locations - imagePath := args[9] - if _, err := os.Stat(imagePath); err == nil { - fmt.Println(imagePath, " exists!") - } else { - fmt.Println("CreateItemObject(): Cannot find or load Picture File = %s : %s\n", imagePath, err) - return myItem, errors.New("CreateItemObject(): ART Picture File not found " + imagePath) - } - - // Get the Item Image and convert it to a byte array - imagebytes, fileType := ImageToByteArray(imagePath) - - // Generate a new key and encrypt the image - - AES_key, _ := GenAESKey() - AES_enc := Encrypt(AES_key, imagebytes) - - // Append the AES Key, The Encrypted Image Byte Array and the file type - myItem = ItemObject{args[0], args[1], args[2], args[3], args[4], args[5], args[6], args[7], args[8], args[9], AES_enc, AES_key, fileType, args[10], args[11], args[12]} - - fmt.Println("CreateItemObject(): Item Object created: ", myItem.ItemID, myItem.AES_Key) - - // Code to Validate the Item Object) - // If User presents Crypto Key then key is used to validate the picture that is stored as part of the title - // TODO - - return myItem, nil -} - -/////////////////////////////////////////////////////////////////////////////////// -// Since the Owner Changes hands, a record has to be written for each -// Transaction with the updated Encryption Key of the new owner -// This function is internally invoked by PostTransaction and is not a Public API -/////////////////////////////////////////////////////////////////////////////////// - -func UpdateItemObject(stub shim.ChaincodeStubInterface, ar []byte, hammerPrice string, buyer string) pb.Response { - - var 
err error - myItem, err := JSONtoAR(ar) - if err != nil { - fmt.Println("UpdateItemObject() : Failed to create Art Record Object from JSON ") - return shim.Error("UpdateItemObject() : Failed to create Art Record Object from JSON : Error : " + err.Error()) - } - - // Insert logic to re-encrypt image by first fetching the current Key - CurrentAES_Key := myItem.AES_Key - // Decrypt Image and Save Image in a file - image := Decrypt(CurrentAES_Key, myItem.ItemImage) - - // Get a New Key & Encrypt Image with New Key - myItem.AES_Key, _ = GenAESKey() - myItem.ItemImage = Encrypt(myItem.AES_Key, image) - - // Update the owner to the Buyer and update price to auction hammer price - myItem.ItemBasePrice = hammerPrice - myItem.CurrentOwnerID = buyer - - ar, err = ARtoJSON(myItem) - // keys := []string{myItem.ItemID, myItem.CurrentOwnerID} // Was the original in v0.6 - keys := []string{myItem.ItemID} - err = ReplaceObject(stub, "Item", keys, ar) - if err != nil { - fmt.Println("UpdateItemObject() : Failed ReplaceObject in ItemTable into Blockchain ") - return shim.Error("UpdateItemObject() : Failed ReplaceObject in ItemTable into Blockchain : Error : " + err.Error()) - } - fmt.Println("UpdateItemObject() : ReplaceObject in Item successful ") - - // Update entry in Item Category Table as it holds the Item object as wekk - keys = []string{"2016", myItem.ItemSubject, myItem.ItemID} - err = ReplaceObject(stub, "ItemCat", keys, ar) - if err != nil { - fmt.Println("UpdateItemObject() : Failed ReplaceObject in ItemCategory into Blockchain ") - return shim.Error("UpdateItemObject() : Failed ReplaceObject in ItemCategory into Blockchain : Error : " + err.Error()) - } - - fmt.Println("UpdateItemObject() : ReplaceObject in ItemCategory successful ") - return shim.Success(myItem.AES_Key) -} - -/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// -// Obtain Asset Details and Validate 
Item -// Transfer Item to new owner - no change in price - In the example XFER is the recType -// ./peer chaincode invoke -l golang -n mycc -c '{"Function": "TransferItem", "Args": ["1000", "100", "tGEBaZuKUBmwTjzNEyd+nr/fPUASuVJAZ1u7gha5fJg=", "300", "XFER"]}' -/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// -func TransferItem(stub shim.ChaincodeStubInterface, function string, args []string) pb.Response { - - var err error - - if len(args) < 6 { - fmt.Println("TransferItem() : Requires 6 arguments Item#, Owner#, Key#, newOwnerID#, XFER \n") - return shim.Error("TransferItem() : Requires 6 arguments Item#, Owner#, Key#, newOwnerID#, XFER") - } - - // Let us make sure that the Item is not on Auction - err = VerifyIfItemIsOnAuction(stub, args[0]) - if err != nil { - error_str := "TransferItem() : Failed Item is either initiated or opened for Auction " + args[0] - fmt.Println(error_str) - return shim.Error(error_str + ": Error : " + err.Error()) - } - - // Validate New Owner's ID - response := ValidateMember(stub, args[3]) - if response.Status != shim.OK { - error_str := "TransferItem() : Failed transferee not Registered in Blockchain " + args[3] - fmt.Println(error_str) - return shim.Error(error_str + ": Error : " + response.Message) - } - - // Validate Item or Asset Ownership - response = ValidateItemOwnership(stub, "ValidateItemOwnership", args[:3]) - if response.Status != shim.OK { - error_str := "TransferItem() : ValidateItemOwnership() : Failed to authenticate item or asset ownership" - fmt.Println(error_str) - return shim.Error(error_str + ": Error : " + response.Message) - } - - ar := response.Payload - myItem, err := JSONtoAR(ar) - if err != nil { - error_str := "TransferItem() : Failed to create item Object from JSON " - fmt.Println(error_str) - return shim.Error(error_str + ": Error : " + err.Error()) - } - - // Insert logic to re-encrypt image by 
first fetching the current Key - CurrentAES_Key := myItem.AES_Key - // Decrypt Image and Save Image in a file - image := Decrypt(CurrentAES_Key, myItem.ItemImage) - - // Get a New Key & Encrypt Image with New Key - myItem.AES_Key, _ = GenAESKey() - myItem.ItemImage = Encrypt(myItem.AES_Key, image) - - // Update the owner to the new owner transferred to - myItem.CurrentOwnerID = args[3] - - ar, err = ARtoJSON(myItem) - keys := []string{myItem.ItemID} - err = ReplaceObject(stub, "Item", keys, ar) - if err != nil { - fmt.Println("TransferAsset() : Failed ReplaceObject in ItemTable into Blockchain ") - return shim.Error(err.Error()) - } - fmt.Println("TransferAsset() : ReplaceObject in Item successful ") - - // Update entry in Item Category Table as it holds the Item object as well - keys = []string{"2016", myItem.ItemSubject, myItem.ItemID} - err = ReplaceObject(stub, "ItemCat", keys, ar) - if err != nil { - fmt.Println("TransferAsset() : Failed ReplaceObject in ItemCategoryTable into Blockchain ") - return shim.Error(err.Error()) - } - - response = PostItemLog(stub, myItem, "Transfer", args[1], args[5]) - if response.Status != shim.OK { - fmt.Println("TransferItem() : PostItemLog() write error while inserting record\n") - return shim.Error(err.Error()) - } - - fmt.Println("TransferAsset() : ReplaceObject in ItemCategory successful ") - return shim.Success(myItem.AES_Key) -} - -//////////////////////////////////////////////////////////////////////////////////// -// Validate Item Status - Is it currently on Auction, if so Reject Transfer Request -// This can be written better - will do so if things work -// The function return the Auction ID and the Status = OPEN or INIT -//////////////////////////////////////////////////////////////////////////////////// - -func VerifyIfItemIsOnAuction(stub shim.ChaincodeStubInterface, itemID string) error { - - response := GetListOfOpenAucs(stub, "AucOpen", []string{"2016"}) - if response.Status != shim.OK { - return 
fmt.Errorf("VerifyIfItemIsOnAuction() operation failed. Error retrieving values from AucOpen: %s", response.Message) - } - - rows := response.Payload - tlist := make([]AuctionRequest, len(rows)) - err := json.Unmarshal([]byte(rows), &tlist) - if err != nil { - fmt.Println("VerifyIfItemIsOnAuction: Unmarshal failed : ", err) - return fmt.Errorf("VerifyIfItemIsOnAuction: operation failed. Error un-marshaling JSON: %s", err) - } - - for i := 0; i < len(tlist); i++ { - ar := tlist[i] - - // Compare Auction IDs - if ar.ItemID == itemID { - fmt.Println("VerifyIfItemIsOnAuction() Failed : Ummarshall error") - return fmt.Errorf("VerifyIfItemIsOnAuction() operation failed. %s", itemID) - } - } - - // Now Check if an Auction Has been inititiated - // If so , it has to be removed from Auction for a Transfer - - response = GetListOfInitAucs(stub, "AucInit", []string{"2016"}) - if response.Status != shim.OK { - return fmt.Errorf("VerifyIfItemIsOnAuction() operation failed. Error retrieving values from AucInit: %s", err) - } - - rows = response.Payload - tlist = make([]AuctionRequest, len(rows)) - err = json.Unmarshal([]byte(rows), &tlist) - if err != nil { - fmt.Println("VerifyIfItemIsOnAuction() Unmarshal failed : ", err) - return fmt.Errorf("VerifyIfItemIsOnAuction: operation failed. Error un-marshaling JSON: %s", err) - } - - for i := 0; i < len(tlist); i++ { - ar := tlist[i] - if err != nil { - fmt.Println("VerifyIfItemIsOnAuction() Failed : Ummarshall error") - return fmt.Errorf("VerifyIfItemIsOnAuction() operation failed. 
%s", err) - } - - // Compare Auction IDs - if ar.ItemID == itemID { - return fmt.Errorf("VerifyIfItemIsOnAuction() operation failed.") - } - } - - return nil -} - -////////////////////////////////////////////////////////////////////////////////////////////////////////////////// -// POSTS A LOG ENTRY Every Time the Item is transacted -// Valid Status for ItemLog = OnAuc, OnSale, NA, INITIAL -// Valid AuctionedBy: This value is set to "DEFAULT" but when it is put on auction Auction House ID is assigned -// PostItemLog IS NOT A PUBLIC API and is invoked every time some event happens in the Item's life -// The currentDateTime must be provided by Client -////////////////////////////////////////////////////////////////////////////////////////////////////////////////// - -func PostItemLog(stub shim.ChaincodeStubInterface, item ItemObject, status string, ah string, currentDateTime string) pb.Response { - - iLog := ItemToItemLog(item, currentDateTime) - iLog.Status = status - iLog.AuctionedBy = ah - - buff, err := ItemLogtoJSON(iLog) - if err != nil { - fmt.Println("PostItemLog() : Failed Cannot create object buffer for write : ", item.ItemID) - return shim.Error("PostItemLog(): Failed Cannot create object buffer for write : " + item.ItemID) - } else { - // Update the ledger with the Buffer Data - keys := []string{iLog.ItemID, iLog.Status, iLog.AuctionedBy, currentDateTime} - err = UpdateObject(stub, "ItemHistory", keys, buff) - if err != nil { - fmt.Println("PostItemLog() : write error while inserting record\n") - return shim.Error(err.Error()) - } - } - return shim.Success(buff) -} - -////////////////////////////////////////////////////////////////////////////////////////////////////////////////// -// Create an Auction Request -// The owner of an Item, when ready to put the item on an auction -// will create an auction request and specify a auction house. 
-// -// ./peer chaincode invoke -l golang -n mycc -c '{"Function": "PostAuctionRequest", "Args":["1111", "AUCREQ", "1700", "200", "400", "04012016", "1200", "INIT", "2016-05-20 11:00:00.3 +0000 UTC","2016-05-23 11:00:00.3 +0000 UTC", "2016-05-23 11:00:00.3 +0000 UTC"]}' -// -// The start and end time of the auction are actually assigned when the auction is opened by OpenAuctionForBids() -/////////////////////////////////////////////////////////////////////////////////////////////////////////////////// - -func PostAuctionRequest(stub shim.ChaincodeStubInterface, function string, args []string) pb.Response { - - ar, err := CreateAuctionRequest(args[0:]) - if err != nil { - return shim.Error(err.Error()) - } - - // Let us make sure that the Item is not on Auction - err = VerifyIfItemIsOnAuction(stub, ar.ItemID) - if err != nil { - fmt.Println("PostAuctionRequest() : Failed Item is either initiated or opened for Auction ", args[0]) - return shim.Error(err.Error()) - } - - // Validate Auction House to check it is a registered User - response := ValidateMember(stub, ar.AuctionHouseID) - if response.Status != shim.OK { - fmt.Println("PostAuctionRequest() : Failed Auction House not Registered in Blockchain ", ar.AuctionHouseID) - return shim.Error(err.Error()) - } - - aucHouse := response.Payload - fmt.Println("Auction House information ", aucHouse, " ID: ", ar.AuctionHouseID) - - // Validate Item record - response = ValidateItemSubmission(stub, ar.ItemID) - if response.Status != shim.OK { - fmt.Println("PostAuctionRequest() : Failed Could not Validate Item Object in Blockchain ", ar.ItemID) - return shim.Error(err.Error()) - } - - itemObject := response.Payload - - // Convert AuctionRequest to JSON - buff, err := AucReqtoJSON(ar) // Converting the auction request struct to []byte array - if err != nil { - fmt.Println("PostAuctionRequest() : Failed Cannot create object buffer for write : ", args[1]) - return shim.Error("PostAuctionRequest(): Failed Cannot create object 
buffer for write : " + args[1]) - } else { - // Update the ledger with the Buffer Data - //err = stub.PutState(args[0], buff) - keys := []string{args[0]} - err = UpdateObject(stub, "Auction", keys, buff) - if err != nil { - fmt.Println("PostAuctionRequest() : write error while inserting record\n") - return shim.Error(err.Error()) - } - - // Post an Item Log and the Auction House ID is included in the log - // Recall -- that by default that value is "DEFAULT" - - io, err := JSONtoAR(itemObject) - response := PostItemLog(stub, io, "ReadyForAuc", ar.AuctionHouseID, ar.TimeStamp) - if response.Status != shim.OK { - fmt.Println("PostItemLog() : write error while inserting record\n") - return shim.Error(err.Error()) - } - - //An entry is made in the AuctionInitTable that this Item has been placed for Auction - // The UI can pull all items available for auction and the item can be Opened for accepting bids - // The 2016 is a dummy key and has notr value other than to get all rows - - keys = []string{"2016", args[0]} - err = UpdateObject(stub, "AucInit", keys, buff) - if err != nil { - fmt.Println("PostAuctionRequest() : write error while inserting record into AucInit\n") - return shim.Error(err.Error()) - } - - } - - return shim.Success(buff) -} - -func CreateAuctionRequest(args []string) (AuctionRequest, error) { - var err error - var aucReg AuctionRequest - - // Check there are 12 Arguments - // See example -- The Open and Close Dates are Dummy, and will be set by open auction - // '{"Function": "PostAuctionRequest", "Args":["1111", "AUCREQ", "1000", "200", "100", "04012016", "1200", "1800", - // "INIT", "2016-05-20 11:00:00.3 +0000 UTC","2016-05-23 11:00:00.3 +0000 UTC", "2016-05-23 11:00:00.3 +0000 UTC"]}' - if len(args) != 12 { - fmt.Println("CreateAuctionRegistrationObject(): Incorrect number of arguments. Expecting 11 ") - return aucReg, errors.New("CreateAuctionRegistrationObject() : Incorrect number of arguments. 
Expecting 11 ") - } - - // Validate UserID is an integer . I think this redundant and can be avoided - - err = validateID(args[0]) - if err != nil { - return aucReg, errors.New("CreateAuctionRequest() : User ID should be an integer") - } - - aucReg = AuctionRequest{args[0], args[1], args[2], args[3], args[4], args[5], args[6], args[7], args[8], args[9], args[10], args[11]} - fmt.Println("CreateAuctionObject() : Auction Registration : ", aucReg) - - return aucReg, nil -} - -////////////////////////////////////////////////////////// -// Create an Item Transaction record to process Request -// This is invoked by the CloseAuctionRequest -// -// -//////////////////////////////////////////////////////////// -func PostTransaction(stub shim.ChaincodeStubInterface, function string, args []string) pb.Response { - - if function != "PostTransaction" { - return shim.Error("PostTransaction(): Invalid function name. Expecting \"PostTransaction\"") - } - - ar, err := CreateTransactionRequest(args[0:]) // - if err != nil { - return shim.Error(err.Error()) - } - - // Validate buyer's ID - response := ValidateMember(stub, ar.UserId) - if response.Status != shim.OK { - fmt.Println("PostTransaction() : Failed Buyer not Registered in Blockchain ", ar.UserId) - return shim.Error(err.Error()) - } - - buyer := response.Payload - - fmt.Println("PostTransaction(): Validated Buyer information Successfully ", buyer, ar.UserId) - - // Validate Item record - response = ValidateItemSubmission(stub, ar.ItemID) - if response.Status != shim.OK { - fmt.Println("PostTransaction() : Failed Could not Validate Item Object in Blockchain ", ar.ItemID) - return shim.Error(err.Error()) - } - - lastUpdatedItemOBCObject := response.Payload - fmt.Println("PostTransaction() : Validated Item Object in Blockchain Successfully", ar.ItemID) - - // Update Item Object with new Owner Key - response = UpdateItemObject(stub, lastUpdatedItemOBCObject, ar.HammerPrice, ar.UserId) - newKey := response.Payload - if 
response.Status != shim.OK { - fmt.Println("PostTransaction() : Failed to update Item Master Object in Blockchain ", ar.ItemID) - return shim.Error(err.Error()) - } else { - // Write New Key to file - fmt.Println("PostTransaction() : New encryption Key is ", newKey) - } - - fmt.Println("PostTransaction() : Updated Item Master Object in Blockchain Successfully", ar.ItemID) - - // Post an Item Log - itemObject, err := JSONtoAR(lastUpdatedItemOBCObject) - if err != nil { - fmt.Println("PostTransaction() : Conversion error JSON to ItemRecord\n") - return shim.Error(err.Error()) - } - - // A life cycle event is added to say that the Item is no longer on auction - itemObject.ItemBasePrice = ar.HammerPrice - itemObject.CurrentOwnerID = ar.UserId - - response = PostItemLog(stub, itemObject, "NA", "DEFAULT", args[5]) - if response.Status != shim.OK { - fmt.Println("PostTransaction() : write error while inserting item log record\n") - return shim.Error(err.Error()) - } - - fmt.Println("PostTransaction() : Inserted item log record Successfully", ar.ItemID) - - // Convert Transaction Object to JSON - buff, err := TrantoJSON(ar) // - if err != nil { - fmt.Println("GetObjectBuffer() : Failed to convert Transaction Object to JSON ", args[0]) - return shim.Error(err.Error()) - } - - // Update the ledger with the Buffer Data - keys := []string{args[0], args[3]} - err = UpdateObject(stub, "Trans", keys, buff) - if err != nil { - fmt.Println("PostTransaction() : write error while inserting record\n") - return shim.Error(err.Error()) - } - - fmt.Println("PostTransaction() : Posted Transaction Record Successfully\n") - - // Returns New Key. 
To get Transaction Details, run GetTransaction - - secret_key, _ := json.Marshal(newKey) - fmt.Println(string(secret_key)) - return shim.Success(secret_key) - -} - -func CreateTransactionRequest(args []string) (ItemTransaction, error) { - - var at ItemTransaction - - // Check there are 9 Arguments - if len(args) != 9 { - fmt.Println("CreateTransactionRequest(): Incorrect number of arguments. Expecting 9 ") - return at, errors.New("CreateTransactionRequest() : Incorrect number of arguments. Expecting 9 ") - } - - at = ItemTransaction{args[0], args[1], args[2], args[3], args[4], args[5], args[6], args[7], args[8]} - fmt.Println("CreateTransactionRequest() : Transaction Request: ", at) - - return at, nil -} - -/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// -// Create a Bid Object -// Once an Item has been opened for auction, bids can be submitted as long as the auction is "OPEN" -//./peer chaincode invoke -l golang -n mycc -c '{"Function": "PostBid", "Args":["1111", "BID", "1", "1000", "300", "1200", "2017-01-23 14:00:00.3 +0000 UTC"]}' -//./peer chaincode invoke -l golang -n mycc -c '{"Function": "PostBid", "Args":["1111", "BID", "2", "1000", "400", "3000","2017-01-23 14:00:00.3 +0000 UTC"]}' -// -///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// - -func PostBid(stub shim.ChaincodeStubInterface, function string, args []string) pb.Response { - - bid, err := CreateBidObject(args[0:]) // - if err != nil { - return shim.Error(err.Error()) - } - - // Reject the Bid if the Buyer Information Is not Valid or not registered on the Block Chain - response := ValidateMember(stub, args[4]) - if response.Status != shim.OK { - fmt.Println("PostBid() : Failed Buyer not registered on the block-chain ", args[4]) - return shim.Error(err.Error()) - } - - buyerInfo := response.Payload - fmt.Println("Buyer information ", buyerInfo, " ", 
args[4]) - - /////////////////////////////////////// - // Reject Bid if Auction is not "OPEN" - /////////////////////////////////////// - response = GetAuctionRequest(stub, "GetAuctionRequest", []string{args[0]}) - if response.Status != shim.OK { - fmt.Println("PostBid() : Cannot find Auction record ", args[0]) - return shim.Error("PostBid(): Cannot find Auction record : " + args[0]) - } - - RBytes := response.Payload - - aucR, err := JSONtoAucReq(RBytes) - if err != nil { - fmt.Println("PostBid() : Cannot UnMarshall Auction record") - return shim.Error("PostBid(): Cannot UnMarshall Auction record: " + args[0]) - } - - if aucR.Status != "OPEN" { - fmt.Println("PostBid() : Cannot accept Bid as Auction is not OPEN ", args[0]) - return shim.Error("PostBid(): Cannot accept Bid as Auction is not OPEN : " + args[0]) - } - - /////////////////////////////////////////////////////////////////// - // Reject Bid if the time bid was received is > Auction Close Time - /////////////////////////////////////////////////////////////////// - if tCompare(bid.BidTime, aucR.CloseDate) == false { - fmt.Println("PostBid() Failed : BidTime past the Auction Close Time") - error_str := fmt.Sprintf("PostBid() Failed : BidTime past the Auction Close Time %s, %s", bid.BidTime, aucR.CloseDate) - return shim.Error(error_str) - } - - ////////////////////////////////////////////////////////////////// - // Reject Bid if Item ID on Bid does not match Item ID on Auction - ////////////////////////////////////////////////////////////////// - if aucR.ItemID != bid.ItemID { - fmt.Println("PostBid() Failed : Item ID mismatch on bid. Bid Rejected") - return shim.Error("PostBid() : Item ID mismatch on Bid. 
Bid Rejected") - } - - ////////////////////////////////////////////////////////////////////// - // Reject Bid if Bid Price is less than Reserve Price - // Convert Bid Price and Reserve Price to Integer (TODO - Float) - ////////////////////////////////////////////////////////////////////// - bp, err := strconv.Atoi(bid.BidPrice) - if err != nil { - fmt.Println("PostBid() Failed : Bid price should be an integer") - return shim.Error("PostBid() : Bid price should be an integer") - } - - hp, err := strconv.Atoi(aucR.ReservePrice) - if err != nil { - return shim.Error("PostItem() : Reserve Price should be an integer") - } - - // Check if Bid Price is > Auction Request Reserve Price - if bp < hp { - return shim.Error("PostItem() : Bid Price must be greater than Reserve Price") - } - - //////////////////////////// - // Post or Accept the Bid - //////////////////////////// - buff, err := BidtoJSON(bid) // - - if err != nil { - fmt.Println("PostBid() : Failed Cannot create object buffer for write : ", args[1]) - return shim.Error("PostBid(): Failed Cannot create object buffer for write : " + args[1]) - } else { - // Update the ledger with the Buffer Data - // err = stub.PutState(args[0], buff) - keys := []string{args[0], args[2]} - err = UpdateObject(stub, "Bid", keys, buff) - if err != nil { - fmt.Println("PostBid() : write error while inserting record\n") - return shim.Error(err.Error()) - } - } - - return shim.Success(buff) -} - -func CreateBidObject(args []string) (Bid, error) { - var err error - var aBid Bid - - // Check there are 7 Arguments - // See example - if len(args) != 7 { - fmt.Println("CreateBidObject(): Incorrect number of arguments. Expecting 7 ") - return aBid, errors.New("CreateBidObject() : Incorrect number of arguments. 
Expecting 7 ") - } - - // Validate Bid is an integer - - _, err = strconv.Atoi(args[0]) - if err != nil { - return aBid, errors.New("CreateBidObject() : Bid ID should be an integer") - } - - _, err = strconv.Atoi(args[2]) - if err != nil { - return aBid, errors.New("CreateBidObject() : Bid ID should be an integer") - } - - // bidTime = args[6] sent by the client - aBid = Bid{args[0], args[1], args[2], args[3], args[4], args[5], args[6]} - fmt.Println("CreateBidObject() : Bid Object : ", aBid) - - return aBid, nil -} - -////////////////////////////////////////////////////////// -// JSON To args[] - return a map of the JSON string -////////////////////////////////////////////////////////// -func JSONtoArgs(Avalbytes []byte) (map[string]interface{}, error) { - - var data map[string]interface{} - - if err := json.Unmarshal(Avalbytes, &data); err != nil { - return nil, err - } - - return data, nil -} - -////////////////////////////////////////////////////////// -// Variation of the above - return value from a JSON string -////////////////////////////////////////////////////////// - -func GetKeyValue(Avalbytes []byte, key string) string { - var dat map[string]interface{} - if err := json.Unmarshal(Avalbytes, &dat); err != nil { - panic(err) - } - - val := dat[key].(string) - return val -} - -////////////////////////////////////////////////////////// -// Time and Date Comparison -// tCompare("2016-06-28 18:40:57", "2016-06-27 18:45:39") -////////////////////////////////////////////////////////// -func tCompare(t1 string, t2 string) bool { - - layout := "2006-01-02 15:04:05" - bidTime, err := time.Parse(layout, t1) - if err != nil { - fmt.Println("tCompare() Failed : time Conversion error on t1") - return false - } - - aucCloseTime, err := time.Parse(layout, t2) - if err != nil { - fmt.Println("tCompare() Failed : time Conversion error on t2") - return false - } - - if bidTime.Before(aucCloseTime) { - return true - } - - return false -} - 
-////////////////////////////////////////////////////////// -// Converts JSON String to an ART Object -////////////////////////////////////////////////////////// -func JSONtoAR(data []byte) (ItemObject, error) { - - ar := ItemObject{} - err := json.Unmarshal([]byte(data), &ar) - if err != nil { - fmt.Println("Unmarshal failed : ", err) - } - - return ar, err -} - -////////////////////////////////////////////////////////// -// Converts an ART Object to a JSON String -////////////////////////////////////////////////////////// -func ARtoJSON(ar ItemObject) ([]byte, error) { - - ajson, err := json.Marshal(ar) - if err != nil { - fmt.Println(err) - return nil, err - } - return ajson, nil -} - -////////////////////////////////////////////////////////// -// Converts an BID to a JSON String -////////////////////////////////////////////////////////// -func ItemLogtoJSON(item ItemLog) ([]byte, error) { - - ajson, err := json.Marshal(item) - if err != nil { - fmt.Println(err) - return nil, err - } - return ajson, nil -} - -////////////////////////////////////////////////////////// -// Converts an User Object to a JSON String -////////////////////////////////////////////////////////// -func JSONtoItemLog(ithis []byte) (ItemLog, error) { - - item := ItemLog{} - err := json.Unmarshal(ithis, &item) - if err != nil { - fmt.Println("JSONtoAucReq error: ", err) - return item, err - } - return item, err -} - -////////////////////////////////////////////////////////// -// Converts an Auction Request to a JSON String -////////////////////////////////////////////////////////// -func AucReqtoJSON(ar AuctionRequest) ([]byte, error) { - - ajson, err := json.Marshal(ar) - if err != nil { - fmt.Println(err) - return nil, err - } - return ajson, nil -} - -////////////////////////////////////////////////////////// -// Converts an User Object to a JSON String -////////////////////////////////////////////////////////// -func JSONtoAucReq(areq []byte) (AuctionRequest, error) { - - ar := 
AuctionRequest{} - err := json.Unmarshal(areq, &ar) - if err != nil { - fmt.Println("JSONtoAucReq error: ", err) - return ar, err - } - return ar, err -} - -////////////////////////////////////////////////////////// -// Converts an BID to a JSON String -////////////////////////////////////////////////////////// -func BidtoJSON(myHand Bid) ([]byte, error) { - - ajson, err := json.Marshal(myHand) - if err != nil { - fmt.Println(err) - return nil, err - } - return ajson, nil -} - -////////////////////////////////////////////////////////// -// Converts an User Object to a JSON String -////////////////////////////////////////////////////////// -func JSONtoBid(areq []byte) (Bid, error) { - - myHand := Bid{} - err := json.Unmarshal(areq, &myHand) - if err != nil { - fmt.Println("JSONtoAucReq error: ", err) - return myHand, err - } - return myHand, err -} - -////////////////////////////////////////////////////////// -// Converts an User Object to a JSON String -////////////////////////////////////////////////////////// -func UsertoJSON(user UserObject) ([]byte, error) { - - ajson, err := json.Marshal(user) - if err != nil { - fmt.Println("UsertoJSON error: ", err) - return nil, err - } - fmt.Println("UsertoJSON created: ", ajson) - return ajson, nil -} - -////////////////////////////////////////////////////////// -// Converts an User Object to a JSON String -////////////////////////////////////////////////////////// -func JSONtoUser(user []byte) (UserObject, error) { - - ur := UserObject{} - err := json.Unmarshal(user, &ur) - if err != nil { - fmt.Println("UsertoJSON error: ", err) - return ur, err - } - fmt.Println("UsertoJSON created: ", ur) - return ur, err -} - -////////////////////////////////////////////////////////// -// Converts an Item Transaction to a JSON String -////////////////////////////////////////////////////////// -func TrantoJSON(at ItemTransaction) ([]byte, error) { - - ajson, err := json.Marshal(at) - if err != nil { - fmt.Println(err) - return nil, 
err - } - return ajson, nil -} - -////////////////////////////////////////////////////////// -// Converts an Trans Object to a JSON String -////////////////////////////////////////////////////////// -func JSONtoTran(areq []byte) (ItemTransaction, error) { - - at := ItemTransaction{} - err := json.Unmarshal(areq, &at) - if err != nil { - fmt.Println("JSONtoTran error: ", err) - return at, err - } - return at, err -} - -////////////////////////////////////////////// -// Validates an ID for Well Formed -////////////////////////////////////////////// - -func validateID(id string) error { - // Validate UserID is an integer - - _, err := strconv.Atoi(id) - if err != nil { - return errors.New("validateID(): User ID should be an integer") - } - return nil -} - -////////////////////////////////////////////// -// Create an ItemLog from Item -////////////////////////////////////////////// - -func ItemToItemLog(io ItemObject, cdt string) ItemLog { - - iLog := ItemLog{} - iLog.ItemID = io.ItemID - iLog.Status = "INITIAL" - iLog.AuctionedBy = "DEFAULT" - iLog.RecType = "ILOG" - iLog.ItemDesc = io.ItemDesc - iLog.CurrentOwner = io.CurrentOwnerID - iLog.Date = cdt - - return iLog -} - -////////////////////////////////////////////// -// Convert Bid to Transaction for Posting -////////////////////////////////////////////// - -func BidtoTransaction(bid Bid) ItemTransaction { - - var t ItemTransaction - t.AuctionID = bid.AuctionID - t.RecType = "POSTTRAN" - t.ItemID = bid.ItemID - t.TransType = "SALE" - t.UserId = bid.BuyerID - // Ideally SystemChain Code must provide a TimeStamp Function - t.TransDate = bid.BidTime - t.HammerTime = bid.BidTime - t.HammerPrice = bid.BidPrice - t.Details = "The Highest Bidder does not always win" - - return t -} - -//////////////////////////////////////////////////////////////////////////// -// Validate if the User Information Exists -// in the block-chain -//////////////////////////////////////////////////////////////////////////// -func 
ValidateMember(stub shim.ChaincodeStubInterface, owner string) pb.Response { - - // Get the Item Objects and Display it - // Avalbytes, err := stub.GetState(owner) - args := []string{owner} - Avalbytes, err := QueryObject(stub, "User", args) - - if err != nil { - fmt.Println("ValidateMember() : Failed - Cannot find valid owner record for ART ", owner) - jsonResp := "{\"Error\":\"Failed to get Owner Object Data for " + owner + "\"}" - return shim.Error(jsonResp) - } - - if Avalbytes == nil { - fmt.Println("ValidateMember() : Failed - Incomplete owner record for ART ", owner) - jsonResp := "{\"Error\":\"Failed - Incomplete information about the owner for " + owner + "\"}" - return shim.Error(jsonResp) - } - - fmt.Println("ValidateMember() : Validated Item Owner:\n", owner) - return shim.Success(Avalbytes) -} - -//////////////////////////////////////////////////////////////////////////// -// Validate if the User Information Exists -// in the block-chain -//////////////////////////////////////////////////////////////////////////// -func ValidateItemSubmission(stub shim.ChaincodeStubInterface, artId string) pb.Response { - - // Get the Item Objects and Display it - args := []string{artId} - Avalbytes, err := QueryObject(stub, "Item", args) - if err != nil { - fmt.Println("ValidateItemSubmission() : Failed - Cannot find valid owner record for ART ", artId) - jsonResp := "{\"Error\":\"Failed to get Owner Object Data for " + artId + "\"}" - return shim.Error(jsonResp) - } - - if Avalbytes == nil { - fmt.Println("ValidateItemSubmission() : Failed - Incomplete owner record for ART ", artId) - jsonResp := "{\"Error\":\"Failed - Incomplete information about the owner for " + artId + "\"}" - return shim.Error(jsonResp) - } - - //fmt.Println("ValidateItemSubmission() : Validated Item Owner:", Avalbytes) - return shim.Success(Avalbytes) -} - -///////////////////////////////////////////////////////////////////////////////////////////////////// -// Get List of Bids for an Auction 
-// in the block-chain -- -// ./peer chaincode query -l golang -n mycc -c '{"Function": "GetListOfBids", "Args": ["1111"]}' -// ./peer chaincode query -l golang -n mycc -c '{"Function": "GetLastBid", "Args": ["1111"]}' -// ./peer chaincode query -l golang -n mycc -c '{"Function": "GetHighestBid", "Args": ["1111"]}' -///////////////////////////////////////////////////////////////////////////////////////////////////// -func GetListOfBids(stub shim.ChaincodeStubInterface, function string, args []string) pb.Response { - - rs, err := GetList(stub, "Bid", args) - if err != nil { - error_str := fmt.Sprintf("GetListOfBids operation failed. Error marshaling JSON: %s", err) - return shim.Error(error_str) - } - - defer rs.Close() - - // Iterate through result set - var i int - var tlist []Bid // Define a list - for i = 0; rs.HasNext(); i++ { - - // We can process whichever return value is of interest - responseRange, err := rs.Next() - - if err != nil { - return shim.Success(nil) - } - bid, err := JSONtoBid(responseRange.Value) - if err != nil { - error_str := fmt.Sprintf("GetListOfBids() operation failed - Unmarshall Error. %s", err) - fmt.Println(error_str) - return shim.Error(error_str) - } - fmt.Println("GetList() : my Value : ", bid) - tlist = append(tlist, bid) - } - - jsonRows, err := json.Marshal(tlist) - if err != nil { - error_str := fmt.Sprintf("GetListOfBids() operation failed - Unmarshall Error. 
%s", err) - fmt.Println(error_str) - return shim.Error(error_str) - } - - fmt.Println("List of Bids Requested : ", jsonRows) - return shim.Success(jsonRows) - -} - -//////////////////////////////////////////////////////////////////////////////////////////////////////// -// Get List of Auctions that have been initiated -// in the block-chain -// This is a fixed Query to be issued as below -// peer chaincode query -l golang -n mycc -c '{"Args": ["qGetListOfInitAucs", "2016"]}' -//////////////////////////////////////////////////////////////////////////////////////////////////////// -func GetListOfInitAucs(stub shim.ChaincodeStubInterface, function string, args []string) pb.Response { - - rs, err := GetList(stub, "AucInit", args) - if err != nil { - error_str := fmt.Sprintf("GetListOfInitAucs operation failed. Error marshaling JSON: %s", err) - return shim.Error(error_str) - } - - defer rs.Close() - - // Iterate through result set - var i int - var tlist []AuctionRequest // Define a list - for i = 0; rs.HasNext(); i++ { - - // We can process whichever return value is of interest - responseRange, err := rs.Next() - if err != nil { - return shim.Success(nil) - } - ar, err := JSONtoAucReq(responseRange.Value) - if err != nil { - error_str := fmt.Sprintf("GetListOfInitAucs() operation failed - Unmarshall Error. %s", err) - fmt.Println(error_str) - return shim.Error(error_str) - } - fmt.Println("GetListOfInitAucs() : my Value : ", ar) - tlist = append(tlist, ar) - } - - jsonRows, err := json.Marshal(tlist) - if err != nil { - error_str := fmt.Sprintf("GetListOfInitAucs() operation failed - Unmarshall Error. 
%s", err) - fmt.Println(error_str) - return shim.Error(error_str) - } - - //fmt.Println("List of Auctions Requested : ", jsonRows) - return shim.Success(jsonRows) - -} - -//////////////////////////////////////////////////////////////////////////// -// Get List of Open Auctions for which bids can be supplied -// in the block-chain -// This is a fixed Query to be issued as below -// peer chaincode query -l golang -n mycc -c '{"Args": ["qGetListOfOpenAucs", "2016"]}' -//////////////////////////////////////////////////////////////////////////// -func GetListOfOpenAucs(stub shim.ChaincodeStubInterface, function string, args []string) pb.Response { - - rs, err := GetList(stub, "AucOpen", args) - if err != nil { - error_str := fmt.Sprintf("GetListOfOpenAucs operation failed. Error marshaling JSON: %s", err) - return shim.Error(error_str) - } - defer rs.Close() - - // Iterate through result set - var i int - var tlist []AuctionRequest // Define a list - for i = 0; rs.HasNext(); i++ { - - // We can process whichever return value is of interest - responseRange, err := rs.Next() - if err != nil { - return shim.Success(nil) - } - ar, err := JSONtoAucReq(responseRange.Value) - if err != nil { - error_str := fmt.Sprintf("GetListOfOpenAucs() operation failed - Unmarshall Error. %s", err) - fmt.Println(error_str) - return shim.Error(error_str) - } - fmt.Println("GetListOfOpenAucs() : my Value : ", ar) - tlist = append(tlist, ar) - } - - jsonRows, err := json.Marshal(tlist) - if err != nil { - error_str := fmt.Sprintf("GetListOfInitAucs() operation failed - Unmarshall Error. %s", err) - fmt.Println(error_str) - return shim.Error(error_str) - } - - //fmt.Println("List of Open Auctions : ", jsonRows) - return shim.Success(jsonRows) - -} - -//////////////////////////////////////////////////////////////////////////// -// Get the Item History for an Item -// in the block-chain .. 
Pass the Item ID -// ./peer chaincode query -l golang -n mycc -c '{"Function": "GetItemLog", "Args": ["1000"]}' -//////////////////////////////////////////////////////////////////////////// -func GetItemLog(stub shim.ChaincodeStubInterface, function string, args []string) pb.Response { - - // Check there are 1 Arguments provided as per the struct - two are computed - // See example - if len(args) < 1 { - fmt.Println("GetItemLog(): Incorrect number of arguments. Expecting 1 ") - fmt.Println("GetItemLog(): ./peer chaincode query -l golang -n mycc -c '{\"Function\": \"GetItem\", \"Args\": [\"1111\"]}'") - return shim.Error("CreateItemObject(): Incorrect number of arguments. Expecting 12 ") - } - - rs, err := GetList(stub, "ItemHistory", args) - if err != nil { - error_str := fmt.Sprintf("GetItemLog operation failed. Error marshaling JSON: %s", err) - return shim.Error(error_str) - } - - defer rs.Close() - - // Iterate through result set - var i int - var tlist []ItemLog // Define a list - for i = 0; rs.HasNext(); i++ { - - // We can process whichever return value is of interest - responseRange, err := rs.Next() - if err != nil { - return shim.Success(nil) - } - il, err := JSONtoItemLog(responseRange.Value) - if err != nil { - error_str := fmt.Sprintf("GetItemLog() operation failed - Unmarshall Error. %s", err) - fmt.Println(error_str) - return shim.Error(error_str) - } - fmt.Println("GetItemLog() : my Value : ", il) - tlist = append(tlist, il) - } - - jsonRows, err := json.Marshal(tlist) - if err != nil { - error_str := fmt.Sprintf("GetItemLog() operation failed - Unmarshall Error. 
%s", err) - fmt.Println(error_str) - return shim.Error(error_str) - } - - //fmt.Println("All History : ", jsonRows) - return shim.Success(jsonRows) - -} - -//////////////////////////////////////////////////////////////////////////// -// Get a List of Items by Category -// in the block-chain -// Input is 2016 + Category -// Categories include whatever has been defined in the Item Tables - Landscape, Modern, ... -// See Sample data -// ./peer chaincode query -l golang -n mycc -c '{"Function": "GetItemListByCat", "Args": ["2016", "Modern"]}' -//////////////////////////////////////////////////////////////////////////// -func GetItemListByCat(stub shim.ChaincodeStubInterface, function string, args []string) pb.Response { - - // Check there are 1 Arguments provided as per the struct - two are computed - // See example - if len(args) < 1 { - fmt.Println("GetItemListByCat(): Incorrect number of arguments. Expecting 1 ") - fmt.Println("GetItemListByCat(): ./peer chaincode query -l golang -n mycc -c '{\"Function\": \"GetItemListByCat\", \"Args\": [\"Modern\"]}'") - return shim.Error("CreateItemObject(): Incorrect number of arguments. Expecting 1 ") - } - - rs, err := GetList(stub, "ItemCat", args) - if err != nil { - error_str := fmt.Sprintf("GetItemListByCat operation failed. Error marshaling JSON: %s", err) - return shim.Error(error_str) - } - - defer rs.Close() - - // Iterate through result set - var i int - var tlist []ItemObject // Define a list - for i = 0; rs.HasNext(); i++ { - - // We can process whichever return value is of interest - responseRange, err := rs.Next() - if err != nil { - return shim.Success(nil) - } - io, err := JSONtoAR(responseRange.Value) - if err != nil { - error_str := fmt.Sprintf("GetItemListByCat() operation failed - Unmarshall Error. 
%s", err) - fmt.Println(error_str) - return shim.Error(error_str) - } - - fmt.Println("GetItemListByCat() : my Value : ", io) - tlist = append(tlist, io) - } - - jsonRows, err := json.Marshal(tlist) - if err != nil { - error_str := fmt.Sprintf("GetItemListByCat() operation failed - Unmarshall Error. %s", err) - fmt.Println(error_str) - return shim.Error(error_str) - } - - //fmt.Println("All Items : ", jsonRows) - return shim.Success(jsonRows) - -} - -//////////////////////////////////////////////////////////////////////////// -// Get a List of Users by Category -// in the block-chain -//////////////////////////////////////////////////////////////////////////// -func GetUserListByCat(stub shim.ChaincodeStubInterface, function string, args []string) pb.Response { - - // Check there are 1 Arguments provided as per the struct - two are computed - // See example - if len(args) < 1 { - fmt.Println("GetUserListByCat(): Incorrect number of arguments. Expecting 1 ") - fmt.Println("GetUserListByCat(): ./peer chaincode query -l golang -n mycc -c '{\"Function\": \"GetUserListByCat\", \"Args\": [\"AH\"]}'") - return shim.Error("CreateUserObject(): Incorrect number of arguments. Expecting 1 ") - } - - rs, err := GetList(stub, "UserCat", args) - if err != nil { - error_str := fmt.Sprintf("GetUserListByCat operation failed. Error marshaling JSON: %s", err) - return shim.Error(error_str) - } - - defer rs.Close() - - // Iterate through result set - var i int - var tlist []UserObject // Define a list - for i = 0; rs.HasNext(); i++ { - - // We can process whichever return value is of interest - responseRange, err := rs.Next() - if err != nil { - return shim.Success(nil) - } - uo, err := JSONtoUser(responseRange.Value) - if err != nil { - error_str := fmt.Sprintf("GetUserListByCat() operation failed - Unmarshall Error. 
%s", err) - fmt.Println(error_str) - return shim.Error(error_str) - } - - fmt.Println("GetUserListByCat() : my Value : ", uo) - tlist = append(tlist, uo) - } - - jsonRows, err := json.Marshal(tlist) - if err != nil { - error_str := fmt.Sprintf("GetUserListByCat() operation failed - Unmarshall Error. %s", err) - fmt.Println(error_str) - return shim.Error(error_str) - } - - //fmt.Println("All Users : ", jsonRows) - return shim.Success(jsonRows) - -} - -//////////////////////////////////////////////////////////////////////////// -// Get The Highest Bid Received so far for an Auction -// in the block-chain -//////////////////////////////////////////////////////////////////////////// -func GetLastBid(stub shim.ChaincodeStubInterface, function string, args []string) pb.Response { - - var Avalbytes []byte - - layout := "2006-01-02 15:04:05" - highestTime, err := time.Parse(layout, layout) - - rs, err := GetList(stub, "Bid", args) - if err != nil { - error_str := fmt.Sprintf("GetListOfBids operation failed. Error marshaling JSON: %s", err) - return shim.Error(error_str) - } - - defer rs.Close() - // Iterate through result set - - for i := 0; rs.HasNext(); i++ { - - // We can process whichever return value is of interest - responseRange, err := rs.Next() - if err != nil { - return shim.Success(nil) - } - currentBid, err := JSONtoBid(responseRange.Value) - if err != nil { - error_str := fmt.Sprintf("GetHighestBid(0 operation failed. 
%s", err) - fmt.Println(error_str) - return shim.Error(error_str) - } - - bidTime, err := time.Parse(layout, currentBid.BidTime) - if err != nil { - error_str := fmt.Sprintf("GetLastBid() Failed : time Conversion error on BidTime %s", err) - fmt.Println(error_str) - return shim.Error(error_str) - } - - if bidTime.Sub(highestTime) > 0 { - highestTime = bidTime - Avalbytes = responseRange.Value - } - return shim.Success(Avalbytes) - } - return shim.Error("GetLastBid() : Failed - No Bids Found") -} - -//////////////////////////////////////////////////////////////////////////// -// Get The Highest Bid Received so far for an Auction -// in the block-chain -//////////////////////////////////////////////////////////////////////////// -func GetNoOfBidsReceived(stub shim.ChaincodeStubInterface, function string, args []string) pb.Response { - - rs, err := GetList(stub, "Bid", args) - if err != nil { - error_str := fmt.Sprintf("GetListOfBids operation failed. Error marshaling JSON: %s", err) - return shim.Error(error_str) - } - - defer rs.Close() - - // Iterate through result set - var i int - for i = 0; rs.HasNext(); i++ { - - // We can process whichever return value is of interest - _, err := rs.Next() - if err != nil { - return shim.Success(nil) - } - } - return shim.Success([]byte(strconv.Itoa(i))) -} - -//////////////////////////////////////////////////////////////////////////// -// Get the Highest Bid in the List -// -//////////////////////////////////////////////////////////////////////////// -func GetHighestBid(stub shim.ChaincodeStubInterface, function string, args []string) pb.Response { - - var Avalbytes []byte - highestBid := 0 - rs, err := GetList(stub, "Bid", args) - if err != nil { - error_str := fmt.Sprintf("GetListOfBids operation failed. 
Error marshaling JSON: %s", err) - return shim.Error(error_str) - } - - defer rs.Close() - - // Iterate through result set - var i int - for i = 0; rs.HasNext(); i++ { - - // We can process whichever return value is of interest - responseRange, err := rs.Next() - if err != nil { - return shim.Success(nil) - } - currentBid, err := JSONtoBid(responseRange.Value) - if err != nil { - error_str := fmt.Sprintf("GetHighestBid(0 operation failed. %s", err) - fmt.Println(error_str) - return shim.Error(error_str) - } - - bidPrice, err := strconv.Atoi(currentBid.BidPrice) - if err != nil { - error_str := fmt.Sprintf("GetHighestBid() Int Conversion error on BidPrice! failed. %s", err) - fmt.Println(error_str) - return shim.Error(error_str) - } - - if bidPrice >= highestBid { - highestBid = bidPrice - Avalbytes = responseRange.Value - - } - } - - return shim.Success(Avalbytes) -} - -/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// -// Trigger the Auction -// Structure of args auctionReqID, RecType, AucStartDateTime, Duration in Minutes ( 3 = 3 minutes) -// ./peer chaincode invoke -l golang -n mycc -c '{"Function": "OpenAuctionForBids", "Args":["1111", "OPENAUC", "3", "2006-01-02 15:04:05"]}' -/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// - -func OpenAuctionForBids(stub shim.ChaincodeStubInterface, function string, args []string) pb.Response { - - // Fetch Auction Object and check its Status - Avalbytes, err := QueryObject(stub, "Auction", []string{args[0]}) - if err != nil { - fmt.Println("OpenAuctionForBids(): Auction Object Retrieval Failed ") - return shim.Error("OpenAuctionForBids(): Auction Object Retrieval Failed ") - } - - aucR, err := JSONtoAucReq(Avalbytes) - if err != nil { - fmt.Println("OpenAuctionForBids(): Auction Object Unmarshalling Failed ") - return shim.Error("OpenAuctionForBids(): Auction Object UnMarshalling Failed 
") - } - - if aucR.Status == "CLOSED" { - fmt.Println("OpenAuctionForBids(): Auction is Closed - Cannot Open for new bids ") - return shim.Error("OpenAuctionForBids(): is Closed - Cannot Open for new bids Failed ") - } - - // Calculate Time Now and Duration of Auction - - // Validate arg[1] is an integer as it represents Duration in Minutes - aucDuration, err := strconv.Atoi(args[2]) - if err != nil { - fmt.Println("OpenAuctionForBids(): Auction Duration is an integer that represents minute! OpenAuctionForBids() Failed ") - return shim.Error("OpenAuctionForBids(): Auction Duration is an integer that represents minute! OpenAuctionForBids() Failed ") - } - - aucStartDate, err := time.Parse("2006-01-02 15:04:05", args[3]) - aucEndDate := aucStartDate.Add(time.Duration(aucDuration) * time.Minute) - - // We don't use go routines anymore to time the auction - //sleepTime := time.Duration(aucDuration * 60 * 1000 * 1000 * 1000) - - // Update Auction Object - aucR.OpenDate = aucStartDate.Format("2006-01-02 15:04:05") - aucR.CloseDate = aucEndDate.Format("2006-01-02 15:04:05") - aucR.Status = "OPEN" - - response := UpdateAuctionStatus(stub, "Auction", aucR) - if response.Status != shim.OK { - fmt.Println("OpenAuctionForBids(): UpdateAuctionStatus() Failed ") - return shim.Error("OpenAuctionForBids(): UpdateAuctionStatus() Failed ") - } - - buff := response.Payload - - // Remove the Auction from INIT Bucket and move to OPEN bucket - // This was designed primarily to help the UI - - keys := []string{"2016", aucR.AuctionID} - err = DeleteObject(stub, "AucInit", keys) - if err != nil { - fmt.Println("OpenAuctionForBids(): DeleteFromLedger() Failed ") - return shim.Error("OpenAuctionForBids(): DeleteFromLedger() Failed ") - } - - // Add the Auction to Open Bucket - err = UpdateObject(stub, "AucOpen", keys, buff) - if err != nil { - fmt.Println("OpenAuctionForBids() : write error while inserting record into AucInit\n") - return shim.Error(err.Error()) - } - - return 
shim.Success(buff) -} - -////////////////////////////////////////////////////////////////////////// -// Close Open Auctions -// 1. Read OpenAucTable -// 2. Compare now with expiry time with now -// 3. If now is > expiry time call CloseAuction -// ./peer chaincode invoke -l golang -n mycc -c '{"Function": "CloseOpenAuctions", "Args": ["2016", "CLAUC", currentDateTime]}' -////////////////////////////////////////////////////////////////////////// - -func CloseOpenAuctions(stub shim.ChaincodeStubInterface, function string, args []string) pb.Response { - - response := GetListOfOpenAucs(stub, "AucOpen", []string{"2016"}) - if response.Status != shim.OK { - error_str := fmt.Sprintf("CloseOpenAuctions() operation failed. Error retrieving values from AucOpen: %s", response.Message) - fmt.Println(error_str) - return shim.Error(error_str) - } - - rows := response.Payload - tlist := make([]AuctionRequest, len(rows)) - err := json.Unmarshal([]byte(rows), &tlist) - if err != nil { - error_str := fmt.Sprintf("CloseOpenAuctions() Unmarshal operation failed. Error retrieving values from AucOpen: %s", response.Message) - fmt.Println(error_str) - return shim.Error(error_str) - } - - for i := 0; i < len(tlist); i++ { - ar := tlist[i] - - fmt.Println("CloseOpenAuctions() ", ar) - - // Compare Auction Times where args[2] = the CurrentTime sent by the client - fmt.Println("CloseOpenAuctions() : ", args[2], ": ar.CloseDate : ", ar.CloseDate) - if tCompare(args[2], ar.CloseDate) == false { - - // Request Closing Auction - response := CloseAuction(stub, "CloseAuction", []string{ar.AuctionID}) - if response.Status != shim.OK { - error_str := fmt.Sprintf("CloseOpenAuctions() operation failed. 
%s", err) - fmt.Println(error_str) - return shim.Error(error_str) - } - } - } - - return shim.Success(nil) -} - -////////////////////////////////////////////////////////////////////////// -// Close the Auction -// This is invoked by OpenAuctionForBids -// which kicks-off a go-routine timer for the duration of the auction -// When the timer expires, it creates a shell script to CloseAuction() and triggers it -// This function can also be invoked via CLI - the intent was to close as and when I implement BuyItNow() -// CloseAuction -// - Sets the status of the Auction to "CLOSED" -// - Removes the Auction from the Open Auction list (AucOpenTable) -// - Retrieves the Highest Bid and creates a Transaction -// - Posts The Transaction -// -// To invoke from Command Line via CLI or REST API -// ./peer chaincode invoke -l golang -n mycc -c '{"Function": "CloseAuction", "Args": ["1111", "AUCREQ"]}' -// ./peer chaincode invoke -l golang -n mycc -c '{"Function": "CloseAuction", "Args": ["1111", "AUCREQ"]}' -// -////////////////////////////////////////////////////////////////////////// - -func CloseAuction(stub shim.ChaincodeStubInterface, function string, args []string) pb.Response { - - // Close The Auction - Fetch Auction Object - Avalbytes, err := QueryObject(stub, "Auction", []string{args[0]}) - if err != nil { - fmt.Println("CloseAuction(): Auction Object Retrieval Failed ") - return shim.Error("CloseAuction(): Auction Object Retrieval Failed ") - } - - aucR, err := JSONtoAucReq(Avalbytes) - if err != nil { - fmt.Println("CloseAuction(): Auction Object Unmarshalling Failed ") - return shim.Error("CloseAuction(): Auction Object UnMarshalling Failed ") - } - - // Update Auction Status - aucR.Status = "CLOSED" - fmt.Println("CloseAuction(): UpdateAuctionStatus() successful ", aucR) - - response := UpdateAuctionStatus(stub, "Auction", aucR) - if response.Status != shim.OK { - fmt.Println("CloseAuction(): UpdateAuctionStatus() Failed ") - return shim.Error("CloseAuction(): 
UpdateAuctionStatus() Failed ") - } - Avalbytes = response.Payload - - // Remove the Auction from Open Bucket - keys := []string{"2016", aucR.AuctionID} - err = DeleteObject(stub, "AucOpen", keys) - if err != nil { - fmt.Println("CloseAuction(): DeleteFromLedger(AucOpenTable) Failed ") - return shim.Error("CloseAuction(): DeleteFromLedger(AucOpen) Failed ") - } - - fmt.Println("CloseAuction(): Proceeding to process the highest bid ") - - // Process Final Bid - Turn it into a Transaction - response = GetHighestBid(stub, "GetHighestBid", []string{args[0]}) - Avalbytes = response.Payload - if Avalbytes == nil { - fmt.Println("CloseAuction(): No bids available, no change in Item Status - PostTransaction() Completed Successfully ") - return shim.Success(Avalbytes) - } - - if response.Status != shim.OK { - fmt.Println("CloseAuction(): No bids available, error encountered - PostTransaction() failed ") - return shim.Error(err.Error()) - } - - bid, _ := JSONtoBid(Avalbytes) - fmt.Println("CloseAuction(): Proceeding to process the highest bid ", bid) - tran := BidtoTransaction(bid) - fmt.Println("CloseAuction(): Converting Bid to tran ", tran) - - // Process the last bid once Time Expires - tranArgs := []string{tran.AuctionID, tran.RecType, tran.ItemID, tran.TransType, tran.UserId, tran.TransDate, tran.HammerTime, tran.HammerPrice, tran.Details} - fmt.Println("CloseAuction(): Proceeding to process the Transaction ", tranArgs) - - response = PostTransaction(stub, "PostTransaction", tranArgs) - if response.Status != shim.OK { - fmt.Println("CloseAuction(): PostTransaction() Failed ") - return shim.Error("CloseAuction(): PostTransaction() Failed ") - } - Avalbytes = response.Payload - fmt.Println("CloseAuction(): PostTransaction() Completed Successfully ") - return shim.Success(Avalbytes) - -} - -//////////////////////////////////////////////////////////////////////////////////////////// -// Buy It Now -// Rules: -// If Buy IT Now Option is available then a Buyer has the option 
to buy the ITEM -// before the bids exceed BuyITNow Price . Normally, The application should take of this -// at the UI level and this chain-code assumes application has validated that -//////////////////////////////////////////////////////////////////////////////////////////// - -func BuyItNow(stub shim.ChaincodeStubInterface, function string, args []string) pb.Response { - - // Process Final Bid - Turn it into a Transaction - response := GetHighestBid(stub, "GetHighestBid", []string{args[0]}) - bid, err := JSONtoBid(response.Payload) - if err != nil { - return shim.Error("BuyItNow() : JSONtoBid Error") - } - - // Check if BuyItNow Price > Highest Bid so far - binP, err := strconv.Atoi(args[5]) - if err != nil { - return shim.Error("BuyItNow() : Invalid BuyItNow Price") - } - - hbP, err := strconv.Atoi(bid.BidPrice) - if err != nil { - return shim.Error("BuyItNow() : Invalid Highest Bid Price") - } - - if hbP > binP { - return shim.Error("BuyItNow() : Highest Bid Price > BuyItNow Price - BuyItNow Rejected") - } - - // Close The Auction - Fetch Auction Object - Avalbytes, err := QueryObject(stub, "Auction", []string{args[0]}) - if err != nil { - fmt.Println("BuyItNow(): Auction Object Retrieval Failed ") - return shim.Error("BuyItNow(): Auction Object Retrieval Failed ") - } - - aucR, err := JSONtoAucReq(Avalbytes) - if err != nil { - fmt.Println("BuyItNow(): Auction Object Unmarshalling Failed ") - return shim.Error("BuyItNow(): Auction Object UnMarshalling Failed ") - } - - // Update Auction Status - aucR.Status = "CLOSED" - fmt.Println("BuyItNow(): UpdateAuctionStatus() successful ", aucR) - - response = UpdateAuctionStatus(stub, "Auction", aucR) - if response.Status != shim.OK { - fmt.Println("BuyItNow(): UpdateAuctionStatus() Failed ") - return shim.Error("BuyItNow(): UpdateAuctionStatus() Failed ") - } - Avalbytes = response.Payload - - // Remove the Auction from Open Bucket - keys := []string{"2016", aucR.AuctionID} - err = DeleteObject(stub, "AucOpen", 
keys) - if err != nil { - fmt.Println("BuyItNow(): DeleteFromLedger(AucOpen) Failed ") - return shim.Error("BuyItNow(): DeleteFromLedger(AucOpen) Failed ") - } - - fmt.Println("BuyItNow(): Proceeding to process the highest bid ") - - // Convert the BuyITNow to a Bid type struct - buyItNowBid, err := CreateBidObject(args[0:]) - if err != nil { - return shim.Error(err.Error()) - } - - // Reject the offer if the Buyer Information Is not Valid or not registered on the Block Chain - response = ValidateMember(stub, args[4]) - if response.Status != shim.OK { - fmt.Println("BuyItNow() : Failed Buyer not registered on the block-chain ", args[4]) - return shim.Error(err.Error()) - } - - buyerInfo := response.Payload - fmt.Println("Buyer information ", buyerInfo, args[4]) - - tran := BidtoTransaction(buyItNowBid) - fmt.Println("BuyItNow(): Converting Bid to tran ", tran) - - // Process the buy-it-now offer - tranArgs := []string{tran.AuctionID, tran.RecType, tran.ItemID, tran.TransType, tran.UserId, tran.TransDate, tran.HammerTime, tran.HammerPrice, tran.Details} - fmt.Println("BuyItNow(): Proceeding to process the Transaction ", tranArgs) - - response = PostTransaction(stub, "PostTransaction", tranArgs) - if response.Status != shim.OK { - fmt.Println("BuyItNow(): PostTransaction() Failed ") - return shim.Error("CloseAuction(): PostTransaction() Failed ") - } - - fmt.Println("BuyItNow(): PostTransaction() Completed Successfully ") - return response -} - -////////////////////////////////////////////////////////////////////////// -// Update the Auction Object -// This function updates the status of the auction -// from INIT to OPEN to CLOSED -////////////////////////////////////////////////////////////////////////// - -func UpdateAuctionStatus(stub shim.ChaincodeStubInterface, tableName string, ar AuctionRequest) pb.Response { - - buff, err := AucReqtoJSON(ar) - if err != nil { - fmt.Println("UpdateAuctionStatus() : Failed Cannot create object buffer for write : ", 
ar.AuctionID) - return shim.Error("UpdateAuctionStatus(): Failed Cannot create object buffer for write : " + ar.AuctionID) - } - - // Update the ledger with the Buffer Data - //keys := []string{ar.AuctionID, ar.ItemID} - keys := []string{ar.AuctionID} - err = ReplaceObject(stub, "Auction", keys, buff) - if err != nil { - fmt.Println("UpdateAuctionStatus() : write error while inserting record\n") - return shim.Error(err.Error()) - } - return shim.Success(buff) -} - -///////////////////////////////////////////////////////////////////////////////////////////// -// Return the right Object Buffer after validation to write to the ledger -// var recType = []string{"ARTINV", "USER", "BID", "AUCREQ", "POSTTRAN", "OPENAUC", "CLAUC"} -///////////////////////////////////////////////////////////////////////////////////////////// - -func ProcessQueryResult(stub shim.ChaincodeStubInterface, Avalbytes []byte, args []string) error { - - // Identify Record Type by scanning the args for one of the recTypes - // This is kind of a post-processor once the query fetches the results - // RecType is the style of programming in the punch card days .. - // ... 
well - - var dat map[string]interface{} - - if err := json.Unmarshal(Avalbytes, &dat); err != nil { - panic(err) - } - - var recType string - recType = dat["RecType"].(string) - switch recType { - - case "ARTINV": - - ar, err := JSONtoAR(Avalbytes) // - if err != nil { - fmt.Println("ProcessRequestType(): Cannot create itemObject \n") - return err - } - // Decrypt Image and Save Image in a file - image := Decrypt(ar.AES_Key, ar.ItemImage) - if err != nil { - fmt.Println("ProcessRequestType() : Image decrytion failed ") - return err - } - fmt.Println("ProcessRequestType() : Image conversion from byte[] to file Successful ") - err = ByteArrayToImage(image, "copy."+ar.ItemPicFN) - if err != nil { - - fmt.Println("ProcessRequestType() : Image conversion from byte[] to file failed ") - return err - } - return err - - case "USER": - ur, err := JSONtoUser(Avalbytes) // - if err != nil { - return err - } - fmt.Println("ProcessRequestType() : ", ur) - return err - - case "AUCREQ": - ar, err := JSONtoAucReq(Avalbytes) // - if err != nil { - return err - } - fmt.Println("ProcessRequestType() : ", ar) - return err - - case "OPENAUC": - ar, err := JSONtoAucReq(Avalbytes) // - if err != nil { - return err - } - fmt.Println("ProcessRequestType() : ", ar) - return err - case "CLAUC": - ar, err := JSONtoAucReq(Avalbytes) // - if err != nil { - return err - } - fmt.Println("ProcessRequestType() : ", ar) - return err - case "POSTTRAN": - atr, err := JSONtoTran(Avalbytes) // - if err != nil { - return err - } - fmt.Println("ProcessRequestType() : ", atr) - return err - case "BID": - bid, err := JSONtoBid(Avalbytes) // - if err != nil { - return err - } - fmt.Println("ProcessRequestType() : ", bid) - return err - case "DEFAULT": - return nil - case "XFER": - return nil - default: - return errors.New("Unknown") - } - return nil - -} diff --git a/app/platform/fabric/e2e-test/chaincodes/auctionapp/image_proc_api.go b/app/platform/fabric/e2e-test/chaincodes/auctionapp/image_proc_api.go 
deleted file mode 100644 index 75f42466e..000000000 --- a/app/platform/fabric/e2e-test/chaincodes/auctionapp/image_proc_api.go +++ /dev/null @@ -1,263 +0,0 @@ -/****************************************************************** -Copyright IT People Corp. 2017 All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - -http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. - -******************************************************************/ - -/////////////////////////////////////////////////////////////////////// -// Author : IT People - Mohan Venkataraman - image API -// Purpose: Explore the Hyperledger/fabric and understand -// how to write an chain code, application/chain code boundaries -// The code is not the best as it has just hammered out in a day or two -// Feedback and updates are appreciated -/////////////////////////////////////////////////////////////////////// - -package main - -import ( - "bufio" - "bytes" - "crypto/aes" - "crypto/cipher" - "crypto/rand" - "errors" - "fmt" - "image" - "image/gif" - "image/jpeg" - "image/png" - "io" - "net/http" - "os" -) - -/////////////////////////////////////////////////////////// -// Convert Image to []bytes and viceversa -// Detect Image Filetype -// Image Function to read an image and create a byte array -// Currently only PNG images are supported -/////////////////////////////////////////////////////////// -func ImageToByteArray(imageFile string) ([]byte, string) { - - file, err := os.Open(imageFile) - - if err != nil { - fmt.Println("imageToByteArray() : cannot OPEN image file 
", err) - return nil, string("imageToByteArray() : cannot OPEN image file ") - } - - defer file.Close() - - fileInfo, _ := file.Stat() - var size int64 = fileInfo.Size() - bytes := make([]byte, size) - - // read file into bytes - buff := bufio.NewReader(file) - _, err = buff.Read(bytes) - - if err != nil { - fmt.Println("imageToByteArray() : cannot READ image file") - return nil, string("imageToByteArray() : cannot READ image file ") - } - - filetype := http.DetectContentType(bytes) - fmt.Println("imageToByteArray() : ", filetype) - //filetype := GetImageType(bytes) - - return bytes, filetype -} - -////////////////////////////////////////////////////// -// If Valid fileType, will have "image" as first word -////////////////////////////////////////////////////// -func GetImageType(buff []byte) string { - filetype := http.DetectContentType(buff) - - switch filetype { - case "image/jpeg", "image/jpg": - return filetype - - case "image/gif": - return filetype - - case "image/png": - return filetype - - case "application/pdf": // not image, but application ! 
- filetype = "application/pdf" - default: - filetype = "Unknown" - } - return filetype -} - -//////////////////////////////////////////////////////////// -// Converts a byteArray into an image and saves it -// into an appropriate file -// It is important to get the file type before saving the -// file by call the GetImageType -//////////////////////////////////////////////////////////// -func ByteArrayToImage(imgByte []byte, imageFile string) error { - - // convert []byte to image for saving to file - img, _, _ := image.Decode(bytes.NewReader(imgByte)) - - fmt.Println("ProcessQueryResult ByteArrayToImage : proceeding to create image ") - - //save the imgByte to file - out, err := os.Create(imageFile) - - if err != nil { - fmt.Println("ByteArrayToImage() : cannot CREATE image file ", err) - return errors.New("ByteArrayToImage() : cannot CREATE image file ") - } - fmt.Println("ProcessRequestType ByteArrayToImage : proceeding to Encode image ") - - //err = png.Encode(out, img) - filetype := http.DetectContentType(imgByte) - - switch filetype { - case "image/jpeg", "image/jpg": - var opt jpeg.Options - opt.Quality = 100 - err = jpeg.Encode(out, img, &opt) - - case "image/gif": - var opt gif.Options - opt.NumColors = 256 - err = gif.Encode(out, img, &opt) - - case "image/png": - err = png.Encode(out, img) - - default: - err = errors.New("Only PMNG, JPG and GIF Supported ") - } - - if err != nil { - fmt.Println("ByteArrayToImage() : cannot ENCODE image file ", err) - return errors.New("ByteArrayToImage() : cannot ENCODE image file ") - } - - // everything ok - fmt.Println("Image file generated and saved to ", imageFile) - return nil -} - -/////////////////////////////////////////////////////////////////////// -// Encryption and Decryption Section -// Images will be Encrypted and stored and the key will be part of the -// certificate that is provided to the Owner -/////////////////////////////////////////////////////////////////////// - -const ( - AESKeyLength = 32 // 
AESKeyLength is the default AES key length - NonceSize = 24 // NonceSize is the default NonceSize -) - -/////////////////////////////////////////////////// -// GetRandomBytes returns len random looking bytes -/////////////////////////////////////////////////// -func GetRandomBytes(len int) ([]byte, error) { - key := make([]byte, len) - - _, err := rand.Read(key) - if err != nil { - return nil, err - } - - return key, nil -} - -//////////////////////////////////////////////////////////// -// GenAESKey returns a random AES key of length AESKeyLength -// 3 Functions to support Encryption and Decryption -// GENAESKey() - Generates AES symmetric key -// Encrypt() Encrypts a [] byte -// Decrypt() Decryts a [] byte -//////////////////////////////////////////////////////////// -func GenAESKey() ([]byte, error) { - return GetRandomBytes(AESKeyLength) -} - -func PKCS5Pad(src []byte) []byte { - padding := aes.BlockSize - len(src)%aes.BlockSize - pad := bytes.Repeat([]byte{byte(padding)}, padding) - return append(src, pad...) 
-} - -func PKCS5Unpad(src []byte) []byte { - len := len(src) - unpad := int(src[len-1]) - return src[:(len - unpad)] -} - -func Decrypt(key []byte, ciphertext []byte) []byte { - - // Create the AES cipher - block, err := aes.NewCipher(key) - if err != nil { - panic(err) - } - - // Before even testing the decryption, - // if the text is too small, then it is incorrect - if len(ciphertext) < aes.BlockSize { - panic("Text is too short") - } - - // Get the 16 byte IV - iv := ciphertext[:aes.BlockSize] - - // Remove the IV from the ciphertext - ciphertext = ciphertext[aes.BlockSize:] - - // Return a decrypted stream - stream := cipher.NewCFBDecrypter(block, iv) - - // Decrypt bytes from ciphertext - stream.XORKeyStream(ciphertext, ciphertext) - - return ciphertext -} - -func Encrypt(key []byte, ba []byte) []byte { - - // Create the AES cipher - block, err := aes.NewCipher(key) - if err != nil { - panic(err) - } - - // Empty array of 16 + ba length - // Include the IV at the beginning - ciphertext := make([]byte, aes.BlockSize+len(ba)) - - // Slice of first 16 bytes - iv := ciphertext[:aes.BlockSize] - - // Write 16 rand bytes to fill iv - if _, err := io.ReadFull(rand.Reader, iv); err != nil { - panic(err) - } - - // Return an encrypted stream - stream := cipher.NewCFBEncrypter(block, iv) - - // Encrypt bytes from ba to ciphertext - stream.XORKeyStream(ciphertext[aes.BlockSize:], ba) - - return ciphertext -} diff --git a/app/platform/fabric/e2e-test/chaincodes/auctionapp/table_api.go b/app/platform/fabric/e2e-test/chaincodes/auctionapp/table_api.go deleted file mode 100644 index a3caf7695..000000000 --- a/app/platform/fabric/e2e-test/chaincodes/auctionapp/table_api.go +++ /dev/null @@ -1,484 +0,0 @@ -/****************************************************************** -Copyright IT People Corp. 2017 All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - -http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. - -******************************************************************/ - -/////////////////////////////////////////////////////////////////////// -// Author : IT People - Mohan Venkataraman - table API for v1.0 -// Purpose: Explore the Hyperledger/fabric and understand -// how to write an chain code, application/chain code boundaries -// The code is not the best as it has just hammered out in a day or two -// Feedback and updates are appreciated -/////////////////////////////////////////////////////////////////////// - -package main - -import ( - "bytes" - "errors" - "fmt" - - "github.com/hyperledger/fabric/core/chaincode/shim" -) - -////////////////////////////////////////////////////////////////////////////////////////////////// -// The recType is a mandatory attribute. The original app was written with a single table -// in mind. The only way to know how to process a record was the 70's style 80 column punch card -// which used a record type field. The array below holds a list of valid record types. 
-// This could be stored on a blockchain table or an application -////////////////////////////////////////////////////////////////////////////////////////////////// -var recType = []string{"ARTINV", "USER", "BID", "AUCREQ", "POSTTRAN", "OPENAUC", "CLAUC", "XFER", "VERIFY", "DOWNLOAD"} - -////////////////////////////////////////////////////////////////////////////////////////////////// -// The following array holds the list of tables that should be created -// The deploy/init deletes the tables and recreates them every time a deploy is invoked -////////////////////////////////////////////////////////////////////////////////////////////////// -var Objects = []string{"PARTY", "CASHTXN", "User", "UserCat", "Item", "ItemCat", "ItemHistory", "Auction", "AucInit", "AucOpen", "Bid", "Trans"} - -///////////////////////////////////////////////////////////////////////////////////////////////////// -// A Map that holds ObjectNames and the number of Keys -// This information is used to dynamically Create, Update -// Replace , and Query the Ledger -// In this model all attributes in a table are strings -// The chain code does both validation -// A dummy key like 2016 in some cases is used for a query to get all rows -// -// "User": 1, Key: UserID -// "Item": 1, Key: ItemID -// "UserCat": 3, Key: "2016", UserType, UserID -// "ItemCat": 3, Key: "2016", ItemSubject, ItemID -// "Auction": 1, Key: AuctionID -// "AucInit": 2, Key: Year, AuctionID -// "AucOpen": 2, Key: Year, AuctionID -// "Trans": 2, Key: AuctionID, ItemID -// "Bid": 2, Key: AuctionID, BidNo -// "ItemHistory": 4, Key: ItemID, Status, AuctionHouseID(if applicable),date-time -// -// The additional key is the ObjectType (aka ObjectName or Object). 
The keys would be -// keys: {"User", UserId} or keys: {"AuctInit", "2016", "1134"} -///////////////////////////////////////////////////////////////////////////////////////////////////// - -func GetNumberOfKeys(tname string) int { - ObjectMap := map[string]int{ - "User": 1, - "Item": 1, - "UserCat": 3, - "ItemCat": 3, - "Auction": 1, - "AucInit": 2, - "AucOpen": 2, - "Trans": 2, - "Bid": 2, - "ItemHistory": 4, - "PARTY": 2, - "CASHTXN": 1, - } - return ObjectMap[tname] -} - -///////////////////////////////////////////////////////////////// -// This function checks the incoming args for a valid record -// type entry as per the declared array recType[] -// The rectType attribute can be anywhere in the args or struct -// not necessarily in args[1] as per my old logic -// The Request type is used to direct processing -// the record accordingly e: recType is "USER" -// "Args":["PostUser","100", "USER", "Ashley Hart", "TRD", "Morrisville Parkway, #216, Morrisville, NC 27560", -// "9198063535", "ashley@it people.com", "SUNTRUST", "0001732345", "0234678", "2017-01-02 15:04:05"]}' -///////////////////////////////////////////////////////////////// -func ChkRecType(args []string) bool { - for _, rt := range args { - for _, val := range recType { - if val == rt { - return true - } - } - } - return false -} - -///////////////////////////////////////////////////////////////// -// Checks if the incoming invoke has a valid requesType -// The Request type is used to process the record accordingly -// Old Logic (see new logic up) -///////////////////////////////////////////////////////////////// -func CheckRecType(rt string) bool { - for _, val := range recType { - if val == rt { - fmt.Println("CheckRequestType() : Valid Request Type , val : ", val, rt, "\n") - return true - } - } - fmt.Println("CheckRequestType() : Invalid Request Type , val : ", rt, "\n") - return false -} - -///////////////////////////////////////////////////////////////// -// Checks if the args contain a valid 
Record Type. Typically, this -// model expects the Object Type to be args[2] but -// for the sake of flexibility, it scans the input data for -// a valid type if available -///////////////////////////////////////////////////////////////// -func IdentifyRecType(args []string) (string, error) { - for _, rt := range args { - for _, val := range recType { - if val == rt { - return rt, nil - } - } - } - return "", fmt.Errorf("IdentifyRecType: Not Found") -} - -///////////////////////////////////////////////////////////////// -// Checks if the args contain a valid Object Type. Typically, this -// model expects the Object Type to be args[0] but -// for the sake of flexibility, it scans the input data for -// a valid type if available -///////////////////////////////////////////////////////////////// -func IdentifyObjectType(args []string) (string, error) { - for _, rt := range args { - for _, val := range Objects { - if val == rt { - return rt, nil - } - } - } - return "", fmt.Errorf("IdentifyObjectType: Object Not Found") -} - -//////////////////////////////////////////////////////////////////////////// -// Open a Ledgers if one does not exist -// These ledgers will be used to write / read data -//////////////////////////////////////////////////////////////////////////// -func InitObject(stub shim.ChaincodeStubInterface, objectType string, keys []string) error { - - fmt.Println(">> Not Implemented Yet << Initializing Object : ", objectType, " Keys: ", keys) - return nil -} - -//////////////////////////////////////////////////////////////////////////// -// Update the Object - Replace current data with replacement -// Register users into this table -//////////////////////////////////////////////////////////////////////////// -func UpdateObject(stub shim.ChaincodeStubInterface, objectType string, keys []string, objectData []byte) error { - - // Check how many keys - - err := VerifyAtLeastOneKeyIsPresent(objectType, keys) - if err != nil { - return err - } - - // Convert 
keys to compound key - compositeKey, _ := stub.CreateCompositeKey(objectType, keys) - - // Add Object JSON to state - err = stub.PutState(compositeKey, objectData) - if err != nil { - fmt.Println("UpdateObject() : Error inserting Object into State Database %s", err) - return err - } - - return nil - -} - -//////////////////////////////////////////////////////////////////////////////////////////////////////////// -// Retrieve the object based on the key and simply delete it -// -//////////////////////////////////////////////////////////////////////////////////////////////////////////// -func DeleteObject(stub shim.ChaincodeStubInterface, objectType string, keys []string) error { - - // Check how many keys - - err := VerifyAtLeastOneKeyIsPresent(objectType, keys) - if err != nil { - return err - } - - // Convert keys to compound key - compositeKey, _ := stub.CreateCompositeKey(objectType, keys) - - // Remove object from the State Database - err = stub.DelState(compositeKey) - if err != nil { - fmt.Println("DeleteObject() : Error deleting Object into State Database %s", err) - return err - } - fmt.Println("DeleteObject() : ", "Object : ", objectType, " Key : ", compositeKey) - - return nil -} - -//////////////////////////////////////////////////////////////////////////////////////////////////////////// -// Delete all objects of ObjectType -// -//////////////////////////////////////////////////////////////////////////////////////////////////////////// -func DeleteAllObjects(stub shim.ChaincodeStubInterface, objectType string) error { - - // Convert keys to compound key - compositeKey, _ := stub.CreateCompositeKey(objectType, []string{""}) - - // Remove object from the State Database - err := stub.DelState(compositeKey) - if err != nil { - fmt.Println("DeleteAllObjects() : Error deleting all Object into State Database %s", err) - return err - } - fmt.Println("DeleteObject() : ", "Object : ", objectType, " Key : ", compositeKey) - - return nil -} - 
-//////////////////////////////////////////////////////////////////////////////////////////////////////////// -// Replaces the Entry in the Ledger -// The existing object is simply queried and the data contents is replaced with -// new content -//////////////////////////////////////////////////////////////////////////////////////////////////////////// -func ReplaceObject(stub shim.ChaincodeStubInterface, objectType string, keys []string, objectData []byte) error { - - // Check how many keys - - err := VerifyAtLeastOneKeyIsPresent(objectType, keys) - if err != nil { - return err - } - - // Convert keys to compound key - compositeKey, _ := stub.CreateCompositeKey(objectType, keys) - - // Add Party JSON to state - err = stub.PutState(compositeKey, objectData) - if err != nil { - fmt.Println("ReplaceObject() : Error replacing Object in State Database %s", err) - return err - } - - fmt.Println("ReplaceObject() : - end init object ", objectType) - return nil -} - -//////////////////////////////////////////////////////////////////////////// -// Query a User Object by Object Name and Key -// This has to be a full key and should return only one unique object -//////////////////////////////////////////////////////////////////////////// -func QueryObject(stub shim.ChaincodeStubInterface, objectType string, keys []string) ([]byte, error) { - - // Check how many keys - - err := VerifyAtLeastOneKeyIsPresent(objectType, keys) - if err != nil { - return nil, err - } - - compoundKey, _ := stub.CreateCompositeKey(objectType, keys) - fmt.Println("QueryObject() : Compound Key : ", compoundKey) - - Avalbytes, err := stub.GetState(compoundKey) - if err != nil { - return nil, err - } - - return Avalbytes, nil -} - -//////////////////////////////////////////////////////////////////////////// -// Query a User Object by Object Name and Key -// This has to be a full key and should return only one unique object -//////////////////////////////////////////////////////////////////////////// 
-func QueryObjectWithProcessingFunction(stub shim.ChaincodeStubInterface, objectType string, keys []string, fname func(shim.ChaincodeStubInterface, []byte, []string) error) ([]byte, error) { - - // Check how many keys - - err := VerifyAtLeastOneKeyIsPresent(objectType, keys) - if err != nil { - return nil, err - } - - compoundKey, _ := stub.CreateCompositeKey(objectType, keys) - fmt.Println("QueryObject: Compound Key : ", compoundKey) - - Avalbytes, err := stub.GetState(compoundKey) - if err != nil { - return nil, err - } - - if Avalbytes == nil { - return nil, fmt.Errorf("QueryObject: No Data Found for Compound Key : ", compoundKey) - } - - // Perform Any additional processing of data - fmt.Println("fname() : Successful - Proceeding to fname") - - err = fname(stub, Avalbytes, keys) - if err != nil { - fmt.Println("QueryLedger() : Cannot execute : ", fname) - jsonResp := "{\"fname() Error\":\" Cannot create Object for key " + compoundKey + "\"}" - return Avalbytes, errors.New(jsonResp) - } - - return Avalbytes, nil -} - -//////////////////////////////////////////////////////////////////////////// -// Get a List of Rows based on query criteria from the OBC -// The getList Function -//////////////////////////////////////////////////////////////////////////// -func GetKeyList(stub shim.ChaincodeStubInterface, args []string) (shim.StateQueryIteratorInterface, error) { - - // Define partial key to query within objects namespace (objectType) - objectType := args[0] - - // Check how many keys - - err := VerifyAtLeastOneKeyIsPresent(objectType, args[1:]) - if err != nil { - return nil, err - } - - // Execute the Query - // This will execute a key range query on all keys starting with the compound key - resultsIterator, err := stub.GetStateByPartialCompositeKey(objectType, args[1:]) - if err != nil { - return nil, err - } - - defer resultsIterator.Close() - - // Iterate through result set - var i int - for i = 0; resultsIterator.HasNext(); i++ { - - // Retrieve the Key and 
Object - myCompositeKey, err := resultsIterator.Next() - if err != nil { - return nil, err - } - fmt.Println("GetList() : my Value : ", myCompositeKey) - } - return resultsIterator, nil -} - -/////////////////////////////////////////////////////////////////////////////////////////// -// GetQueryResultForQueryString executes the passed in query string. -// Result set is built and returned as a byte array containing the JSON results. -/////////////////////////////////////////////////////////////////////////////////////////// -func GetQueryResultForQueryString(stub shim.ChaincodeStubInterface, queryString string) ([]byte, error) { - - fmt.Println("GetQueryResultForQueryString() : getQueryResultForQueryString queryString:\n%s\n", queryString) - - resultsIterator, err := stub.GetQueryResult(queryString) - if err != nil { - return nil, err - } - defer resultsIterator.Close() - - // buffer is a JSON array containing QueryRecords - var buffer bytes.Buffer - buffer.WriteString("[") - - bArrayMemberAlreadyWritten := false - for resultsIterator.HasNext() { - queryResponse, err := resultsIterator.Next() - if err != nil { - return nil, err - } - // Add a comma before array members, suppress it for the first array member - if bArrayMemberAlreadyWritten == true { - buffer.WriteString(",") - } - buffer.WriteString("{\"Key\":") - buffer.WriteString("\"") - buffer.WriteString(queryResponse.Key) - buffer.WriteString("\"") - - buffer.WriteString(", \"Record\":") - // Record is a JSON object, so we write as-is - buffer.WriteString(string(queryResponse.Value)) - buffer.WriteString("}") - bArrayMemberAlreadyWritten = true - } - buffer.WriteString("]") - - fmt.Println("GetQueryResultForQueryString(): getQueryResultForQueryString queryResult:\n%s\n", buffer.String()) - - return buffer.Bytes(), nil -} - -////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// -// Retrieve a list of Objects from the Query -// The function 
returns an iterator from which objects can be retrieved. -// defer rs.Close() -// -// // Iterate through result set -// var i int -// for i = 0; rs.HasNext(); i++ { -// -// // We can process whichever return value is of interest -// myKey , myKeyVal , err := rs.Next() -// if err != nil { -// return shim.Success(nil) -// } -// bob, _ := JSONtoUser(myKeyVal) -// fmt.Println("GetList() : my Value : ", bob) -// } -// -// eg: Args":["fetchlist", "PARTY","CHK"]} -// fetchList is the function that calls getList : ObjectType = "Party" and key is "CHK" -////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// -func GetList(stub shim.ChaincodeStubInterface, objectType string, keys []string) (shim.StateQueryIteratorInterface, error) { - - // Check how many keys - - err := VerifyAtLeastOneKeyIsPresent(objectType, keys) - if err != nil { - return nil, err - } - - // Get Result set - resultIter, err := stub.GetStateByPartialCompositeKey(objectType, keys) - fmt.Println("GetList(): Retrieving Objects into an array") - if err != nil { - return nil, err - } - - // Return iterator for result set - // Use code above to retrieve objects - return resultIter, nil -} - -//////////////////////////////////////////////////////////////////////////// -// This function verifies if the number of key provided is at least 1 and -// < the max keys defined for the Object -//////////////////////////////////////////////////////////////////////////// - -func VerifyAtLeastOneKeyIsPresent(objectType string, args []string) error { - - // Check how many keys - nKeys := GetNumberOfKeys(objectType) - nCol := len(args) - if nCol == 1 { - return nil - } - - if nCol < 1 { - error_str := fmt.Sprintf("VerifyAtLeastOneKeyIsPresent() Failed: Atleast 1 Key must is needed : nKeys : %s, nCol : %s ", nKeys, nCol) - fmt.Println(error_str) - return errors.New(error_str) - } - - return nil -} diff --git 
a/app/platform/fabric/e2e-test/chaincodes/autovendor/chaincode/main.go b/app/platform/fabric/e2e-test/chaincodes/autovendor/chaincode/main.go deleted file mode 100644 index dedc926a1..000000000 --- a/app/platform/fabric/e2e-test/chaincodes/autovendor/chaincode/main.go +++ /dev/null @@ -1,43 +0,0 @@ -/* - * Copyright Greg Haskins All Rights Reserved - * - * SPDX-License-Identifier: Apache-2.0 - * - * The purpose of this test code is to prove that the system properly packages - * up dependencies. We therefore synthesize the scenario where a chaincode - * imports non-standard dependencies both directly and indirectly and then - * expect a unit-test to verify that the package includes everything needed - * and ultimately builds properly. - * - */ - -package main - -import ( - "fmt" - - "github.com/hyperledger/fabric/core/chaincode/shim" - pb "github.com/hyperledger/fabric/protos/peer" - "github.com/hyperledger/fabric/test/chaincodes/AutoVendor/directdep" -) - -// SimpleChaincode example simple Chaincode implementation -type SimpleChaincode struct { -} - -func (t *SimpleChaincode) Init(stub shim.ChaincodeStubInterface) pb.Response { - return shim.Error("NOT IMPL") -} - -func (t *SimpleChaincode) Invoke(stub shim.ChaincodeStubInterface) pb.Response { - return shim.Error("NOT IMPL") -} - -func main() { - directdep.PointlessFunction() - - err := shim.Start(new(SimpleChaincode)) - if err != nil { - fmt.Printf("Error starting Simple chaincode: %s", err) - } -} diff --git a/app/platform/fabric/e2e-test/chaincodes/autovendor/directdep/core.go b/app/platform/fabric/e2e-test/chaincodes/autovendor/directdep/core.go deleted file mode 100644 index a19928a14..000000000 --- a/app/platform/fabric/e2e-test/chaincodes/autovendor/directdep/core.go +++ /dev/null @@ -1,17 +0,0 @@ -/* - * Copyright Greg Haskins All Rights Reserved - * - * SPDX-License-Identifier: Apache-2.0 - * - * See github.com/hyperledger/fabric/test/chaincodes/AutoVendor/chaincode/main.go for details - */ -package 
directdep - -import ( - "github.com/hyperledger/fabric/test/chaincodes/AutoVendor/indirectdep" -) - -func PointlessFunction() { - // delegate to our indirect dependency - indirectdep.PointlessFunction() -} diff --git a/app/platform/fabric/e2e-test/chaincodes/autovendor/indirectdep/core.go b/app/platform/fabric/e2e-test/chaincodes/autovendor/indirectdep/core.go deleted file mode 100644 index 4a3fa9d3e..000000000 --- a/app/platform/fabric/e2e-test/chaincodes/autovendor/indirectdep/core.go +++ /dev/null @@ -1,14 +0,0 @@ -/* - * Copyright Greg Haskins All Rights Reserved - * - * SPDX-License-Identifier: Apache-2.0 - * - * See github.com/hyperledger/fabric/test/chaincodes/AutoVendor/chaincode/main.go for details - */ -package indirectdep - -import "fmt" - -func PointlessFunction() { - fmt.Printf("Successfully invoked pointless function\n") -} diff --git a/app/platform/fabric/e2e-test/chaincodes/badimport/main.go b/app/platform/fabric/e2e-test/chaincodes/badimport/main.go deleted file mode 100644 index f2a4963a5..000000000 --- a/app/platform/fabric/e2e-test/chaincodes/badimport/main.go +++ /dev/null @@ -1,25 +0,0 @@ -/* - * Copyright Greg Haskins All Rights Reserved - * - * SPDX-License-Identifier: Apache-2.0 - * - */ - -package main - -import ( - "github.com/hyperledger/fabric/core/chaincode/shim" - "fmt" - "bogus/package" -) - -// SimpleChaincode example simple Chaincode implementation -type SimpleChaincode struct { -} - -func main() { - err := shim.Start(new(SimpleChaincode)) - if err != nil { - fmt.Printf("Error starting Simple chaincode: %s", err) - } -} diff --git a/app/platform/fabric/e2e-test/chaincodes/example02/node/chaincode_example02.js b/app/platform/fabric/e2e-test/chaincodes/example02/node/chaincode_example02.js deleted file mode 100644 index 3ace69b1d..000000000 --- a/app/platform/fabric/e2e-test/chaincodes/example02/node/chaincode_example02.js +++ /dev/null @@ -1,141 +0,0 @@ -/* -# Copyright IBM Corp. All Rights Reserved. 
-# -# SPDX-License-Identifier: Apache-2.0 -*/ - -const shim = require('fabric-shim'); -const util = require('util'); - -var Chaincode = class { - - // Initialize the chaincode - async Init(stub) { - console.info('========= example02 Init ========='); - let ret = stub.getFunctionAndParameters(); - console.info(ret); - let args = ret.params; - // initialise only if 4 parameters passed. - if (args.length != 4) { - return shim.error('Incorrect number of arguments. Expecting 4'); - } - - let A = args[0]; - let B = args[2]; - let Aval = args[1]; - let Bval = args[3]; - - if (typeof parseInt(Aval) !== 'number' || typeof parseInt(Bval) !== 'number') { - return shim.error('Expecting integer value for asset holding'); - } - - try { - await stub.putState(A, Buffer.from(Aval)); - try { - await stub.putState(B, Buffer.from(Bval)); - return shim.success(); - } catch (err) { - return shim.error(err); - } - } catch (err) { - return shim.error(err); - } - } - - async Invoke(stub) { - console.info('Transaction ID: ' + stub.getTxID()); - console.info(util.format('Args: %j', stub.getArgs())); - - let ret = stub.getFunctionAndParameters(); - console.info(ret); - let method = this[ret.fcn]; - if (!method) { - console.log('no method of name:' + ret.fcn + ' found'); - return shim.success(); - } - try { - let payload = await method(stub, ret.params); - return shim.success(payload); - } catch (err) { - console.log(err); - return shim.error(err); - } - } - - async invoke(stub, args) { - if (args.length != 3) { - throw new Error('Incorrect number of arguments. 
Expecting 3'); - } - - let A = args[0]; - let B = args[1]; - if (!A || !B) { - throw new Error('asset holding must not be empty'); - } - - // Get the state from the ledger - let Avalbytes = await stub.getState(A); - if (!Avalbytes) { - throw new Error('Failed to get state of asset holder A'); - } - let Aval = parseInt(Avalbytes.toString()); - - let Bvalbytes = await stub.getState(B); - if (!Bvalbytes) { - throw new Error('Failed to get state of asset holder B'); - } - - let Bval = parseInt(Bvalbytes.toString()); - // Perform the execution - let amount = parseInt(args[2]); - if (typeof amount !== 'number') { - throw new Error('Expecting integer value for amount to be transaferred'); - } - - Aval = Aval - amount; - Bval = Bval + amount; - console.info(util.format('Aval = %d, Bval = %d\n', Aval, Bval)); - - // Write the states back to the ledger - await stub.putState(A, Buffer.from(Aval.toString())); - await stub.putState(B, Buffer.from(Bval.toString())); - - } - - // Deletes an entity from state - async delete(stub, args) { - if (args.length != 1) { - throw new Error('Incorrect number of arguments. Expecting 1'); - } - - let A = args[0]; - - // Delete the key from the state in ledger - await stub.deleteState(A); - } - - // query callback representing the query of a chaincode - async query(stub, args) { - if (args.length != 1) { - throw new Error('Incorrect number of arguments. 
Expecting name of the person to query') - } - - let jsonResp = {}; - let A = args[0]; - - // Get the state from the ledger - let Avalbytes = await stub.getState(A); - if (!Avalbytes) { - jsonResp.error = 'Failed to get state for ' + A; - throw new Error(JSON.stringify(jsonResp)); - } - - jsonResp.name = A; - jsonResp.amount = Avalbytes.toString(); - console.info('Query Response:'); - console.info(jsonResp); - return Avalbytes; - } -}; - -shim.start(new Chaincode()); diff --git a/app/platform/fabric/e2e-test/chaincodes/example02/node/package.json b/app/platform/fabric/e2e-test/chaincodes/example02/node/package.json deleted file mode 100644 index 2e46000f7..000000000 --- a/app/platform/fabric/e2e-test/chaincodes/example02/node/package.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "name": "example02", - "version": "1.0.0", - "description": "example02 chaincode implemented in node.js", - "engines": { - "node": ">=8.4.0", - "npm": ">=5.3.0" - }, - "scripts": { "start" : "node chaincode_example02.js" }, - "engine-strict": true, - "engineStrict": true, - "license": "Apache-2.0", - "dependencies": { - "fabric-shim": "unstable" - } -} diff --git a/app/platform/fabric/e2e-test/chaincodes/map_private/go/map_private.go b/app/platform/fabric/e2e-test/chaincodes/map_private/go/map_private.go deleted file mode 100644 index 7897d0473..000000000 --- a/app/platform/fabric/e2e-test/chaincodes/map_private/go/map_private.go +++ /dev/null @@ -1,166 +0,0 @@ -/* -Copyright IBM Corp. 2018 All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-See the License for the specific language governing permissions and -limitations under the License. -*/ - -package main - -import ( - "fmt" - - "github.com/hyperledger/fabric/core/chaincode/shim" - pb "github.com/hyperledger/fabric/protos/peer" -) - -var logger = shim.NewLogger("simpleChaincode") - -// COLLECTION is local collection -const COLLECTION = "collectionSimple" - -// simpleChaincode allows the following transactions -// "put", "key", val - returns "OK" on success -// "get", "key" - returns val stored previously -// "getPut", "key", val - gets a values if stored and returns "OK" on success -// "getPrivate", "key" - returns private value stored previously -// "putPrivate", "key" - returns val stored previously -// "getPutPrivate", "key" - gets private value if stored and returns "OK" on success -type simpleChaincode struct { -} - -//Init implements chaincode's Init interface -func (t *simpleChaincode) Init(stub shim.ChaincodeStubInterface) pb.Response { - logger.Info("########### Init ###########") - return shim.Success(nil) -} - -//Invoke implements chaincode's Invoke interface -func (t *simpleChaincode) Invoke(stub shim.ChaincodeStubInterface) pb.Response { - function, args := stub.GetFunctionAndParameters() - if function != "invoke" { - logger.Error("Unknown function call") - return shim.Error("Unknown function call") - } - if len(args) < 2 { - logger.Errorf(fmt.Sprintf("invalid number of args %d", len(args))) - return shim.Error(fmt.Sprintf("invalid number of args %d", len(args))) - } - method := args[0] - logger.Infof(">>>>>>> Invoke method : %s ", method) - switch method { - - case "get": - return t.get(stub, args) - - case "put": - if len(args) < 3 { - logger.Errorf(fmt.Sprintf("invalid number of args for put %d", len(args))) - return shim.Error(fmt.Sprintf("invalid number of args for put %d", len(args))) - } - return t.put(stub, args) - - case "getPut": - if len(args) < 3 { - logger.Errorf(fmt.Sprintf("invalid number of args for getPut %d", 
len(args))) - return shim.Error(fmt.Sprintf("invalid number of args for getPut %d", len(args))) - } - return t.getPut(stub, args) - - case "getPrivate": - return t.getPrivate(stub, args) - - case "putPrivate": - if len(args) < 3 { - logger.Errorf(fmt.Sprintf("invalid number of args for putPrivate %d", len(args))) - return shim.Error(fmt.Sprintf("invalid number of args for putPrivate %d", len(args))) - } - return t.putPrivate(stub, args) - - case "getPutPrivate": - if len(args) < 3 { - logger.Errorf(fmt.Sprintf("invalid number of args for getPutPrivate %d", len(args))) - return shim.Error(fmt.Sprintf("invalid number of args for getPutPrivate %d", len(args))) - } - return t.getPutPrivate(stub, args) - - default: - return shim.Error(fmt.Sprintf("unknown function %s", method)) - } -} - -func (t *simpleChaincode) put(stub shim.ChaincodeStubInterface, args []string) pb.Response { - err := stub.PutState(args[1], []byte(args[2])) - if err != nil { - return shim.Error(err.Error()) - } - return shim.Success([]byte("OK")) -} - -func (t *simpleChaincode) get(stub shim.ChaincodeStubInterface, args []string) pb.Response { - // Get the state from the ledger - val, err := stub.GetState(args[1]) - if err != nil { - return shim.Error(err.Error()) - } - return shim.Success(val) -} - -func (t *simpleChaincode) getPut(stub shim.ChaincodeStubInterface, args []string) pb.Response { - // Get the state from the ledger - _, err := stub.GetState(args[1]) - if err != nil { - return shim.Error(err.Error()) - } - err = stub.PutState(args[1], []byte(args[2])) - if err != nil { - return shim.Error(err.Error()) - } - return shim.Success([]byte("OK")) -} - -func (t *simpleChaincode) putPrivate(stub shim.ChaincodeStubInterface, args []string) pb.Response { - err := stub.PutPrivateData(COLLECTION, args[1], []byte(args[2])) - if err != nil { - return shim.Error(err.Error()) - } - return shim.Success([]byte("OK")) -} - -func (t *simpleChaincode) getPrivate(stub shim.ChaincodeStubInterface, args 
[]string) pb.Response { - // Get the state from the private ledger - val, err := stub.GetPrivateData(COLLECTION, args[1]) - if err != nil { - return shim.Error(err.Error()) - } - return shim.Success(val) -} - -func (t *simpleChaincode) getPutPrivate(stub shim.ChaincodeStubInterface, args []string) pb.Response { - // Get the state from the private ledger - _, err := stub.GetPrivateData(COLLECTION, args[1]) - if err != nil { - return shim.Error(err.Error()) - } - err = stub.PutPrivateData(COLLECTION, args[1], []byte(args[2])) - if err != nil { - return shim.Error(err.Error()) - } - return shim.Success([]byte("OK")) -} - -func main() { - err := shim.Start(new(simpleChaincode)) - if err != nil { - fmt.Printf("Error starting New key per invoke: %s", err) - } -} diff --git a/app/platform/fabric/e2e-test/chaincodes/mapkeys/go/map.go b/app/platform/fabric/e2e-test/chaincodes/mapkeys/go/map.go deleted file mode 100644 index e51a8474e..000000000 --- a/app/platform/fabric/e2e-test/chaincodes/mapkeys/go/map.go +++ /dev/null @@ -1,203 +0,0 @@ -// +build !experimental - -/* -Copyright IBM Corp. All Rights Reserved. - -SPDX-License-Identifier: Apache-2.0 -*/ - -package main - -import ( - "encoding/json" - "fmt" - "strconv" - "time" - - "github.com/hyperledger/fabric/core/chaincode/shim" - pb "github.com/hyperledger/fabric/protos/peer" -) - -// This chaincode implements a simple map that is stored in the state. -// The following operations are available. 
- -// Invoke operations -// put - requires two arguments, a key and value -// remove - requires a key -// get - requires one argument, a key, and returns a value -// keys - requires no arguments, returns all keys - -// SimpleChaincode example simple Chaincode implementation -type SimpleChaincode struct { -} - -// Init is a no-op -func (t *SimpleChaincode) Init(stub shim.ChaincodeStubInterface) pb.Response { - return shim.Success(nil) -} - -// Invoke has two functions -// put - takes two arguments, a key and value, and stores them in the state -// remove - takes one argument, a key, and removes if from the state -func (t *SimpleChaincode) Invoke(stub shim.ChaincodeStubInterface) pb.Response { - function, args := stub.GetFunctionAndParameters() - fmt.Println("In mapkeys") - switch function { - - case "put": - if len(args) < 4 { - fmt.Println("Args list is too short!") - return shim.Error("Args list is too short!!!!!!!!!!!!!!!!") - } - - if len(args)%2 != 0 { - fmt.Println("Odd number of arguments. Need to supply key/value pairs!") - return shim.Error("put operation must include an even number of key/value pair arguments: [key, value]") - } - fmt.Println("In mapkeys put") - for index, element := range args { - if index%2 == 0 { - key := element - value := args[index+1] - fmt.Printf("key %s\n", key) - fmt.Printf("value %s\n", value) - - if err := stub.PutState(key, []byte(value)); err != nil { - fmt.Printf("Error putting state %s", err) - return shim.Error(fmt.Sprintf("put operation failed. Error updating state: %s", err)) - } - - } - } - fmt.Printf("Done with 'put' in mapkeys\n") - return shim.Success(nil) - - case "remove": - if len(args) < 1 { - return shim.Error("remove operation must include one argument: [key]") - } - key := args[0] - - err := stub.DelState(key) - if err != nil { - return shim.Error(fmt.Sprintf("remove operation failed. 
Error updating state: %s", err)) - } - return shim.Success(nil) - - case "get": - if len(args) < 1 { - return shim.Error("get operation must include one argument, a key") - } - key := args[0] - value, err := stub.GetState(key) - if err != nil { - return shim.Error(fmt.Sprintf("get operation failed. Error accessing state: %s", err)) - } - jsonVal, err := json.Marshal(string(value)) - return shim.Success(jsonVal) - - case "keys": - if len(args) < 2 { - return shim.Error("put operation must include two arguments, a key and value") - } - startKey := args[0] - endKey := args[1] - - //sleep needed to test peer's timeout behavior when using iterators - stime := 0 - if len(args) > 2 { - stime, _ = strconv.Atoi(args[2]) - } - - keysIter, err := stub.GetStateByRange(startKey, endKey) - if err != nil { - return shim.Error(fmt.Sprintf("keys operation failed. Error accessing state: %s", err)) - } - defer keysIter.Close() - - var keys []string - for keysIter.HasNext() { - //if sleeptime is specied, take a nap - if stime > 0 { - time.Sleep(time.Duration(stime) * time.Millisecond) - } - - response, iterErr := keysIter.Next() - if iterErr != nil { - return shim.Error(fmt.Sprintf("keys operation failed. Error accessing state: %s", err)) - } - keys = append(keys, response.Key) - } - - for key, value := range keys { - fmt.Printf("key %d contains %s\n", key, value) - } - - jsonKeys, err := json.Marshal(keys) - if err != nil { - return shim.Error(fmt.Sprintf("keys operation failed. Error marshaling JSON: %s", err)) - } - - return shim.Success(jsonKeys) - case "query": - query := args[0] - keysIter, err := stub.GetQueryResult(query) - if err != nil { - return shim.Error(fmt.Sprintf("query operation failed. Error accessing state: %s", err)) - } - defer keysIter.Close() - - var keys []string - for keysIter.HasNext() { - response, iterErr := keysIter.Next() - if iterErr != nil { - return shim.Error(fmt.Sprintf("query operation failed. 
Error accessing state: %s", err)) - } - keys = append(keys, response.Key) - } - - jsonKeys, err := json.Marshal(keys) - if err != nil { - return shim.Error(fmt.Sprintf("query operation failed. Error marshaling JSON: %s", err)) - } - - return shim.Success(jsonKeys) - case "history": - key := args[0] - keysIter, err := stub.GetHistoryForKey(key) - if err != nil { - return shim.Error(fmt.Sprintf("query operation failed. Error accessing state: %s", err)) - } - defer keysIter.Close() - - var keys []string - for keysIter.HasNext() { - response, iterErr := keysIter.Next() - if iterErr != nil { - return shim.Error(fmt.Sprintf("query operation failed. Error accessing state: %s", err)) - } - keys = append(keys, response.TxId) - } - - for key, txID := range keys { - fmt.Printf("key %d contains %s\n", key, txID) - } - - jsonKeys, err := json.Marshal(keys) - if err != nil { - return shim.Error(fmt.Sprintf("query operation failed. Error marshaling JSON: %s", err)) - } - - return shim.Success(jsonKeys) - - default: - return shim.Success([]byte("Unsupported operation")) - } -} - -func main() { - err := shim.Start(new(SimpleChaincode)) - if err != nil { - fmt.Printf("Error starting chaincode: %s", err) - } -} diff --git a/app/platform/fabric/e2e-test/chaincodes/mapkeys/go/mapkeys.go b/app/platform/fabric/e2e-test/chaincodes/mapkeys/go/mapkeys.go deleted file mode 100644 index 6f12a9d33..000000000 --- a/app/platform/fabric/e2e-test/chaincodes/mapkeys/go/mapkeys.go +++ /dev/null @@ -1,305 +0,0 @@ -// +build experimental - -/* -Copyright IBM Corp. All Rights Reserved. - -SPDX-License-Identifier: Apache-2.0 -*/ - -package main - -import ( - "encoding/json" - "fmt" - "strconv" - "time" - - "github.com/hyperledger/fabric/core/chaincode/shim" - pb "github.com/hyperledger/fabric/protos/peer" -) - -// This chaincode implements a simple map that is stored in the state. -// The following operations are available. 
- -// Invoke operations -// put - requires two arguments, a key and value -// remove - requires a key -// get - requires one argument, a key, and returns a value -// keys - requires no arguments, returns all keys - -// SimpleChaincode example simple Chaincode implementation -type SimpleChaincode struct { -} - -// Init is a no-op -func (t *SimpleChaincode) Init(stub shim.ChaincodeStubInterface) pb.Response { - return shim.Success(nil) -} - -// Invoke has two functions -// put - takes two arguments, a key and value, and stores them in the state -// remove - takes one argument, a key, and removes if from the state -func (t *SimpleChaincode) Invoke(stub shim.ChaincodeStubInterface) pb.Response { - function, args := stub.GetFunctionAndParameters() - switch function { - - case "putPrivate": - if len(args) < 3 { - return shim.Error("put operation on private data must include three arguments: [collection, key, value]") - } - collection := args[0] - key := args[1] - value := args[2] - - if err := stub.PutPrivateData(collection, key, []byte(value)); err != nil { - fmt.Printf("Error putting private data%s", err) - return shim.Error(fmt.Sprintf("put operation failed. Error updating state: %s", err)) - } - - return shim.Success(nil) - - case "removePrivate": - if len(args) < 2 { - return shim.Error("remove operation on private data must include two arguments: [collection, key]") - } - collection := args[0] - key := args[1] - - err := stub.DelPrivateData(collection, key) - if err != nil { - return shim.Error(fmt.Sprintf("remove operation on private data failed. Error updating state: %s", err)) - } - return shim.Success(nil) - - case "getPrivate": - if len(args) < 2 { - return shim.Error("get operation on private data must include two arguments: [collection, key]") - } - collection := args[0] - key := args[1] - value, err := stub.GetPrivateData(collection, key) - if err != nil { - return shim.Error(fmt.Sprintf("get operation on private data failed. 
Error accessing state: %s", err)) - } - jsonVal, err := json.Marshal(string(value)) - return shim.Success(jsonVal) - - case "keysPrivate": - if len(args) < 3 { - return shim.Error("range query operation on private data must include three arguments, a collection, key and value") - } - collection := args[0] - startKey := args[1] - endKey := args[2] - - //sleep needed to test peer's timeout behavior when using iterators - stime := 0 - if len(args) > 3 { - stime, _ = strconv.Atoi(args[3]) - } - - keysIter, err := stub.GetPrivateDataByRange(collection, startKey, endKey) - if err != nil { - return shim.Error(fmt.Sprintf("keys operation failed on private data. Error accessing state: %s", err)) - } - defer keysIter.Close() - - var keys []string - for keysIter.HasNext() { - //if sleeptime is specied, take a nap - if stime > 0 { - time.Sleep(time.Duration(stime) * time.Millisecond) - } - - response, iterErr := keysIter.Next() - if iterErr != nil { - return shim.Error(fmt.Sprintf("keys operation on private data failed. Error accessing state: %s", err)) - } - keys = append(keys, response.Key) - } - - for key, value := range keys { - fmt.Printf("key %d contains %s\n", key, value) - } - - jsonKeys, err := json.Marshal(keys) - if err != nil { - return shim.Error(fmt.Sprintf("keys operation on private data failed. Error marshaling JSON: %s", err)) - } - - return shim.Success(jsonKeys) - - case "queryPrivate": - collection := args[0] - query := args[1] - keysIter, err := stub.GetPrivateDataQueryResult(collection, query) - if err != nil { - return shim.Error(fmt.Sprintf("query operation on private data failed. Error accessing state: %s", err)) - } - defer keysIter.Close() - - var keys []string - for keysIter.HasNext() { - response, iterErr := keysIter.Next() - if iterErr != nil { - return shim.Error(fmt.Sprintf("query operation on private data failed. 
Error accessing state: %s", err)) - } - keys = append(keys, response.Key) - } - - jsonKeys, err := json.Marshal(keys) - if err != nil { - return shim.Error(fmt.Sprintf("query operation on private data failed. Error marshaling JSON: %s", err)) - } - - return shim.Success(jsonKeys) - - case "put": - if len(args)%2 != 0 { - return shim.Error("put operation must include an even number of key/value pair arguments: [key, value]") - } - fmt.Println("In mapkeys put") - for index, element := range args { - if index%2 == 0 { - key := element - value := args[index+1] - fmt.Println("Saved {%s,%s}", key, value) - - if err := stub.PutState(key, []byte(value)); err != nil { - fmt.Printf("Error putting state %s", err) - return shim.Error(fmt.Sprintf("put operation failed. Error updating state: %s", err)) - } - } - } - - return shim.Success(nil) - - case "remove": - if len(args) < 1 { - return shim.Error("remove operation must include one argument: [key]") - } - key := args[0] - - err := stub.DelState(key) - if err != nil { - return shim.Error(fmt.Sprintf("remove operation failed. Error updating state: %s", err)) - } - return shim.Success(nil) - - case "get": - if len(args) < 1 { - return shim.Error("get operation must include one argument, a key") - } - key := args[0] - value, err := stub.GetState(key) - if err != nil { - return shim.Error(fmt.Sprintf("get operation failed. Error accessing state: %s", err)) - } - jsonVal, err := json.Marshal(string(value)) - return shim.Success(jsonVal) - - case "keys": - if len(args) < 2 { - return shim.Error("put operation must include two arguments, a key and value") - } - startKey := args[0] - endKey := args[1] - - //sleep needed to test peer's timeout behavior when using iterators - stime := 0 - if len(args) > 2 { - stime, _ = strconv.Atoi(args[2]) - } - - keysIter, err := stub.GetStateByRange(startKey, endKey) - if err != nil { - return shim.Error(fmt.Sprintf("keys operation failed. 
Error accessing state: %s", err)) - } - defer keysIter.Close() - - var keys []string - for keysIter.HasNext() { - //if sleeptime is specied, take a nap - if stime > 0 { - time.Sleep(time.Duration(stime) * time.Millisecond) - } - - response, iterErr := keysIter.Next() - if iterErr != nil { - return shim.Error(fmt.Sprintf("keys operation failed. Error accessing state: %s", err)) - } - keys = append(keys, response.Key) - } - - for key, value := range keys { - fmt.Printf("key %d contains %s\n", key, value) - } - - jsonKeys, err := json.Marshal(keys) - if err != nil { - return shim.Error(fmt.Sprintf("keys operation failed. Error marshaling JSON: %s", err)) - } - - return shim.Success(jsonKeys) - case "query": - query := args[0] - keysIter, err := stub.GetQueryResult(query) - if err != nil { - return shim.Error(fmt.Sprintf("query operation failed. Error accessing state: %s", err)) - } - defer keysIter.Close() - - var keys []string - for keysIter.HasNext() { - response, iterErr := keysIter.Next() - if iterErr != nil { - return shim.Error(fmt.Sprintf("query operation failed. Error accessing state: %s", err)) - } - keys = append(keys, response.Key) - } - - jsonKeys, err := json.Marshal(keys) - if err != nil { - return shim.Error(fmt.Sprintf("query operation failed. Error marshaling JSON: %s", err)) - } - - return shim.Success(jsonKeys) - case "history": - key := args[0] - keysIter, err := stub.GetHistoryForKey(key) - if err != nil { - return shim.Error(fmt.Sprintf("query operation failed. Error accessing state: %s", err)) - } - defer keysIter.Close() - - var keys []string - for keysIter.HasNext() { - response, iterErr := keysIter.Next() - if iterErr != nil { - return shim.Error(fmt.Sprintf("query operation failed. 
Error accessing state: %s", err)) - } - keys = append(keys, response.TxId) - } - - for key, txID := range keys { - fmt.Printf("key %d contains %s\n", key, txID) - } - - jsonKeys, err := json.Marshal(keys) - if err != nil { - return shim.Error(fmt.Sprintf("query operation failed. Error marshaling JSON: %s", err)) - } - - return shim.Success(jsonKeys) - - default: - return shim.Success([]byte("Unsupported operation")) - } -} - -func main() { - err := shim.Start(new(SimpleChaincode)) - if err != nil { - fmt.Printf("Error starting chaincode: %s", err) - } -} diff --git a/app/platform/fabric/e2e-test/chaincodes/marbles/node/marbles_chaincode.js b/app/platform/fabric/e2e-test/chaincodes/marbles/node/marbles_chaincode.js deleted file mode 100644 index 60484df05..000000000 --- a/app/platform/fabric/e2e-test/chaincodes/marbles/node/marbles_chaincode.js +++ /dev/null @@ -1,409 +0,0 @@ -/* -# Copyright IBM Corp. All Rights Reserved. -# -# SPDX-License-Identifier: Apache-2.0 -*/ - -// ====CHAINCODE EXECUTION SAMPLES (CLI) ================== - -// ==== Invoke marbles ==== -// peer chaincode invoke -C myc1 -n marbles -c '{"Args":["initMarble","marble1","blue","35","tom"]}' -// peer chaincode invoke -C myc1 -n marbles -c '{"Args":["initMarble","marble2","red","50","tom"]}' -// peer chaincode invoke -C myc1 -n marbles -c '{"Args":["initMarble","marble3","blue","70","tom"]}' -// peer chaincode invoke -C myc1 -n marbles -c '{"Args":["transferMarble","marble2","jerry"]}' -// peer chaincode invoke -C myc1 -n marbles -c '{"Args":["transferMarblesBasedOnColor","blue","jerry"]}' -// peer chaincode invoke -C myc1 -n marbles -c '{"Args":["delete","marble1"]}' - -// ==== Query marbles ==== -// peer chaincode query -C myc1 -n marbles -c '{"Args":["readMarble","marble1"]}' -// peer chaincode query -C myc1 -n marbles -c '{"Args":["getMarblesByRange","marble1","marble3"]}' -// peer chaincode query -C myc1 -n marbles -c '{"Args":["getHistoryForMarble","marble1"]}' - -// Rich Query (Only 
supported if CouchDB is used as state database): -// peer chaincode query -C myc1 -n marbles -c '{"Args":["queryMarblesByOwner","tom"]}' -// peer chaincode query -C myc1 -n marbles -c '{"Args":["queryMarbles","{\"selector\":{\"owner\":\"tom\"}}"]}' - -'use strict'; -const shim = require('fabric-shim'); -const util = require('util'); - -let Chaincode = class { - async Init(stub) { - let ret = stub.getFunctionAndParameters(); - console.info(ret); - console.info('=========== Instantiated Marbles Chaincode ==========='); - return shim.success(); - } - - async Invoke(stub) { - console.info('Transaction ID: ' + stub.getTxID()); - console.info(util.format('Args: %j', stub.getArgs())); - - let ret = stub.getFunctionAndParameters(); - console.info(ret); - - let method = this[ret.fcn]; - if (!method) { - console.log('no function of name:' + ret.fcn + ' found'); - throw new Error('Received unknown function ' + ret.fcn + ' invocation'); - } - try { - let payload = await method(stub, ret.params, this); - return shim.success(payload); - } catch (err) { - console.log(err); - return shim.error(err); - } - } - - // =============================================== - // initMarble - create a new marble - // =============================================== - async initMarble(stub, args, thisClass) { - if (args.length != 4) { - throw new Error('Incorrect number of arguments. 
Expecting 4'); - } - // ==== Input sanitation ==== - console.info('--- start init marble ---') - if (args[0].lenth <= 0) { - throw new Error('1st argument must be a non-empty string'); - } - if (args[1].lenth <= 0) { - throw new Error('2nd argument must be a non-empty string'); - } - if (args[2].lenth <= 0) { - throw new Error('3rd argument must be a non-empty string'); - } - if (args[3].lenth <= 0) { - throw new Error('4th argument must be a non-empty string'); - } - let marbleName = args[0]; - let color = args[1].toLowerCase(); - let owner = args[3].toLowerCase(); - let size = parseInt(args[2]); - if (typeof size !== 'number') { - throw new Error('3rd argument must be a numeric string'); - } - - // ==== Check if marble already exists ==== - let marbleState = await stub.getState(marbleName); - if (marbleState.toString()) { - throw new Error('This marble already exists: ' + marbleName); - } - - // ==== Create marble object and marshal to JSON ==== - let marble = {}; - marble.docType = 'marble'; - marble.name = marbleName; - marble.color = color; - marble.size = size; - marble.owner = owner; - - // === Save marble to state === - await stub.putState(marbleName, Buffer.from(JSON.stringify(marble))); - let indexName = 'color~name' - let colorNameIndexKey = await stub.createCompositeKey(indexName, [marble.color, marble.name]); - console.info(colorNameIndexKey); - // Save index entry to state. Only the key name is needed, no need to store a duplicate copy of the marble. - // Note - passing a 'nil' value will effectively delete the key from state, therefore we pass null character as value - await stub.putState(colorNameIndexKey, Buffer.from('\u0000')); - // ==== Marble saved and indexed. 
Return success ==== - console.info('- end init marble'); - } - - // =============================================== - // readMarble - read a marble from chaincode state - // =============================================== - async readMarble(stub, args, thisClass) { - if (args.length != 1) { - throw new Error('Incorrect number of arguments. Expecting name of the marble to query'); - } - - let name = args[0]; - if (!name) { - throw new Error(' marble name must not be empty'); - } - let marbleAsbytes = await stub.getState(name); //get the marble from chaincode state - if (!marbleAsbytes.toString()) { - let jsonResp = {}; - jsonResp.Error = 'Marble does not exist: ' + name; - throw new Error(JSON.stringify(jsonResp)); - } - console.info('======================================='); - console.log(marbleAsbytes.toString()); - console.info('======================================='); - return marbleAsbytes; - } - - // ================================================== - // delete - remove a marble key/value pair from state - // ================================================== - async delete(stub, args, thisClass) { - if (args.length != 1) { - throw new Error('Incorrect number of arguments. 
Expecting name of the marble to delete'); - } - let marbleName = args[0]; - if (!marbleName) { - throw new Error('marble name must not be empty'); - } - // to maintain the color~name index, we need to read the marble first and get its color - let valAsbytes = await stub.getState(marbleName); //get the marble from chaincode state - let jsonResp = {}; - if (!valAsbytes) { - jsonResp.error = 'marble does not exist: ' + name; - throw new Error(jsonResp); - } - let marbleJSON = {}; - try { - marbleJSON = JSON.parse(valAsbytes.toString()); - } catch (err) { - jsonResp = {}; - jsonResp.error = 'Failed to decode JSON of: ' + marbleName; - throw new Error(jsonResp); - } - - await stub.deleteState(marbleName); //remove the marble from chaincode state - - // delete the index - let indexName = 'color~name'; - let colorNameIndexKey = stub.createCompositeKey(indexName, [marbleJSON.color, marbleJSON.name]); - if (!colorNameIndexKey) { - throw new Error(' Failed to create the createCompositeKey'); - } - // Delete index entry to state. - await stub.deleteState(colorNameIndexKey); - } - - // =========================================================== - // transfer a marble by setting a new owner name on the marble - // =========================================================== - async transferMarble(stub, args, thisClass) { - // 0 1 - // 'name', 'bob' - if (args.length < 2) { - throw new Error('Incorrect number of arguments. 
Expecting marblename and owner') - } - - let marbleName = args[0]; - let newOwner = args[1].toLowerCase(); - console.info('- start transferMarble ', marbleName, newOwner); - - let marbleAsBytes = await stub.getState(marbleName); - if (!marbleAsBytes || !marbleAsBytes.toString()) { - throw new Error('marble does not exist'); - } - let marbleToTransfer = {}; - try { - marbleToTransfer = JSON.parse(marbleAsBytes.toString()); //unmarshal - } catch (err) { - let jsonResp = {}; - jsonResp.error = 'Failed to decode JSON of: ' + marbleName; - throw new Error(jsonResp); - } - console.info(marbleToTransfer); - marbleToTransfer.owner = newOwner; //change the owner - - let marbleJSONasBytes = Buffer.from(JSON.stringify(marbleToTransfer)); - await stub.putState(marbleName, marbleJSONasBytes); //rewrite the marble - - console.info('- end transferMarble (success)'); - } - - // =========================================================================================== - // getMarblesByRange performs a range query based on the start and end keys provided. - - // Read-only function results are not typically submitted to ordering. If the read-only - // results are submitted to ordering, or if the query is used in an update transaction - // and submitted to ordering, then the committing peers will re-execute to guarantee that - // result sets are stable between endorsement time and commit time. The transaction is - // invalidated by the committing peers if the result set has changed between endorsement - // time and commit time. - // Therefore, range queries are a safe option for performing update transactions based on query results. - // =========================================================================================== - async getMarblesByRange(stub, args, thisClass) { - - if (args.length < 2) { - throw new Error('Incorrect number of arguments. 
Expecting 2'); - } - - let startKey = args[0]; - let endKey = args[1]; - - let resultsIterator = await stub.getStateByRange(startKey, endKey); - let method = thisClass['getAllResults']; - let results = await method(resultsIterator, false); - - return Buffer.from(JSON.stringify(results)); - } - - // ==== Example: GetStateByPartialCompositeKey/RangeQuery ========================================= - // transferMarblesBasedOnColor will transfer marbles of a given color to a certain new owner. - // Uses a GetStateByPartialCompositeKey (range query) against color~name 'index'. - // Committing peers will re-execute range queries to guarantee that result sets are stable - // between endorsement time and commit time. The transaction is invalidated by the - // committing peers if the result set has changed between endorsement time and commit time. - // Therefore, range queries are a safe option for performing update transactions based on query results. - // =========================================================================================== - async transferMarblesBasedOnColor(stub, args, thisClass) { - - // 0 1 - // 'color', 'bob' - if (args.length < 2) { - throw new Error('Incorrect number of arguments. 
Expecting color and owner'); - } - - let color = args[0]; - let newOwner = args[1].toLowerCase(); - console.info('- start transferMarblesBasedOnColor ', color, newOwner); - - // Query the color~name index by color - // This will execute a key range query on all keys starting with 'color' - let coloredMarbleResultsIterator = await stub.getStateByPartialCompositeKey('color~name', [color]); - - let method = thisClass['transferMarble']; - // Iterate through result set and for each marble found, transfer to newOwner - while (true) { - let responseRange = await coloredMarbleResultsIterator.next(); - if (!responseRange || !responseRange.value || !responseRange.value.key) { - return; - } - console.log(responseRange.value.key); - - // let value = res.value.value.toString('utf8'); - let objectType; - let attributes; - ({ - objectType, - attributes - } = await stub.splitCompositeKey(responseRange.value.key)); - - let returnedColor = attributes[0]; - let returnedMarbleName = attributes[1]; - console.info(util.format('- found a marble from index:%s color:%s name:%s\n', objectType, returnedColor, returnedMarbleName)); - - // Now call the transfer function for the found marble. - // Re-use the same function that is used to transfer individual marbles - let response = await method(stub, [returnedMarbleName, newOwner]); - } - - let responsePayload = util.format('Transferred %s marbles to %s', color, newOwner); - console.info('- end transferMarblesBasedOnColor: ' + responsePayload); - } - - - // ===== Example: Parameterized rich query ================================================= - // queryMarblesByOwner queries for marbles based on a passed in owner. - // This is an example of a parameterized query where the query logic is baked into the chaincode, - // and accepting a single query parameter (owner). - // Only available on state databases that support rich query (e.g. 
CouchDB) - // ========================================================================================= - async queryMarblesByOwner(stub, args, thisClass) { - // 0 - // 'bob' - if (args.length < 1) { - throw new Error('Incorrect number of arguments. Expecting owner name.') - } - - let owner = args[0].toLowerCase(); - let queryString = {}; - queryString.selector = {}; - queryString.selector.docType = 'marble'; - queryString.selector.owner = owner; - let method = thisClass['getQueryResultForQueryString']; - let queryResults = await method(stub, JSON.stringify(queryString), thisClass); - return queryResults; //shim.success(queryResults); - } - - // ===== Example: Ad hoc rich query ======================================================== - // queryMarbles uses a query string to perform a query for marbles. - // Query string matching state database syntax is passed in and executed as is. - // Supports ad hoc queries that can be defined at runtime by the client. - // If this is not desired, follow the queryMarblesForOwner example for parameterized queries. - // Only available on state databases that support rich query (e.g. CouchDB) - // ========================================================================================= - async queryMarbles(stub, args, thisClass) { - // 0 - // 'queryString' - if (args.length < 1) { - throw new Error('Incorrect number of arguments. 
Expecting queryString'); - } - let queryString = args[0]; - if (!queryString) { - throw new Error('queryString must not be empty'); - } - let method = thisClass['getQueryResultForQueryString']; - let queryResults = await method(stub, queryString, thisClass); - return queryResults; - } - - async getAllResults(iterator, isHistory) { - let allResults = []; - while (true) { - let res = await iterator.next(); - - if (res.value && res.value.value.toString()) { - let jsonRes = {}; - console.log(res.value.value.toString('utf8')); - - if (isHistory && isHistory === true) { - jsonRes.TxId = res.value.tx_id; - jsonRes.Timestamp = res.value.timestamp; - jsonRes.IsDelete = res.value.is_delete.toString(); - try { - jsonRes.Value = JSON.parse(res.value.value.toString('utf8')); - } catch (err) { - console.log(err); - jsonRes.Value = res.value.value.toString('utf8'); - } - } else { - jsonRes.Key = res.value.key; - try { - jsonRes.Record = JSON.parse(res.value.value.toString('utf8')); - } catch (err) { - console.log(err); - jsonRes.Record = res.value.value.toString('utf8'); - } - } - allResults.push(jsonRes); - } - if (res.done) { - console.log('end of data'); - await iterator.close(); - console.info(allResults); - return allResults; - } - } - } - - // ========================================================================================= - // getQueryResultForQueryString executes the passed in query string. - // Result set is built and returned as a byte array containing the JSON results. 
- // ========================================================================================= - async getQueryResultForQueryString(stub, queryString, thisClass) { - - console.info('- getQueryResultForQueryString queryString:\n' + queryString) - let resultsIterator = await stub.getQueryResult(queryString); - let method = thisClass['getAllResults']; - - let results = await method(resultsIterator, false); - - return Buffer.from(JSON.stringify(results)); - } - - async getHistoryForMarble(stub, args, thisClass) { - - if (args.length < 1) { - throw new Error('Incorrect number of arguments. Expecting 1') - } - let marbleName = args[0]; - console.info('- start getHistoryForMarble: %s\n', marbleName); - - let resultsIterator = await stub.getHistoryForKey(marbleName); - let method = thisClass['getAllResults']; - let results = await method(resultsIterator, true); - - return Buffer.from(JSON.stringify(results)); - } -}; - -shim.start(new Chaincode()); diff --git a/app/platform/fabric/e2e-test/chaincodes/marbles/node/package.json b/app/platform/fabric/e2e-test/chaincodes/marbles/node/package.json deleted file mode 100644 index 5b8475d8b..000000000 --- a/app/platform/fabric/e2e-test/chaincodes/marbles/node/package.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "name": "marbles", - "version": "1.0.0", - "description": "marbles chaincode implemented in node.js", - "engines": { - "node": ">=8.4.0", - "npm": ">=5.3.0" - }, - "scripts": { "start" : "node marbles_chaincode.js" }, - "engine-strict": true, - "engineStrict": true, - "license": "Apache-2.0", - "dependencies": { - "fabric-shim": "unstable" - } -} diff --git a/app/platform/fabric/e2e-test/chaincodes/marbles02_private/marbles_chaincode.go b/app/platform/fabric/e2e-test/chaincodes/marbles02_private/marbles_chaincode.go deleted file mode 100644 index 656761309..000000000 --- a/app/platform/fabric/e2e-test/chaincodes/marbles02_private/marbles_chaincode.go +++ /dev/null @@ -1,628 +0,0 @@ -/* -Licensed to the Apache Software 
Foundation (ASF) under one -or more contributor license agreements. See the NOTICE file -distributed with this work for additional information -regarding copyright ownership. The ASF licenses this file -to you under the Apache License, Version 2.0 (the -"License"); you may not use this file except in compliance -with the License. You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, -software distributed under the License is distributed on an -"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -KIND, either express or implied. See the License for the -specific language governing permissions and limitations -under the License. -*/ - -// ====CHAINCODE EXECUTION SAMPLES (CLI) ================== - -// ==== Invoke marbles ==== -// peer chaincode invoke -C myc1 -n marbles -c '{"Args":["initMarble","marble1","blue","35","tom"]}' -// peer chaincode invoke -C myc1 -n marbles -c '{"Args":["initMarble","marble2","red","50","tom"]}' -// peer chaincode invoke -C myc1 -n marbles -c '{"Args":["initMarble","marble3","blue","70","tom"]}' -// peer chaincode invoke -C myc1 -n marbles -c '{"Args":["transferMarble","marble2","jerry"]}' -// peer chaincode invoke -C myc1 -n marbles -c '{"Args":["transferMarblesBasedOnColor","blue","jerry"]}' -// peer chaincode invoke -C myc1 -n marbles -c '{"Args":["delete","marble1"]}' - -// ==== Query marbles ==== -// peer chaincode query -C myc1 -n marbles -c '{"Args":["readMarble","marble1"]}' -// peer chaincode query -C myc1 -n marbles -c '{"Args":["getMarblesByRange","marble1","marble3"]}' -// peer chaincode query -C myc1 -n marbles -c '{"Args":["getHistoryForMarble","marble1"]}' - -// Rich Query (Only supported if CouchDB is used as state database): -// peer chaincode query -C myc1 -n marbles -c '{"Args":["queryMarblesByOwner","tom"]}' -// peer chaincode query -C myc1 -n marbles -c '{"Args":["queryMarbles","{\"selector\":{\"owner\":\"tom\"}}"]}' - 
-//The following examples demonstrate creating indexes on CouchDB -//Example hostname:port configurations -// -//Docker or vagrant environments: -// http://couchdb:5984/ -// -//Inside couchdb docker container -// http://127.0.0.1:5984/ - -// Index for chaincodeid, docType, owner. -// Note that docType and owner fields must be prefixed with the "data" wrapper -// chaincodeid must be added for all queries -// -// Definition for use with Fauxton interface -// {"index":{"fields":["chaincodeid","data.docType","data.owner"]},"ddoc":"indexOwnerDoc", "name":"indexOwner","type":"json"} -// -// example curl definition for use with command line -// curl -i -X POST -H "Content-Type: application/json" -d "{\"index\":{\"fields\":[\"chaincodeid\",\"data.docType\",\"data.owner\"]},\"name\":\"indexOwner\",\"ddoc\":\"indexOwnerDoc\",\"type\":\"json\"}" http://hostname:port/myc1/_index -// - -// Index for chaincodeid, docType, owner, size (descending order). -// Note that docType, owner and size fields must be prefixed with the "data" wrapper -// chaincodeid must be added for all queries -// -// Definition for use with Fauxton interface -// {"index":{"fields":[{"data.size":"desc"},{"chaincodeid":"desc"},{"data.docType":"desc"},{"data.owner":"desc"}]},"ddoc":"indexSizeSortDoc", "name":"indexSizeSortDesc","type":"json"} -// -// example curl definition for use with command line -// curl -i -X POST -H "Content-Type: application/json" -d "{\"index\":{\"fields\":[{\"data.size\":\"desc\"},{\"chaincodeid\":\"desc\"},{\"data.docType\":\"desc\"},{\"data.owner\":\"desc\"}]},\"ddoc\":\"indexSizeSortDoc\", \"name\":\"indexSizeSortDesc\",\"type\":\"json\"}" http://hostname:port/myc1/_index - -// Rich Query with index design doc and index name specified (Only supported if CouchDB is used as state database): -// peer chaincode query -C myc1 -n marbles -c '{"Args":["queryMarbles","{\"selector\":{\"docType\":\"marble\",\"owner\":\"tom\"}, \"use_index\":[\"_design/indexOwnerDoc\", \"indexOwner\"]}"]}' 
- -// Rich Query with index design doc specified only (Only supported if CouchDB is used as state database): -// peer chaincode query -C myc1 -n marbles -c '{"Args":["queryMarbles","{\"selector\":{\"docType\":{\"$eq\":\"marble\"},\"owner\":{\"$eq\":\"tom\"},\"size\":{\"$gt\":0}},\"fields\":[\"docType\",\"owner\",\"size\"],\"sort\":[{\"size\":\"desc\"}],\"use_index\":\"_design/indexSizeSortDoc\"}"]}' - -package main - -import ( - "bytes" - "encoding/json" - "fmt" - "strconv" - "strings" - "time" - - "github.com/hyperledger/fabric/core/chaincode/shim" - pb "github.com/hyperledger/fabric/protos/peer" -) - -// SimpleChaincode example simple Chaincode implementation -type SimpleChaincode struct { -} - -type marble struct { - ObjectType string `json:"docType"` //docType is used to distinguish the various types of objects in state database - Name string `json:"name"` //the fieldtags are needed to keep case from bouncing around - Color string `json:"color"` - Size int `json:"size"` - Owner string `json:"owner"` -} - -// =================================================================================== -// Main -// =================================================================================== -func main() { - err := shim.Start(new(SimpleChaincode)) - if err != nil { - fmt.Printf("Error starting Simple chaincode: %s", err) - } -} - -// Init initializes chaincode -// =========================== -func (t *SimpleChaincode) Init(stub shim.ChaincodeStubInterface) pb.Response { - return shim.Success(nil) -} - -// Invoke - Our entry point for Invocations -// ======================================== -func (t *SimpleChaincode) Invoke(stub shim.ChaincodeStubInterface) pb.Response { - function, args := stub.GetFunctionAndParameters() - fmt.Println("invoke is running " + function) - - // Handle different functions - if function == "initMarble" { //create a new marble - return t.initMarble(stub, args) - } else if function == "readMarble" { //read a marble - return 
t.readMarble(stub, args) - } else if function == "transferMarble" { //change owner of a specific marble - return t.transferMarble(stub, args) - } else if function == "transferMarblesBasedOnColor" { //transfer all marbles of a certain color - return t.transferMarblesBasedOnColor(stub, args) - } else if function == "delete" { //delete a marble - return t.delete(stub, args) - } else if function == "queryMarblesByOwner" { //find marbles for owner X using rich query - return t.queryMarblesByOwner(stub, args) - } else if function == "queryMarbles" { //find marbles based on an ad hoc rich query - return t.queryMarbles(stub, args) - } else if function == "getHistoryForMarble" { //get history of values for a marble - return t.getHistoryForMarble(stub, args) - } else if function == "getMarblesByRange" { //get marbles based on range query - return t.getMarblesByRange(stub, args) - } - - fmt.Println("invoke did not find func: " + function) //error - return shim.Error("Received unknown function invocation") -} - -// ============================================================ -// initMarble - create a new marble, store into chaincode state -// ============================================================ -func (t *SimpleChaincode) initMarble(stub shim.ChaincodeStubInterface, args []string) pb.Response { - var err error - - // 0 1 2 3 - // "asdf", "blue", "35", "bob" - if len(args) != 4 { - return shim.Error("Incorrect number of arguments. 
Expecting 4") - } - - // ==== Input sanitation ==== - fmt.Println("- start init marble") - if len(args[0]) <= 0 { - return shim.Error("1st argument must be a non-empty string") - } - if len(args[1]) <= 0 { - return shim.Error("2nd argument must be a non-empty string") - } - if len(args[2]) <= 0 { - return shim.Error("3rd argument must be a non-empty string") - } - if len(args[3]) <= 0 { - return shim.Error("4th argument must be a non-empty string") - } - marbleName := args[0] - color := strings.ToLower(args[1]) - owner := strings.ToLower(args[3]) - size, err := strconv.Atoi(args[2]) - if err != nil { - return shim.Error("3rd argument must be a numeric string") - } - - // ==== Check if marble already exists ==== - marbleAsBytes, err := stub.GetPrivateData("collection1", marbleName) - if err != nil { - return shim.Error("Failed to get marble: " + err.Error()) - } else if marbleAsBytes != nil { - fmt.Println("This marble already exists: " + marbleName) - return shim.Error("This marble already exists: " + marbleName) - } - - // ==== Create marble object and marshal to JSON ==== - objectType := "marble" - marble := &marble{objectType, marbleName, color, size, owner} - marbleJSONasBytes, err := json.Marshal(marble) - if err != nil { - return shim.Error(err.Error()) - } - //Alternatively, build the marble json string manually if you don't want to use struct marshalling - //marbleJSONasString := `{"docType":"Marble", "name": "` + marbleName + `", "color": "` + color + `", "size": ` + strconv.Itoa(size) + `, "owner": "` + owner + `"}` - //marbleJSONasBytes := []byte(str) - - // === Save marble to state === - err = stub.PutPrivateData("collection1", marbleName, marbleJSONasBytes) - if err != nil { - return shim.Error(err.Error()) - } - - /* - // ==== Index the marble to enable color-based range queries, e.g. return all blue marbles ==== - // An 'index' is a normal key/value entry in state. 
- // The key is a composite key, with the elements that you want to range query on listed first. - // In our case, the composite key is based on indexName~color~name. - // This will enable very efficient state range queries based on composite keys matching indexName~color~* - indexName := "color~name" - colorNameIndexKey, err := stub.CreateCompositeKey(indexName, []string{marble.Color, marble.Name}) - if err != nil { - return shim.Error(err.Error()) - } - // Save index entry to state. Only the key name is needed, no need to store a duplicate copy of the marble. - // Note - passing a 'nil' value will effectively delete the key from state, therefore we pass null character as value - value := []byte{0x00} - stub.PutPrivateData("collection1", colorNameIndexKey, value) - */ - // ==== Marble saved and indexed. Return success ==== - fmt.Println("- end init marble") - return shim.Success(nil) -} - -// =============================================== -// readMarble - read a marble from chaincode state -// =============================================== -func (t *SimpleChaincode) readMarble(stub shim.ChaincodeStubInterface, args []string) pb.Response { - var name, jsonResp string - var err error - - if len(args) != 1 { - return shim.Error("Incorrect number of arguments. 
Expecting name of the marble to query") - } - - name = args[0] - valAsbytes, err := stub.GetPrivateData("collection1", name) //get the marble from chaincode state - if err != nil { - jsonResp = "{\"Error\":\"Failed to get state for " + name + "\"}" - return shim.Error(jsonResp) - } else if valAsbytes == nil { - jsonResp = "{\"Error\":\"Marble does not exist: " + name + "\"}" - return shim.Error(jsonResp) - } - - return shim.Success(valAsbytes) -} - -// ================================================== -// delete - remove a marble key/value pair from state -// ================================================== -func (t *SimpleChaincode) delete(stub shim.ChaincodeStubInterface, args []string) pb.Response { - var jsonResp string - var marbleJSON marble - if len(args) != 1 { - return shim.Error("Incorrect number of arguments. Expecting 1") - } - marbleName := args[0] - - // to maintain the color~name index, we need to read the marble first and get its color - valAsbytes, err := stub.GetPrivateData("collection1", marbleName) //get the marble from chaincode state - if err != nil { - jsonResp = "{\"Error\":\"Failed to get state for " + marbleName + "\"}" - return shim.Error(jsonResp) - } else if valAsbytes == nil { - jsonResp = "{\"Error\":\"Marble does not exist: " + marbleName + "\"}" - return shim.Error(jsonResp) - } - - err = json.Unmarshal([]byte(valAsbytes), &marbleJSON) - if err != nil { - jsonResp = "{\"Error\":\"Failed to decode JSON of: " + marbleName + "\"}" - return shim.Error(jsonResp) - } - - err = stub.DelPrivateData("collection1", marbleName) //remove the marble from chaincode state - if err != nil { - return shim.Error("Failed to delete state:" + err.Error()) - } - - // maintain the index - indexName := "color~name" - colorNameIndexKey, err := stub.CreateCompositeKey(indexName, []string{marbleJSON.Color, marbleJSON.Name}) - if err != nil { - return shim.Error(err.Error()) - } - - // Delete index entry to state. 
- err = stub.DelPrivateData("collection1", colorNameIndexKey) - if err != nil { - return shim.Error("Failed to delete state:" + err.Error()) - } - return shim.Success(nil) -} - -// =========================================================== -// transfer a marble by setting a new owner name on the marble -// =========================================================== -func (t *SimpleChaincode) transferMarble(stub shim.ChaincodeStubInterface, args []string) pb.Response { - - // 0 1 - // "name", "bob" - if len(args) < 2 { - return shim.Error("Incorrect number of arguments. Expecting 2") - } - - marbleName := args[0] - newOwner := strings.ToLower(args[1]) - fmt.Println("- start transferMarble ", marbleName, newOwner) - - marbleAsBytes, err := stub.GetPrivateData("collection1", marbleName) - if err != nil { - return shim.Error("Failed to get marble:" + err.Error()) - } else if marbleAsBytes == nil { - return shim.Error("Marble does not exist") - } - - marbleToTransfer := marble{} - err = json.Unmarshal(marbleAsBytes, &marbleToTransfer) //unmarshal it aka JSON.parse() - if err != nil { - return shim.Error(err.Error()) - } - marbleToTransfer.Owner = newOwner //change the owner - - marbleJSONasBytes, _ := json.Marshal(marbleToTransfer) - err = stub.PutPrivateData("collection1", marbleName, marbleJSONasBytes) //rewrite the marble - if err != nil { - return shim.Error(err.Error()) - } - - fmt.Println("- end transferMarble (success)") - return shim.Success(nil) -} - -// =========================================================================================== -// getMarblesByRange performs a range query based on the start and end keys provided. - -// Read-only function results are not typically submitted to ordering. 
If the read-only -// results are submitted to ordering, or if the query is used in an update transaction -// and submitted to ordering, then the committing peers will re-execute to guarantee that -// result sets are stable between endorsement time and commit time. The transaction is -// invalidated by the committing peers if the result set has changed between endorsement -// time and commit time. -// Therefore, range queries are a safe option for performing update transactions based on query results. -// =========================================================================================== -func (t *SimpleChaincode) getMarblesByRange(stub shim.ChaincodeStubInterface, args []string) pb.Response { - - if len(args) < 2 { - return shim.Error("Incorrect number of arguments. Expecting 2") - } - - startKey := args[0] - endKey := args[1] - - resultsIterator, err := stub.GetPrivateDataByRange("collection1", startKey, endKey) - if err != nil { - return shim.Error(err.Error()) - } - defer resultsIterator.Close() - - // buffer is a JSON array containing QueryResults - var buffer bytes.Buffer - buffer.WriteString("[") - - bArrayMemberAlreadyWritten := false - for resultsIterator.HasNext() { - queryResponse, err := resultsIterator.Next() - if err != nil { - return shim.Error(err.Error()) - } - // Add a comma before array members, suppress it for the first array member - if bArrayMemberAlreadyWritten == true { - buffer.WriteString(",") - } - buffer.WriteString("{\"Key\":") - buffer.WriteString("\"") - buffer.WriteString(queryResponse.Key) - buffer.WriteString("\"") - - buffer.WriteString(", \"Record\":") - // Record is a JSON object, so we write as-is - buffer.WriteString(string(queryResponse.Value)) - buffer.WriteString("}") - bArrayMemberAlreadyWritten = true - } - buffer.WriteString("]") - - fmt.Printf("- getMarblesByRange queryResult:\n%s\n", buffer.String()) - - return shim.Success(buffer.Bytes()) -} - -// ==== Example: GetStateByPartialCompositeKey/RangeQuery 
========================================= -// transferMarblesBasedOnColor will transfer marbles of a given color to a certain new owner. -// Uses a GetStateByPartialCompositeKey (range query) against color~name 'index'. -// Committing peers will re-execute range queries to guarantee that result sets are stable -// between endorsement time and commit time. The transaction is invalidated by the -// committing peers if the result set has changed between endorsement time and commit time. -// Therefore, range queries are a safe option for performing update transactions based on query results. -// =========================================================================================== -func (t *SimpleChaincode) transferMarblesBasedOnColor(stub shim.ChaincodeStubInterface, args []string) pb.Response { - - // 0 1 - // "color", "bob" - if len(args) < 2 { - return shim.Error("Incorrect number of arguments. Expecting 2") - } - - color := args[0] - newOwner := strings.ToLower(args[1]) - fmt.Println("- start transferMarblesBasedOnColor ", color, newOwner) - - // Query the color~name index by color - // This will execute a key range query on all keys starting with 'color' - coloredMarbleResultsIterator, err := stub.GetPrivateDataByPartialCompositeKey("collection1", "color~name", []string{color}) - if err != nil { - return shim.Error(err.Error()) - } - defer coloredMarbleResultsIterator.Close() - - // Iterate through result set and for each marble found, transfer to newOwner - var i int - for i = 0; coloredMarbleResultsIterator.HasNext(); i++ { - // Note that we don't get the value (2nd return variable), we'll just get the marble name from the composite key - responseRange, err := coloredMarbleResultsIterator.Next() - if err != nil { - return shim.Error(err.Error()) - } - - // get the color and name from color~name composite key - objectType, compositeKeyParts, err := stub.SplitCompositeKey(responseRange.Key) - if err != nil { - return shim.Error(err.Error()) - } - 
returnedColor := compositeKeyParts[0] - returnedMarbleName := compositeKeyParts[1] - fmt.Printf("- found a marble from index:%s color:%s name:%s\n", objectType, returnedColor, returnedMarbleName) - - // Now call the transfer function for the found marble. - // Re-use the same function that is used to transfer individual marbles - response := t.transferMarble(stub, []string{returnedMarbleName, newOwner}) - // if the transfer failed break out of loop and return error - if response.Status != shim.OK { - return shim.Error("Transfer failed: " + response.Message) - } - } - - responsePayload := fmt.Sprintf("Transferred %d %s marbles to %s", i, color, newOwner) - fmt.Println("- end transferMarblesBasedOnColor: " + responsePayload) - return shim.Success([]byte(responsePayload)) -} - -// =======Rich queries ========================================================================= -// Two examples of rich queries are provided below (parameterized query and ad hoc query). -// Rich queries pass a query string to the state database. -// Rich queries are only supported by state database implementations -// that support rich query (e.g. CouchDB). -// The query string is in the syntax of the underlying state database. -// With rich queries there is no guarantee that the result set hasn't changed between -// endorsement time and commit time, aka 'phantom reads'. -// Therefore, rich queries should not be used in update transactions, unless the -// application handles the possibility of result set changes between endorsement and commit time. -// Rich queries can be used for point-in-time queries against a peer. -// ============================================================================================ - -// ===== Example: Parameterized rich query ================================================= -// queryMarblesByOwner queries for marbles based on a passed in owner. 
-// This is an example of a parameterized query where the query logic is baked into the chaincode, -// and accepting a single query parameter (owner). -// Only available on state databases that support rich query (e.g. CouchDB) -// ========================================================================================= -func (t *SimpleChaincode) queryMarblesByOwner(stub shim.ChaincodeStubInterface, args []string) pb.Response { - - // 0 - // "bob" - if len(args) < 1 { - return shim.Error("Incorrect number of arguments. Expecting 1") - } - - owner := strings.ToLower(args[0]) - - queryString := fmt.Sprintf("{\"selector\":{\"docType\":\"marble\",\"owner\":\"%s\"}}", owner) - - queryResults, err := getQueryResultForQueryString(stub, queryString) - if err != nil { - return shim.Error(err.Error()) - } - return shim.Success(queryResults) -} - -// ===== Example: Ad hoc rich query ======================================================== -// queryMarbles uses a query string to perform a query for marbles. -// Query string matching state database syntax is passed in and executed as is. -// Supports ad hoc queries that can be defined at runtime by the client. -// If this is not desired, follow the queryMarblesForOwner example for parameterized queries. -// Only available on state databases that support rich query (e.g. CouchDB) -// ========================================================================================= -func (t *SimpleChaincode) queryMarbles(stub shim.ChaincodeStubInterface, args []string) pb.Response { - - // 0 - // "queryString" - if len(args) < 1 { - return shim.Error("Incorrect number of arguments. 
Expecting 1") - } - - queryString := args[0] - - queryResults, err := getQueryResultForQueryString(stub, queryString) - if err != nil { - return shim.Error(err.Error()) - } - return shim.Success(queryResults) -} - -// ========================================================================================= -// getQueryResultForQueryString executes the passed in query string. -// Result set is built and returned as a byte array containing the JSON results. -// ========================================================================================= -func getQueryResultForQueryString(stub shim.ChaincodeStubInterface, queryString string) ([]byte, error) { - - fmt.Printf("- getQueryResultForQueryString queryString:\n%s\n", queryString) - - resultsIterator, err := stub.GetPrivateDataQueryResult("collection1", queryString) - if err != nil { - return nil, err - } - defer resultsIterator.Close() - - // buffer is a JSON array containing QueryRecords - var buffer bytes.Buffer - buffer.WriteString("[") - - bArrayMemberAlreadyWritten := false - for resultsIterator.HasNext() { - queryResponse, err := resultsIterator.Next() - if err != nil { - return nil, err - } - // Add a comma before array members, suppress it for the first array member - if bArrayMemberAlreadyWritten == true { - buffer.WriteString(",") - } - buffer.WriteString("{\"Key\":") - buffer.WriteString("\"") - buffer.WriteString(queryResponse.Key) - buffer.WriteString("\"") - - buffer.WriteString(", \"Record\":") - // Record is a JSON object, so we write as-is - buffer.WriteString(string(queryResponse.Value)) - buffer.WriteString("}") - bArrayMemberAlreadyWritten = true - } - buffer.WriteString("]") - - fmt.Printf("- getQueryResultForQueryString queryResult:\n%s\n", buffer.String()) - - return buffer.Bytes(), nil -} - -func (t *SimpleChaincode) getHistoryForMarble(stub shim.ChaincodeStubInterface, args []string) pb.Response { - - if len(args) < 1 { - return shim.Error("Incorrect number of arguments. 
Expecting 1") - } - - marbleName := args[0] - - fmt.Printf("- start getHistoryForMarble: %s\n", marbleName) - - resultsIterator, err := stub.GetHistoryForKey(marbleName) - if err != nil { - return shim.Error(err.Error()) - } - defer resultsIterator.Close() - - // buffer is a JSON array containing historic values for the marble - var buffer bytes.Buffer - buffer.WriteString("[") - - bArrayMemberAlreadyWritten := false - for resultsIterator.HasNext() { - response, err := resultsIterator.Next() - if err != nil { - return shim.Error(err.Error()) - } - // Add a comma before array members, suppress it for the first array member - if bArrayMemberAlreadyWritten == true { - buffer.WriteString(",") - } - buffer.WriteString("{\"TxId\":") - buffer.WriteString("\"") - buffer.WriteString(response.TxId) - buffer.WriteString("\"") - - buffer.WriteString(", \"Value\":") - // if it was a delete operation on given key, then we need to set the - //corresponding value null. Else, we will write the response.Value - //as-is (as the Value itself a JSON marble) - if response.IsDelete { - buffer.WriteString("null") - } else { - buffer.WriteString(string(response.Value)) - } - - buffer.WriteString(", \"Timestamp\":") - buffer.WriteString("\"") - buffer.WriteString(time.Unix(response.Timestamp.Seconds, int64(response.Timestamp.Nanos)).String()) - buffer.WriteString("\"") - - buffer.WriteString(", \"IsDelete\":") - buffer.WriteString("\"") - buffer.WriteString(strconv.FormatBool(response.IsDelete)) - buffer.WriteString("\"") - - buffer.WriteString("}") - bArrayMemberAlreadyWritten = true - } - buffer.WriteString("]") - - fmt.Printf("- getHistoryForMarble returning:\n%s\n", buffer.String()) - - return shim.Success(buffer.Bytes()) -} diff --git a/app/platform/fabric/e2e-test/chaincodes/samplecc/go/chaincode_sample.go b/app/platform/fabric/e2e-test/chaincodes/samplecc/go/chaincode_sample.go deleted file mode 100644 index e6587ba16..000000000 --- 
a/app/platform/fabric/e2e-test/chaincodes/samplecc/go/chaincode_sample.go +++ /dev/null @@ -1,180 +0,0 @@ -/* -Copyright IBM Corp. 2016 All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package main - -import ( - "crypto/aes" - "crypto/cipher" - "crypto/rand" - "fmt" - "io" - - "github.com/hyperledger/fabric/core/chaincode/shim" - pb "github.com/hyperledger/fabric/protos/peer" -) - -// cryptoChaincode is allows the following transactions -// "put", "key", val - returns "OK" on success -// "get", "key" - returns val stored previously -type cryptoChaincode struct { -} - -const ( - AESKeyLength = 32 // AESKeyLength is the default AES key length - NonceSize = 24 // NonceSize is the default NonceSize -) - -/////////////////////////////////////////////////// -// GetRandomByt es returns len random looking bytes -/////////////////////////////////////////////////// -func GetRandomBytes(len int) ([]byte, error) { - //TODO: Should we fix the length ? 
- key := make([]byte, len) - - _, err := rand.Read(key) - if err != nil { - return nil, err - } - - return key, nil -} - -//////////////////////////////////////////////////////////// -// GenAESKey returns a random AES key of length AESKeyLength -// 3 Functions to support Encryption and Decryption -// GENAESKey() - Generates AES symmetric key -func (t *cryptoChaincode) GenAESKey() ([]byte, error) { - return GetRandomBytes(AESKeyLength) -} - -//Init implements chaincode's Init interface -func (t *cryptoChaincode) Init(stub shim.ChaincodeStubInterface) pb.Response { - return shim.Success(nil) -} - -//Invoke implements chaincode's Invoke interface -func (t *cryptoChaincode) Invoke(stub shim.ChaincodeStubInterface) pb.Response { - function, args := stub.GetFunctionAndParameters() - if function != "invoke" { - return shim.Error("Unknown function call") - } - - if len(args) < 2 { - return shim.Error(fmt.Sprintf("invalid number of args %d", len(args))) - } - method := args[0] - if method == "put" { - if len(args) < 3 { - return shim.Error(fmt.Sprintf("invalid number of args for put %d", len(args))) - } - return t.writeTransaction(stub, args) - } else if method == "get" { - return t.readTransaction(stub, args) - } - return shim.Error(fmt.Sprintf("unknown function %s", method)) -} - -func (t *cryptoChaincode) encryptAndDecrypt(arg string) []byte { - AES_key, _ := t.GenAESKey() - AES_enc := t.Encrypt(AES_key, []byte(arg)) - - value := t.Decrypt(AES_key, AES_enc) - return value -} - -func (t *cryptoChaincode) Encrypt(key []byte, byteArray []byte) []byte { - - // Create the AES cipher - block, err := aes.NewCipher(key) - if err != nil { - panic(err) - } - - // Empty array of 16 + byteArray length - // Include the IV at the beginning - ciphertext := make([]byte, aes.BlockSize+len(byteArray)) - - // Slice of first 16 bytes - iv := ciphertext[:aes.BlockSize] - - // Write 16 rand bytes to fill iv - if _, err := io.ReadFull(rand.Reader, iv); err != nil { - panic(err) - } - - // 
Return an encrypted stream - stream := cipher.NewCFBEncrypter(block, iv) - - // Encrypt bytes from byteArray to ciphertext - stream.XORKeyStream(ciphertext[aes.BlockSize:], byteArray) - - return ciphertext -} - -func (t *cryptoChaincode) Decrypt(key []byte, ciphertext []byte) []byte { - - // Create the AES cipher - block, err := aes.NewCipher(key) - if err != nil { - panic(err) - } - - // Before even testing the decryption, - // if the text is too small, then it is incorrect - if len(ciphertext) < aes.BlockSize { - panic("Text is too short") - } - - // Get the 16 byte IV - iv := ciphertext[:aes.BlockSize] - - // Remove the IV from the ciphertext - ciphertext = ciphertext[aes.BlockSize:] - - // Return a decrypted stream - stream := cipher.NewCFBDecrypter(block, iv) - - // Decrypt bytes from ciphertext - stream.XORKeyStream(ciphertext, ciphertext) - - return ciphertext -} - -func (t *cryptoChaincode) writeTransaction(stub shim.ChaincodeStubInterface, args []string) pb.Response { - cryptoArg := t.encryptAndDecrypt(args[2]) - err := stub.PutState(args[1], cryptoArg) - if err != nil { - return shim.Error(err.Error()) - } - return shim.Success([]byte("OK")) -} - -func (t *cryptoChaincode) readTransaction(stub shim.ChaincodeStubInterface, args []string) pb.Response { - // Get the state from the ledger - val, err := stub.GetState(args[1]) - if err != nil { - return shim.Error(err.Error()) - } - return shim.Success(val) -} - -func main() { - err := shim.Start(new(cryptoChaincode)) - if err != nil { - fmt.Printf("Error starting New key per invoke: %s", err) - } -} diff --git a/app/platform/fabric/e2e-test/chaincodes/samplecc/java/build.gradle b/app/platform/fabric/e2e-test/chaincodes/samplecc/java/build.gradle deleted file mode 100644 index fedd5cccc..000000000 --- a/app/platform/fabric/e2e-test/chaincodes/samplecc/java/build.gradle +++ /dev/null @@ -1,34 +0,0 @@ -/* - * Copyright IBM Corp. 2017 All Rights Reserved. 
- * - * SPDX-License-Identifier: Apache-2.0 - */ -plugins { - id 'com.github.johnrengelman.shadow' version '2.0.3' - id 'java' -} - -group 'org.hyperledger.fabric-chaincode-java' -version '1.0-SNAPSHOT' - -sourceCompatibility = 1.8 - -repositories { - mavenLocal() - mavenCentral() -} - -dependencies { - compile group: 'org.hyperledger.fabric-chaincode-java', name: 'fabric-chaincode-shim', version: '1.+' - testCompile group: 'junit', name: 'junit', version: '4.12' -} - -shadowJar { - baseName = 'chaincode' - version = null - classifier = null - - manifest { - attributes 'Main-Class': 'org.hyperledger.fabric.example.SampleCC' - } -} diff --git a/app/platform/fabric/e2e-test/chaincodes/samplecc/java/gradlew b/app/platform/fabric/e2e-test/chaincodes/samplecc/java/gradlew deleted file mode 100755 index cccdd3d51..000000000 --- a/app/platform/fabric/e2e-test/chaincodes/samplecc/java/gradlew +++ /dev/null @@ -1,172 +0,0 @@ -#!/usr/bin/env sh - -############################################################################## -## -## Gradle start up script for UN*X -## -############################################################################## - -# Attempt to set APP_HOME -# Resolve links: $0 may be a link -PRG="$0" -# Need this for relative symlinks. -while [ -h "$PRG" ] ; do - ls=`ls -ld "$PRG"` - link=`expr "$ls" : '.*-> \(.*\)$'` - if expr "$link" : '/.*' > /dev/null; then - PRG="$link" - else - PRG=`dirname "$PRG"`"/$link" - fi -done -SAVED="`pwd`" -cd "`dirname \"$PRG\"`/" >/dev/null -APP_HOME="`pwd -P`" -cd "$SAVED" >/dev/null - -APP_NAME="Gradle" -APP_BASE_NAME=`basename "$0"` - -# Add default JVM options here. You can also use JAVA_OPTS and GRADLE_OPTS to pass JVM options to this script. -DEFAULT_JVM_OPTS="" - -# Use the maximum available, or set MAX_FD != -1 to use that value. -MAX_FD="maximum" - -warn () { - echo "$*" -} - -die () { - echo - echo "$*" - echo - exit 1 -} - -# OS specific support (must be 'true' or 'false'). 
-cygwin=false -msys=false -darwin=false -nonstop=false -case "`uname`" in - CYGWIN* ) - cygwin=true - ;; - Darwin* ) - darwin=true - ;; - MINGW* ) - msys=true - ;; - NONSTOP* ) - nonstop=true - ;; -esac - -CLASSPATH=$APP_HOME/gradle/wrapper/gradle-wrapper.jar - -# Determine the Java command to use to start the JVM. -if [ -n "$JAVA_HOME" ] ; then - if [ -x "$JAVA_HOME/jre/sh/java" ] ; then - # IBM's JDK on AIX uses strange locations for the executables - JAVACMD="$JAVA_HOME/jre/sh/java" - else - JAVACMD="$JAVA_HOME/bin/java" - fi - if [ ! -x "$JAVACMD" ] ; then - die "ERROR: JAVA_HOME is set to an invalid directory: $JAVA_HOME - -Please set the JAVA_HOME variable in your environment to match the -location of your Java installation." - fi -else - JAVACMD="java" - which java >/dev/null 2>&1 || die "ERROR: JAVA_HOME is not set and no 'java' command could be found in your PATH. - -Please set the JAVA_HOME variable in your environment to match the -location of your Java installation." -fi - -# Increase the maximum file descriptors if we can. -if [ "$cygwin" = "false" -a "$darwin" = "false" -a "$nonstop" = "false" ] ; then - MAX_FD_LIMIT=`ulimit -H -n` - if [ $? -eq 0 ] ; then - if [ "$MAX_FD" = "maximum" -o "$MAX_FD" = "max" ] ; then - MAX_FD="$MAX_FD_LIMIT" - fi - ulimit -n $MAX_FD - if [ $? 
-ne 0 ] ; then - warn "Could not set maximum file descriptor limit: $MAX_FD" - fi - else - warn "Could not query maximum file descriptor limit: $MAX_FD_LIMIT" - fi -fi - -# For Darwin, add options to specify how the application appears in the dock -if $darwin; then - GRADLE_OPTS="$GRADLE_OPTS \"-Xdock:name=$APP_NAME\" \"-Xdock:icon=$APP_HOME/media/gradle.icns\"" -fi - -# For Cygwin, switch paths to Windows format before running java -if $cygwin ; then - APP_HOME=`cygpath --path --mixed "$APP_HOME"` - CLASSPATH=`cygpath --path --mixed "$CLASSPATH"` - JAVACMD=`cygpath --unix "$JAVACMD"` - - # We build the pattern for arguments to be converted via cygpath - ROOTDIRSRAW=`find -L / -maxdepth 1 -mindepth 1 -type d 2>/dev/null` - SEP="" - for dir in $ROOTDIRSRAW ; do - ROOTDIRS="$ROOTDIRS$SEP$dir" - SEP="|" - done - OURCYGPATTERN="(^($ROOTDIRS))" - # Add a user-defined pattern to the cygpath arguments - if [ "$GRADLE_CYGPATTERN" != "" ] ; then - OURCYGPATTERN="$OURCYGPATTERN|($GRADLE_CYGPATTERN)" - fi - # Now convert the arguments - kludge to limit ourselves to /bin/sh - i=0 - for arg in "$@" ; do - CHECK=`echo "$arg"|egrep -c "$OURCYGPATTERN" -` - CHECK2=`echo "$arg"|egrep -c "^-"` ### Determine if an option - - if [ $CHECK -ne 0 ] && [ $CHECK2 -eq 0 ] ; then ### Added a condition - eval `echo args$i`=`cygpath --path --ignore --mixed "$arg"` - else - eval `echo args$i`="\"$arg\"" - fi - i=$((i+1)) - done - case $i in - (0) set -- ;; - (1) set -- "$args0" ;; - (2) set -- "$args0" "$args1" ;; - (3) set -- "$args0" "$args1" "$args2" ;; - (4) set -- "$args0" "$args1" "$args2" "$args3" ;; - (5) set -- "$args0" "$args1" "$args2" "$args3" "$args4" ;; - (6) set -- "$args0" "$args1" "$args2" "$args3" "$args4" "$args5" ;; - (7) set -- "$args0" "$args1" "$args2" "$args3" "$args4" "$args5" "$args6" ;; - (8) set -- "$args0" "$args1" "$args2" "$args3" "$args4" "$args5" "$args6" "$args7" ;; - (9) set -- "$args0" "$args1" "$args2" "$args3" "$args4" "$args5" "$args6" "$args7" "$args8" ;; 
- esac -fi - -# Escape application args -save () { - for i do printf %s\\n "$i" | sed "s/'/'\\\\''/g;1s/^/'/;\$s/\$/' \\\\/" ; done - echo " " -} -APP_ARGS=$(save "$@") - -# Collect all arguments for the java command, following the shell quoting and substitution rules -eval set -- $DEFAULT_JVM_OPTS $JAVA_OPTS $GRADLE_OPTS "\"-Dorg.gradle.appname=$APP_BASE_NAME\"" -classpath "\"$CLASSPATH\"" org.gradle.wrapper.GradleWrapperMain "$APP_ARGS" - -# by default we should be in the correct project dir, but when run from Finder on Mac, the cwd is wrong -if [ "$(uname)" = "Darwin" ] && [ "$HOME" = "$PWD" ]; then - cd "$(dirname "$0")" -fi - -exec "$JAVACMD" "$@" diff --git a/app/platform/fabric/e2e-test/chaincodes/samplecc/java/gradlew.bat b/app/platform/fabric/e2e-test/chaincodes/samplecc/java/gradlew.bat deleted file mode 100644 index f9553162f..000000000 --- a/app/platform/fabric/e2e-test/chaincodes/samplecc/java/gradlew.bat +++ /dev/null @@ -1,84 +0,0 @@ -@if "%DEBUG%" == "" @echo off -@rem ########################################################################## -@rem -@rem Gradle startup script for Windows -@rem -@rem ########################################################################## - -@rem Set local scope for the variables with windows NT shell -if "%OS%"=="Windows_NT" setlocal - -set DIRNAME=%~dp0 -if "%DIRNAME%" == "" set DIRNAME=. -set APP_BASE_NAME=%~n0 -set APP_HOME=%DIRNAME% - -@rem Add default JVM options here. You can also use JAVA_OPTS and GRADLE_OPTS to pass JVM options to this script. -set DEFAULT_JVM_OPTS= - -@rem Find java.exe -if defined JAVA_HOME goto findJavaFromJavaHome - -set JAVA_EXE=java.exe -%JAVA_EXE% -version >NUL 2>&1 -if "%ERRORLEVEL%" == "0" goto init - -echo. -echo ERROR: JAVA_HOME is not set and no 'java' command could be found in your PATH. -echo. -echo Please set the JAVA_HOME variable in your environment to match the -echo location of your Java installation. 
- -goto fail - -:findJavaFromJavaHome -set JAVA_HOME=%JAVA_HOME:"=% -set JAVA_EXE=%JAVA_HOME%/bin/java.exe - -if exist "%JAVA_EXE%" goto init - -echo. -echo ERROR: JAVA_HOME is set to an invalid directory: %JAVA_HOME% -echo. -echo Please set the JAVA_HOME variable in your environment to match the -echo location of your Java installation. - -goto fail - -:init -@rem Get command-line arguments, handling Windows variants - -if not "%OS%" == "Windows_NT" goto win9xME_args - -:win9xME_args -@rem Slurp the command line arguments. -set CMD_LINE_ARGS= -set _SKIP=2 - -:win9xME_args_slurp -if "x%~1" == "x" goto execute - -set CMD_LINE_ARGS=%* - -:execute -@rem Setup the command line - -set CLASSPATH=%APP_HOME%\gradle\wrapper\gradle-wrapper.jar - -@rem Execute Gradle -"%JAVA_EXE%" %DEFAULT_JVM_OPTS% %JAVA_OPTS% %GRADLE_OPTS% "-Dorg.gradle.appname=%APP_BASE_NAME%" -classpath "%CLASSPATH%" org.gradle.wrapper.GradleWrapperMain %CMD_LINE_ARGS% - -:end -@rem End local scope for the variables with windows NT shell -if "%ERRORLEVEL%"=="0" goto mainEnd - -:fail -rem Set variable GRADLE_EXIT_CONSOLE if you need the _script_ return code instead of -rem the _cmd.exe /c_ return code! -if not "" == "%GRADLE_EXIT_CONSOLE%" exit 1 -exit /b 1 - -:mainEnd -if "%OS%"=="Windows_NT" endlocal - -:omega diff --git a/app/platform/fabric/e2e-test/chaincodes/samplecc/java/settings.gradle b/app/platform/fabric/e2e-test/chaincodes/samplecc/java/settings.gradle deleted file mode 100644 index 2f364bfc3..000000000 --- a/app/platform/fabric/e2e-test/chaincodes/samplecc/java/settings.gradle +++ /dev/null @@ -1,7 +0,0 @@ -/* - * Copyright IBM Corp. 2017 All Rights Reserved. 
- * - * SPDX-License-Identifier: Apache-2.0 - */ -rootProject.name = 'samplecc' - diff --git a/app/platform/fabric/e2e-test/chaincodes/samplecc/java/src/main/java/org/hyperledger/fabric/example/SampleCC.java b/app/platform/fabric/e2e-test/chaincodes/samplecc/java/src/main/java/org/hyperledger/fabric/example/SampleCC.java deleted file mode 100644 index 23b3f7491..000000000 --- a/app/platform/fabric/e2e-test/chaincodes/samplecc/java/src/main/java/org/hyperledger/fabric/example/SampleCC.java +++ /dev/null @@ -1,111 +0,0 @@ -/* -Copyright IBM Corp., DTCC All Rights Reserved. - -SPDX-License-Identifier: Apache-2.0 -*/ -package org.hyperledger.fabric.example; - -import org.hyperledger.fabric.shim.ChaincodeBase; -import org.hyperledger.fabric.shim.ChaincodeStub; - -import javax.crypto.*; -import javax.crypto.spec.IvParameterSpec; -import javax.crypto.spec.SecretKeySpec; -import java.security.InvalidAlgorithmParameterException; -import java.security.InvalidKeyException; -import java.security.NoSuchAlgorithmException; -import java.security.SecureRandom; -import java.util.Base64; -import java.util.List; - -import static java.nio.charset.StandardCharsets.UTF_8; - -public class SampleCC extends ChaincodeBase { - - private static final int AESKeyLength = 32; // AESKeyLength is the default AES key length - java by default support only 128 bit keys - - @Override - public Response init(ChaincodeStub stub) { - return newSuccessResponse(); - } - - @Override - public Response invoke(ChaincodeStub stub) { - String func = stub.getFunction(); - List params = stub.getParameters(); - if (!"invoke".equals(func)) { - return newErrorResponse("Unknown function call"); - } - if (params.size() < 2) { - return newErrorResponse("invalid number of args " + params.size()); - } - String method = params.get(0); - if ("put".equals(method)) { - if (params.size() < 3) { - return newErrorResponse("invalid number of args for put " + params.size()); - } - return writeTransaction(stub, params); - } else if 
("get".equals(method)) { - return readTransaction(stub, params); - } else { - return newErrorResponse("unknown function " + method); - } - } - - private Response readTransaction(ChaincodeStub stub, List params) { - try { - byte[] val = stub.getState(params.get(1)); - return newSuccessResponse(val); - } catch (Exception e) { - return newErrorResponse(e); - } - } - - private Response writeTransaction(ChaincodeStub stub, List params) { - try { - byte[] cryptoArg = encryptAndDecrypt(params.get(2)); - stub.putState(params.get(1), cryptoArg); - return newSuccessResponse("OK".getBytes(UTF_8)); - } catch (Exception e) { - return newErrorResponse(e); - } - } - - private byte[] encryptAndDecrypt(String s) throws NoSuchPaddingException, InvalidKeyException, NoSuchAlgorithmException, IllegalBlockSizeException, BadPaddingException, InvalidAlgorithmParameterException { - SecretKey key = genAESKey(AESKeyLength); - byte[] iv = genInitVector(); - String enc = encrypt(key, s, iv); - return decrypt(key, enc, iv); - } - - private String encrypt(SecretKey key, String value, byte[] ivBytes) throws NoSuchPaddingException, NoSuchAlgorithmException, InvalidAlgorithmParameterException, InvalidKeyException, BadPaddingException, IllegalBlockSizeException { - Cipher cipher = Cipher.getInstance("AES/CBC/PKCS5Padding"); - cipher.init(Cipher.ENCRYPT_MODE, key, new IvParameterSpec(ivBytes)); - byte[] encrypted = cipher.doFinal(value.getBytes()); - return Base64.getEncoder().encodeToString(encrypted); - } - - private byte[] decrypt(SecretKey key, String strToDecrypt, byte[] ivBytes) throws InvalidAlgorithmParameterException, InvalidKeyException, NoSuchPaddingException, NoSuchAlgorithmException, BadPaddingException, IllegalBlockSizeException { - Cipher cipher = Cipher.getInstance("AES/CBC/PKCS5Padding"); - cipher.init(Cipher.DECRYPT_MODE, key, new IvParameterSpec(ivBytes)); - return cipher.doFinal(Base64.getDecoder().decode(strToDecrypt)); - } - - private SecretKey genAESKey(int keyLength) { - 
SecureRandom secureRandom = new SecureRandom(); - byte[] key = new byte[keyLength]; - secureRandom.nextBytes(key); - return new SecretKeySpec(key, "AES"); - } - - private byte[] genInitVector() { - SecureRandom secureRandom = new SecureRandom(); - byte[] iv = new byte[16]; - secureRandom.nextBytes(iv); - return iv; - } - - public static void main(String[] args) { - new SampleCC().start(args); - } -} diff --git a/app/platform/fabric/e2e-test/chaincodes/samplecc/node/chaincode_sample.js b/app/platform/fabric/e2e-test/chaincodes/samplecc/node/chaincode_sample.js deleted file mode 100644 index 2a20b93b4..000000000 --- a/app/platform/fabric/e2e-test/chaincodes/samplecc/node/chaincode_sample.js +++ /dev/null @@ -1,104 +0,0 @@ -/* -# Copyright IBM Corp. All Rights Reserved. -# -# SPDX-License-Identifier: Apache-2.0 -*/ -'use strict'; -const shim = require('fabric-shim'); -const util = require('util'); -const secureRandom = require('secure-random'); -const aesjs = require('aes-js'); - -const AESKeyLength = 32 // AESKeyLength is the default AES key length -const NonceSize = 24 // NonceSize is the default NonceSize - -// This Chaincode allows the following transactions -// "put", "key", val - returns success response -// "get", "key" - returns val stored previously - -let Chaincode = class { - async Init(stub) { - console.info('=========== Sample Chaincode Instantiation Successfull !! 
==========='); - return shim.success(); - } - - async Invoke(stub) { - let ret = stub.getFunctionAndParameters(); - console.info(util.format('Args: %j', ret.params)); - let args = ret.params; - let method = this[args[0]]; - if (!method) { - console.log('no function of name:' + ret.fcn + ' found'); - throw new Error('Received unknown function ' + ret.fcn + ' invocation'); - } - try { - let payload = await method(stub, ret.params, this); - return shim.success(payload); - } catch (err) { - console.log(err); - return shim.error(err); - } - } - - async encrypt(key, text) { - let textBytes = aesjs.utils.utf8.toBytes(text); - - // The counter is optional, and if omitted will begin at 1 - let aesCtr = new aesjs.ModeOfOperation.ctr(key, new aesjs.Counter(5)); - let encryptedBytes = aesCtr.encrypt(textBytes); - - // To print or store the binary data, you may convert it to hex - let encryptedHex = aesjs.utils.hex.fromBytes(encryptedBytes); - return encryptedHex; - } - - async decrypt(key, cipherText) { - // When ready to decrypt the hex string, convert it back to bytes - let encryptedBytes = aesjs.utils.hex.toBytes(cipherText); - - let aesCtr = new aesjs.ModeOfOperation.ctr(key, new aesjs.Counter(5)); - let decryptedBytes = aesCtr.decrypt(encryptedBytes); - - // Convert our bytes back into text - let decryptedText = aesjs.utils.utf8.fromBytes(decryptedBytes); - return decryptedText; - } - - // To make the process busy, We just Encrypt and then decrypt the value - async encryptAndDecrypt(arg, thisClass) { - //Generate random AES key of length AESKeyLength - let key = secureRandom.randomBuffer(AESKeyLength); - let method = thisClass['encrypt']; - let cipherText = await method(key, arg); - method = thisClass['decrypt']; - let plainText = await method(key, cipherText); - return Buffer.from(plainText, 'utf8'); - } - - async put(stub, args, thisClass) { - if (args.length != 3) { - return shim.error(util.format('Invalid number of args for \'put\', %j', args.length)); - } - let 
method = thisClass['encryptAndDecrypt']; - let cryptoArg = await method(args[2], thisClass); - await stub.putState(args[1], cryptoArg); - return Buffer.from('OK', 'utf8'); - } - - async get(stub, args) { - if (args.length != 2) { - return shim.error(util.format('Invalid number of args for \'get\', %j', args.length)); - } - // Get the state from the ledger - let payload = await stub.getState(args[1]); - - if (!payload.toString()) { - let jsonResp = {}; - jsonResp.error = args[1] + ' doesn\'t exist'; - throw new Error(jsonResp); - } - console.log(args[1] + ' : ' + payload.toString()); - return payload; - } -}; -shim.start(new Chaincode()); diff --git a/app/platform/fabric/e2e-test/chaincodes/samplecc/node/package.json b/app/platform/fabric/e2e-test/chaincodes/samplecc/node/package.json deleted file mode 100644 index 921ab2186..000000000 --- a/app/platform/fabric/e2e-test/chaincodes/samplecc/node/package.json +++ /dev/null @@ -1,20 +0,0 @@ -{ - "name": "sample_chaincode", - "version": "1.0.0", - "description": "sample chaincode implemented in node.js", - "engines": { - "node": ">=8.4.0", - "npm": ">=5.3.0" - }, - "scripts": { - "start": "node chaincode_sample.js" - }, - "engine-strict": true, - "engineStrict": true, - "license": "Apache-2.0", - "dependencies": { - "aes-js": "^3.1.0", - "fabric-shim": "unstable", - "secure-random": "1.1.1" - } -} diff --git a/app/platform/fabric/e2e-test/chaincodes/sbe/chaincode.go b/app/platform/fabric/e2e-test/chaincodes/sbe/chaincode.go deleted file mode 100644 index 3fd6272f0..000000000 --- a/app/platform/fabric/e2e-test/chaincodes/sbe/chaincode.go +++ /dev/null @@ -1,151 +0,0 @@ -/* -Copyright IBM Corp. All Rights Reserved. 
- -SPDX-License-Identifier: Apache-2.0 -*/ - -package main - -import ( - "encoding/json" - "fmt" - - "github.com/hyperledger/fabric/core/chaincode/shim" - "github.com/hyperledger/fabric/core/chaincode/shim/ext/statebased" - pb "github.com/hyperledger/fabric/protos/peer" -) - -// Record is JSON-marshaled as a return value of getRecord -type Record struct { - Key string - Value []byte - Orgs []string -} - -/* - EndorsementCC provides the following chaincode API: - -) updateRecordValue(key, val) sets the value for a given key - there is no return value on success - -) updateRecordEP(key, org1, org2, ..., orgN) sets the endorsement policy for a given key - the endorsement policy is represented by a list of MSP IDs - there is no return value on success - -) getRecord(key) returns a JSON-marshaled Record for the given key -*/ -type EndorsementCC struct { -} - -// Init callback -func (cc *EndorsementCC) Init(stub shim.ChaincodeStubInterface) pb.Response { - return shim.Success(nil) -} - -// Invoke dispatcher -func (cc *EndorsementCC) Invoke(stub shim.ChaincodeStubInterface) pb.Response { - funcName, _ := stub.GetFunctionAndParameters() - if function, ok := functions[funcName]; ok { - return function(stub) - } - return shim.Error(fmt.Sprintf("Unknown function %s", funcName)) -} - -// function dispatch map used by Invoke() -var functions = map[string]func(stub shim.ChaincodeStubInterface) pb.Response{ - "updateRecordVal": updateRecordValue, - "updateRecordEP": updateRecordEP, - "getRecord": getRecord, -} - -func updateRecordEP(stub shim.ChaincodeStubInterface) pb.Response { - _, parameters := stub.GetFunctionAndParameters() - if len(parameters) < 2 { - return shim.Error("Wrong number of arguments supplied.") - } - - // set the EP - err := setEP(stub, parameters[0], parameters[1:]...) 
- if err != nil { - return shim.Error(err.Error()) - } - return shim.Success([]byte{}) -} - -func updateRecordValue(stub shim.ChaincodeStubInterface) pb.Response { - _, parameters := stub.GetFunctionAndParameters() - if len(parameters) != 2 { - return shim.Error("Wrong number of arguments supplied.") - } - - // set the value - err := stub.PutState(parameters[0], []byte(parameters[1])) - if err != nil { - return shim.Error(err.Error()) - } - - return shim.Success([]byte{}) -} - -func setEP(stub shim.ChaincodeStubInterface, key string, orgs ...string) error { - ep, err := statebased.NewStateEP(nil) - if err != nil { - return err - } - // add organizations to endorsement policy - err = ep.AddOrgs(statebased.RoleTypePeer, orgs...) - if err != nil { - return err - } - epBytes, err := ep.Policy() - if err != nil { - return err - } - // set the endorsement policy for the key - err = stub.SetStateValidationParameter(key, epBytes) - if err != nil { - return err - } - return nil -} - -func getRecord(stub shim.ChaincodeStubInterface) pb.Response { - _, parameters := stub.GetFunctionAndParameters() - if len(parameters) != 1 { - return shim.Error("Wrong number of arguments supplied.") - } - key := parameters[0] - - // get the endorsement policy for the key - epBytes, err := stub.GetStateValidationParameter(key) - if err != nil { - return shim.Error(err.Error()) - } - ep, err := statebased.NewStateEP(epBytes) - if err != nil { - return shim.Error(err.Error()) - } - - // get the value of the key - val, err := stub.GetState(key) - if err != nil { - return shim.Error(err.Error()) - } - - // put it into the json - r := &Record{ - Key: key, - Value: val, - Orgs: ep.ListOrgs(), - } - rBytes, err := json.Marshal(r) - if err != nil { - return shim.Error(err.Error()) - } - - return shim.Success(rBytes) -} - -func main() { - err := shim.Start(new(EndorsementCC)) - if err != nil { - fmt.Printf("Error starting new cc: %s", err) - } -} \ No newline at end of file diff --git 
a/app/platform/fabric/e2e-test/chaincodes/sbe/chaincode_test.go b/app/platform/fabric/e2e-test/chaincodes/sbe/chaincode_test.go deleted file mode 100644 index b826954ee..000000000 --- a/app/platform/fabric/e2e-test/chaincodes/sbe/chaincode_test.go +++ /dev/null @@ -1,68 +0,0 @@ -/* -Copyright IBM Corp. All Rights Reserved. - -SPDX-License-Identifier: Apache-2.0 -*/ - -package main - -import ( - "encoding/json" - "sort" - "testing" - - "github.com/hyperledger/fabric/core/chaincode/shim" - "github.com/stretchr/testify/assert" -) - -func TestDispatch(t *testing.T) { - cc := new(EndorsementCC) - stub := shim.NewMockStub("ecc", cc) - - res := stub.MockInvoke("1", [][]byte{[]byte("unknown")}) - assert.Equal(t, int32(shim.ERROR), res.Status) -} - -func TestCreateRecord(t *testing.T) { - cc := new(EndorsementCC) - stub := shim.NewMockStub("ecc", cc) - - // create the record and set its value - res := stub.MockInvoke("1", [][]byte{[]byte("updateRecordVal"), []byte("foo"), []byte("bar")}) - assert.Equal(t, int32(shim.OK), res.Status) - - // retrieve the record - res = stub.MockInvoke("2", [][]byte{[]byte("getRecord"), []byte("foo")}) - assert.Equal(t, int32(shim.OK), res.Status) - - // verify record - var record Record - err := json.Unmarshal(res.Payload, &record) - assert.NoError(t, err) - assert.Equal(t, []byte("bar"), record.Value) -} - -func TestCreateRecordWithEP(t *testing.T) { - cc := new(EndorsementCC) - stub := shim.NewMockStub("ecc", cc) - - // create the record and set its value - res := stub.MockInvoke("1", [][]byte{[]byte("updateRecordVal"), []byte("foo"), []byte("bar")}) - assert.Equal(t, int32(shim.OK), res.Status) - - // set the record's ep - res = stub.MockInvoke("2", [][]byte{[]byte("updateRecordEP"), []byte("foo"), []byte("org1"), []byte("org2")}) - assert.Equal(t, int32(shim.OK), res.Status) - - // retrieve the record - res = stub.MockInvoke("3", [][]byte{[]byte("getRecord"), []byte("foo")}) - assert.Equal(t, int32(shim.OK), res.Status) - - // verify 
record - var record Record - err := json.Unmarshal(res.Payload, &record) - assert.NoError(t, err) - assert.Equal(t, []byte("bar"), record.Value) - sort.Strings(record.Orgs) - assert.Equal(t, []string{"org1", "org2"}, record.Orgs) -} diff --git a/app/platform/fabric/e2e-test/chaincodes/shim-vendored/plob-1.0-shim/chaincode/data.pb.go b/app/platform/fabric/e2e-test/chaincodes/shim-vendored/plob-1.0-shim/chaincode/data.pb.go deleted file mode 100644 index 117bc6c09..000000000 --- a/app/platform/fabric/e2e-test/chaincodes/shim-vendored/plob-1.0-shim/chaincode/data.pb.go +++ /dev/null @@ -1,55 +0,0 @@ -// Code generated by protoc-gen-go. -// source: data.proto -// DO NOT EDIT! - -/* -Package main is a generated protocol buffer package. - -It is generated from these files: - data.proto - -It has these top-level messages: - PermissionedBlob -*/ -package main - -import proto "github.com/golang/protobuf/proto" -import fmt "fmt" -import math "math" - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. 
-const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package - -type PermissionedBlob struct { - Owner []byte `protobuf:"bytes,1,opt,name=Owner,proto3" json:"Owner,omitempty"` - Blob []byte `protobuf:"bytes,2,opt,name=Blob,proto3" json:"Blob,omitempty"` -} - -func (m *PermissionedBlob) Reset() { *m = PermissionedBlob{} } -func (m *PermissionedBlob) String() string { return proto.CompactTextString(m) } -func (*PermissionedBlob) ProtoMessage() {} -func (*PermissionedBlob) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } - -func init() { - proto.RegisterType((*PermissionedBlob)(nil), "main.PermissionedBlob") -} - -func init() { proto.RegisterFile("data.proto", fileDescriptor0) } - -var fileDescriptor0 = []byte{ - // 96 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xe2, 0xe2, 0x4a, 0x49, 0x2c, 0x49, - 0xd4, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0x62, 0xc9, 0x4d, 0xcc, 0xcc, 0x53, 0xb2, 0xe1, 0x12, - 0x08, 0x48, 0x2d, 0xca, 0xcd, 0x2c, 0x2e, 0xce, 0xcc, 0xcf, 0x4b, 0x4d, 0x71, 0xca, 0xc9, 0x4f, - 0x12, 0x12, 0xe1, 0x62, 0xf5, 0x2f, 0xcf, 0x4b, 0x2d, 0x92, 0x60, 0x54, 0x60, 0xd4, 0xe0, 0x09, - 0x82, 0x70, 0x84, 0x84, 0xb8, 0x58, 0x40, 0xb2, 0x12, 0x4c, 0x60, 0x41, 0x30, 0x3b, 0x89, 0x0d, - 0x6c, 0x94, 0x31, 0x20, 0x00, 0x00, 0xff, 0xff, 0x0a, 0xe0, 0xd3, 0xcf, 0x58, 0x00, 0x00, 0x00, -} diff --git a/app/platform/fabric/e2e-test/chaincodes/shim-vendored/plob-1.0-shim/chaincode/data.proto b/app/platform/fabric/e2e-test/chaincodes/shim-vendored/plob-1.0-shim/chaincode/data.proto deleted file mode 100644 index f0de36901..000000000 --- a/app/platform/fabric/e2e-test/chaincodes/shim-vendored/plob-1.0-shim/chaincode/data.proto +++ /dev/null @@ -1,24 +0,0 @@ -/* -Copyright IBM Corp. 2017 All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -syntax = "proto3"; - -package main; - -message PermissionedBlob { - bytes Owner = 1; - bytes Blob = 2; -} diff --git a/app/platform/fabric/e2e-test/chaincodes/shim-vendored/plob-1.0-shim/chaincode/plob.go b/app/platform/fabric/e2e-test/chaincodes/shim-vendored/plob-1.0-shim/chaincode/plob.go deleted file mode 100644 index 2c3704ff5..000000000 --- a/app/platform/fabric/e2e-test/chaincodes/shim-vendored/plob-1.0-shim/chaincode/plob.go +++ /dev/null @@ -1,182 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ - -package main - -import ( - "bytes" - "fmt" - - "github.com/hyperledger/fabric/core/chaincode/shim" - sc "github.com/hyperledger/fabric/protos/peer" - - "github.com/golang/protobuf/proto" -) - -// Define the Smart Contract structure -type PermissionedBlobRegistry struct{} - -// Init is called when the chaincode is instantiatied, for now, a no-op -func (s *PermissionedBlobRegistry) Init(stub shim.ChaincodeStubInterface) sc.Response { - return shim.Success(nil) -} - -// Invoke allows for the manipulation of blobs, either creation or modification -func (s *PermissionedBlobRegistry) Invoke(stub shim.ChaincodeStubInterface) sc.Response { - - args := stub.GetArgs() - - if len(args) == 0 { - return shim.Error("Invoke called with no arguments") - } - - // Route to the appropriate handler function to interact with the ledger appropriately - switch string(args[0]) { - case "query": - return s.queryBlob(stub, args[1:]) - case "set": - return s.setBlob(stub, args[1:]) - case "delete": - return s.deleteBlob(stub, args[1:]) - default: - return shim.Error("Invalid invocation function") - } -} - -// setBlob expects 2 args, id and blob -func (s *PermissionedBlobRegistry) setBlob(stub shim.ChaincodeStubInterface, args [][]byte) sc.Response { - - if len(args) != 2 { - return shim.Error("Incorrect number of arguments. 
Expecting 2") - } - - key := string(args[0]) - - creator, err := stub.GetCreator() - if err != nil { - return shim.Error(fmt.Sprintf("Could not get creator: %s", err)) - } - - valueBytes, err := stub.GetState(key) - if err != nil { - return shim.Error(fmt.Sprintf("Could not get state for key %s: %s", key, err)) - } - - // If the key already exists, make sure this user is the owner of that key before allowing a write - if valueBytes != nil { - plob := &PermissionedBlob{} - err = proto.Unmarshal(valueBytes, plob) - if err != nil { - return shim.Error(fmt.Sprintf("Unexpected error unmarshaling: %s", err)) - } - - if !bytes.Equal(plob.Owner, creator) { - return shim.Error(fmt.Sprintf("Not authorized to modify key %s", key)) - } - } - - plob := &PermissionedBlob{ - Owner: creator, - Blob: args[1], - } - - asBytes, err := proto.Marshal(plob) - if err != nil { - return shim.Error(fmt.Sprintf("Unexpected error marshaling: %s", err)) - } - - err = stub.PutState(key, asBytes) - if err != nil { - return shim.Error(fmt.Sprintf("Could not put state for key %s: %s", key, err)) - } - - return shim.Success(nil) -} - -// queryBlob expects 1 arg, the blob's key -func (s *PermissionedBlobRegistry) queryBlob(stub shim.ChaincodeStubInterface, args [][]byte) sc.Response { - if len(args) != 1 { - return shim.Error("Incorrect number of arguments. 
Expecting 1") - } - - key := string(args[0]) - - plobBytes, err := stub.GetState(key) - if err != nil { - return shim.Error(fmt.Sprintf("Could not get state for key %s: %s", key, err)) - } - - plob := &PermissionedBlob{} - err = proto.Unmarshal(plobBytes, plob) - if err != nil { - return shim.Error(fmt.Sprintf("Unexpected error unmarshaling: %s", err)) - } - - // No need to check permission, anyone who can read the blockchain can also read all the blobs - - return shim.Success(plob.Blob) -} - -// deleteBlob expects 1 arg, the blob's key -func (s *PermissionedBlobRegistry) deleteBlob(stub shim.ChaincodeStubInterface, args [][]byte) sc.Response { - if len(args) != 1 { - return shim.Error("Incorrect number of arguments. Expecting 1") - } - - creator, err := stub.GetCreator() - if err != nil { - return shim.Error(fmt.Sprintf("Could not get creator: %s", err)) - } - - key := string(args[0]) - - plobBytes, err := stub.GetState(key) - if err != nil { - return shim.Error(fmt.Sprintf("Could not get state for key %s: %s", key, err)) - } - - if plobBytes == nil { - return shim.Error(fmt.Sprintf("Key %s did not already exist", key)) - } - - plob := &PermissionedBlob{} - err = proto.Unmarshal(plobBytes, plob) - if err != nil { - shim.Error(fmt.Sprintf("Unexpected error unmarshaling: %s", err)) - } - - // Make sure the requestor is the owner - if !bytes.Equal(plob.Owner, creator) { - return shim.Error(fmt.Sprintf("Not authorized to modify key %s", key)) - } - - err = stub.DelState(key) - if err != nil { - return shim.Error(fmt.Sprintf("Error deleting state for key %s", key)) - } - - return shim.Success(nil) -} - -// main function starts up the chaincode in the container during instantiate -func main() { - if err := shim.Start(new(PermissionedBlobRegistry)); err != nil { - fmt.Printf("Error starting PermissionedBlobRegistry chaincode: %s", err) - } -} diff --git a/app/platform/fabric/e2e-test/chaincodes/shim-vendored/plob-1.0-shim/chaincode/vendor.zip 
b/app/platform/fabric/e2e-test/chaincodes/shim-vendored/plob-1.0-shim/chaincode/vendor.zip deleted file mode 100644 index e49efee6b..000000000 Binary files a/app/platform/fabric/e2e-test/chaincodes/shim-vendored/plob-1.0-shim/chaincode/vendor.zip and /dev/null differ diff --git a/app/platform/fabric/e2e-test/chaincodes/shim-vendored/plob-1.1-shim/chaincode/data.pb.go b/app/platform/fabric/e2e-test/chaincodes/shim-vendored/plob-1.1-shim/chaincode/data.pb.go deleted file mode 100644 index 117bc6c09..000000000 --- a/app/platform/fabric/e2e-test/chaincodes/shim-vendored/plob-1.1-shim/chaincode/data.pb.go +++ /dev/null @@ -1,55 +0,0 @@ -// Code generated by protoc-gen-go. -// source: data.proto -// DO NOT EDIT! - -/* -Package main is a generated protocol buffer package. - -It is generated from these files: - data.proto - -It has these top-level messages: - PermissionedBlob -*/ -package main - -import proto "github.com/golang/protobuf/proto" -import fmt "fmt" -import math "math" - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. 
-const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package - -type PermissionedBlob struct { - Owner []byte `protobuf:"bytes,1,opt,name=Owner,proto3" json:"Owner,omitempty"` - Blob []byte `protobuf:"bytes,2,opt,name=Blob,proto3" json:"Blob,omitempty"` -} - -func (m *PermissionedBlob) Reset() { *m = PermissionedBlob{} } -func (m *PermissionedBlob) String() string { return proto.CompactTextString(m) } -func (*PermissionedBlob) ProtoMessage() {} -func (*PermissionedBlob) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } - -func init() { - proto.RegisterType((*PermissionedBlob)(nil), "main.PermissionedBlob") -} - -func init() { proto.RegisterFile("data.proto", fileDescriptor0) } - -var fileDescriptor0 = []byte{ - // 96 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xe2, 0xe2, 0x4a, 0x49, 0x2c, 0x49, - 0xd4, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0x62, 0xc9, 0x4d, 0xcc, 0xcc, 0x53, 0xb2, 0xe1, 0x12, - 0x08, 0x48, 0x2d, 0xca, 0xcd, 0x2c, 0x2e, 0xce, 0xcc, 0xcf, 0x4b, 0x4d, 0x71, 0xca, 0xc9, 0x4f, - 0x12, 0x12, 0xe1, 0x62, 0xf5, 0x2f, 0xcf, 0x4b, 0x2d, 0x92, 0x60, 0x54, 0x60, 0xd4, 0xe0, 0x09, - 0x82, 0x70, 0x84, 0x84, 0xb8, 0x58, 0x40, 0xb2, 0x12, 0x4c, 0x60, 0x41, 0x30, 0x3b, 0x89, 0x0d, - 0x6c, 0x94, 0x31, 0x20, 0x00, 0x00, 0xff, 0xff, 0x0a, 0xe0, 0xd3, 0xcf, 0x58, 0x00, 0x00, 0x00, -} diff --git a/app/platform/fabric/e2e-test/chaincodes/shim-vendored/plob-1.1-shim/chaincode/data.proto b/app/platform/fabric/e2e-test/chaincodes/shim-vendored/plob-1.1-shim/chaincode/data.proto deleted file mode 100644 index f0de36901..000000000 --- a/app/platform/fabric/e2e-test/chaincodes/shim-vendored/plob-1.1-shim/chaincode/data.proto +++ /dev/null @@ -1,24 +0,0 @@ -/* -Copyright IBM Corp. 2017 All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -syntax = "proto3"; - -package main; - -message PermissionedBlob { - bytes Owner = 1; - bytes Blob = 2; -} diff --git a/app/platform/fabric/e2e-test/chaincodes/shim-vendored/plob-1.1-shim/chaincode/plob.go b/app/platform/fabric/e2e-test/chaincodes/shim-vendored/plob-1.1-shim/chaincode/plob.go deleted file mode 100644 index 2c3704ff5..000000000 --- a/app/platform/fabric/e2e-test/chaincodes/shim-vendored/plob-1.1-shim/chaincode/plob.go +++ /dev/null @@ -1,182 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ - -package main - -import ( - "bytes" - "fmt" - - "github.com/hyperledger/fabric/core/chaincode/shim" - sc "github.com/hyperledger/fabric/protos/peer" - - "github.com/golang/protobuf/proto" -) - -// Define the Smart Contract structure -type PermissionedBlobRegistry struct{} - -// Init is called when the chaincode is instantiatied, for now, a no-op -func (s *PermissionedBlobRegistry) Init(stub shim.ChaincodeStubInterface) sc.Response { - return shim.Success(nil) -} - -// Invoke allows for the manipulation of blobs, either creation or modification -func (s *PermissionedBlobRegistry) Invoke(stub shim.ChaincodeStubInterface) sc.Response { - - args := stub.GetArgs() - - if len(args) == 0 { - return shim.Error("Invoke called with no arguments") - } - - // Route to the appropriate handler function to interact with the ledger appropriately - switch string(args[0]) { - case "query": - return s.queryBlob(stub, args[1:]) - case "set": - return s.setBlob(stub, args[1:]) - case "delete": - return s.deleteBlob(stub, args[1:]) - default: - return shim.Error("Invalid invocation function") - } -} - -// setBlob expects 2 args, id and blob -func (s *PermissionedBlobRegistry) setBlob(stub shim.ChaincodeStubInterface, args [][]byte) sc.Response { - - if len(args) != 2 { - return shim.Error("Incorrect number of arguments. 
Expecting 2") - } - - key := string(args[0]) - - creator, err := stub.GetCreator() - if err != nil { - return shim.Error(fmt.Sprintf("Could not get creator: %s", err)) - } - - valueBytes, err := stub.GetState(key) - if err != nil { - return shim.Error(fmt.Sprintf("Could not get state for key %s: %s", key, err)) - } - - // If the key already exists, make sure this user is the owner of that key before allowing a write - if valueBytes != nil { - plob := &PermissionedBlob{} - err = proto.Unmarshal(valueBytes, plob) - if err != nil { - return shim.Error(fmt.Sprintf("Unexpected error unmarshaling: %s", err)) - } - - if !bytes.Equal(plob.Owner, creator) { - return shim.Error(fmt.Sprintf("Not authorized to modify key %s", key)) - } - } - - plob := &PermissionedBlob{ - Owner: creator, - Blob: args[1], - } - - asBytes, err := proto.Marshal(plob) - if err != nil { - return shim.Error(fmt.Sprintf("Unexpected error marshaling: %s", err)) - } - - err = stub.PutState(key, asBytes) - if err != nil { - return shim.Error(fmt.Sprintf("Could not put state for key %s: %s", key, err)) - } - - return shim.Success(nil) -} - -// queryBlob expects 1 arg, the blob's key -func (s *PermissionedBlobRegistry) queryBlob(stub shim.ChaincodeStubInterface, args [][]byte) sc.Response { - if len(args) != 1 { - return shim.Error("Incorrect number of arguments. 
Expecting 1") - } - - key := string(args[0]) - - plobBytes, err := stub.GetState(key) - if err != nil { - return shim.Error(fmt.Sprintf("Could not get state for key %s: %s", key, err)) - } - - plob := &PermissionedBlob{} - err = proto.Unmarshal(plobBytes, plob) - if err != nil { - return shim.Error(fmt.Sprintf("Unexpected error unmarshaling: %s", err)) - } - - // No need to check permission, anyone who can read the blockchain can also read all the blobs - - return shim.Success(plob.Blob) -} - -// deleteBlob expects 1 arg, the blob's key -func (s *PermissionedBlobRegistry) deleteBlob(stub shim.ChaincodeStubInterface, args [][]byte) sc.Response { - if len(args) != 1 { - return shim.Error("Incorrect number of arguments. Expecting 1") - } - - creator, err := stub.GetCreator() - if err != nil { - return shim.Error(fmt.Sprintf("Could not get creator: %s", err)) - } - - key := string(args[0]) - - plobBytes, err := stub.GetState(key) - if err != nil { - return shim.Error(fmt.Sprintf("Could not get state for key %s: %s", key, err)) - } - - if plobBytes == nil { - return shim.Error(fmt.Sprintf("Key %s did not already exist", key)) - } - - plob := &PermissionedBlob{} - err = proto.Unmarshal(plobBytes, plob) - if err != nil { - shim.Error(fmt.Sprintf("Unexpected error unmarshaling: %s", err)) - } - - // Make sure the requestor is the owner - if !bytes.Equal(plob.Owner, creator) { - return shim.Error(fmt.Sprintf("Not authorized to modify key %s", key)) - } - - err = stub.DelState(key) - if err != nil { - return shim.Error(fmt.Sprintf("Error deleting state for key %s", key)) - } - - return shim.Success(nil) -} - -// main function starts up the chaincode in the container during instantiate -func main() { - if err := shim.Start(new(PermissionedBlobRegistry)); err != nil { - fmt.Printf("Error starting PermissionedBlobRegistry chaincode: %s", err) - } -} diff --git a/app/platform/fabric/e2e-test/chaincodes/shim-vendored/plob-1.1-shim/chaincode/vendor.zip 
b/app/platform/fabric/e2e-test/chaincodes/shim-vendored/plob-1.1-shim/chaincode/vendor.zip deleted file mode 100644 index a13475fc7..000000000 Binary files a/app/platform/fabric/e2e-test/chaincodes/shim-vendored/plob-1.1-shim/chaincode/vendor.zip and /dev/null differ diff --git a/app/platform/fabric/e2e-test/chaincodes/shimApiDriver/go/shimAPIDriver.go b/app/platform/fabric/e2e-test/chaincodes/shimApiDriver/go/shimAPIDriver.go deleted file mode 100644 index ec557d343..000000000 --- a/app/platform/fabric/e2e-test/chaincodes/shimApiDriver/go/shimAPIDriver.go +++ /dev/null @@ -1,212 +0,0 @@ -/* -# -# copyright IBM Corp. All Rights Reserved. -# -# SPDX-License-Identifier: Apache-2.0 -# -*/ - -package main - -import ( - "bytes" - "encoding/gob" - "fmt" - "github.com/golang/protobuf/proto" - "github.com/hyperledger/fabric/common/tools/protolator" - "github.com/hyperledger/fabric/protos/msp" - "time" - - "github.com/hyperledger/fabric/core/chaincode/shim" - pb "github.com/hyperledger/fabric/protos/peer" -) - -// SimpleChaincode example simple Chaincode implementation -type SimpleChaincode struct { -} - -// =================================================================================== -// Main -// =================================================================================== -func main() { - err := shim.Start(new(SimpleChaincode)) - if err != nil { - fmt.Printf("Error starting Simple chaincode: %s", err) - } -} - -// Init initializes chaincode -// =========================== -func (t *SimpleChaincode) Init(stub shim.ChaincodeStubInterface) pb.Response { - return shim.Success(nil) -} - -// Invoke - Our entry point for Invocations -// ======================================== -func (t *SimpleChaincode) Invoke(stub shim.ChaincodeStubInterface) pb.Response { - - fmt.Println("\n\nex02 Invoke") - - fmt.Printf("Begin*** GetArgs \n") - //check for getArgs and getSttringArgs - args_take1 := stub.GetArgs() - - for key, currArg := range args_take1 { - 
fmt.Printf("args_take1[%d] := %s\n", key, string(currArg)) - } - fmt.Printf("End*** GetArgs \n\n") - - fmt.Printf("Begin*** GetStringArgs \n") - params := stub.GetStringArgs() - fmt.Printf("args_take2 := %s \n", params) - fmt.Printf("End*** GetStringArgs \n\n") - - fmt.Printf("Begin*** GetArgsSlice \n") - argsSlice, err := stub.GetArgsSlice() - if err != nil { - fmt.Printf("Error in argsSlice := %v \n", err) - } - - if err == nil { - fmt.Printf("argsSlice := %v \n", string(argsSlice)) - } - fmt.Printf("End*** GetArgsSlice\n\n") - - fmt.Printf("Begin*** GetFunctionAndParameters \n") - function, args := stub.GetFunctionAndParameters() - fmt.Printf("function := %s, args := %s \n", function, args) - fmt.Printf("End*** GetFunctionAndParameters\n\n") - - // Handle different functions - if function == "getTxTimeStamp" { - return t.getTxTimeStamp(stub) - } else if function == "getCreator" { - return t.getCreator(stub) - } else if function == "getBinding" { - return t.getBinding(stub) - } else if function == "getSignedProposal" { - return t.getSignedProposal(stub) - } else if function == "getTransient" { - return t.getTransient(stub) - } - - fmt.Println("invoke did not find func: " + function) //error - return shim.Error("Received unknown function invocation") -} - -//=================================================================================================== -// functon getCreator -// You can verify by calling getCreator during initMarble and checking fot the value -// during a transferMarble say -//=================================================================================================== - -func (t *SimpleChaincode) getCreator(stub shim.ChaincodeStubInterface) pb.Response { - - fmt.Printf("\nBegin*** getCreator \n") - creator, err := stub.GetCreator() - if err != nil { - fmt.Printf("GetCreator Error") - return shim.Error(err.Error()) - } - - si := &msp.SerializedIdentity{} - err2 := proto.Unmarshal(creator, si) - if err2 != nil { - fmt.Printf("Proto 
Unmarshal Error") - return shim.Error(err2.Error()) - } - buf := &bytes.Buffer{} - protolator.DeepMarshalJSON(buf, si) - fmt.Printf("End*** getCreator \n") - fmt.Printf(string(buf.Bytes())) - - return shim.Success([]byte(buf.Bytes())) -} - -//=================================================================================================== -// functon to getBinding -//=================================================================================================== -func (t *SimpleChaincode) getBinding(stub shim.ChaincodeStubInterface) pb.Response { - fmt.Printf("\nBegin*** getBinding \n") - binding, err := stub.GetBinding() - if err != nil { - fmt.Printf("Returning error ****************** ") - return shim.Error(err.Error()) - } else if binding == nil { - fmt.Printf("###### No Transaction Binding is generated ###### ") - return shim.Error("###### No Transaction Binding is generated ###### ") - } - fmt.Printf("\t returned value from stub : %v\n", binding) - fmt.Printf("End*** getBinding \n") - return shim.Success(binding) -} - -//=================================================================================================== -// functon to getTxTimestamp -// in the time that is associated with current invoke on channel -//=================================================================================================== -func (t *SimpleChaincode) getTxTimeStamp(stub shim.ChaincodeStubInterface) pb.Response { - fmt.Printf("\nBegin*** getTxTimeStamp \n") - txTimeAsPtr, err := stub.GetTxTimestamp() - if err != nil { - fmt.Printf("Returning error ******************\n") - return shim.Error(err.Error()) - } - fmt.Printf("\t returned value from stub: %v\n", txTimeAsPtr) - fmt.Printf("\t After converting time to Unix format %s \n", time.Unix(txTimeAsPtr.Seconds, int64(txTimeAsPtr.Nanos)).String()) - fmt.Printf("\nEnd*** getTxTimeStamp \n") - //return shim.Success([]byte(txTimeAsPtr)) - return shim.Success(nil) -} - 
-//=================================================================================================== -// functon to getTransient -// got to pass these variables during invoke in a transient map -// these values are not stored on ledger -//=================================================================================================== -func (t *SimpleChaincode) getTransient(stub shim.ChaincodeStubInterface) pb.Response { - fmt.Printf("\nBegin*** getTransient \n") - payload, err := stub.GetTransient() - fmt.Printf(" payload from chaincode : %v", payload) - if err != nil { - return shim.Error(err.Error()) - } - for key, currArg := range payload { - fmt.Printf("Inside ... Loop") - fmt.Printf("payload[%d] := %s\n", key, currArg) - } - b, err2 := GetBytes(payload) - if err2 != nil { - return shim.Error(err2.Error()) - } - fmt.Printf("\nEnd*** getTransient \n") - return shim.Success([]byte(b)) -} - -//=================================================================================================== -// functon to getSignedProposal -//=================================================================================================== -func (t *SimpleChaincode) getSignedProposal(stub shim.ChaincodeStubInterface) pb.Response { - fmt.Printf("\nBegin*** getSignedProposal \n") - signedProposal, err := stub.GetSignedProposal() - if err != nil { - fmt.Printf("Returning error ****************** ") - return shim.Error(err.Error()) - } - fmt.Printf("\t returned value from stub: %v", signedProposal) - fmt.Printf("\nEnd*** getSignedProposal \n") - buf := &bytes.Buffer{} - protolator.DeepMarshalJSON(buf, signedProposal) - fmt.Printf(string(buf.Bytes())) - return shim.Success([]byte(buf.Bytes())) -} - -func GetBytes(key interface{}) ([]byte, error) { - var buf bytes.Buffer - enc := gob.NewEncoder(&buf) - err := enc.Encode(key) - if err != nil { - return nil, err - } - return buf.Bytes(), nil -} diff --git a/app/platform/fabric/e2e-test/chaincodes/shimApiDriver/node/package.json 
b/app/platform/fabric/e2e-test/chaincodes/shimApiDriver/node/package.json deleted file mode 100644 index fa938ec34..000000000 --- a/app/platform/fabric/e2e-test/chaincodes/shimApiDriver/node/package.json +++ /dev/null @@ -1,18 +0,0 @@ -{ - "name": "fabcar", - "version": "1.0.0", - "description": "chaincode to demonstrate shim apis in node.js", - "engines": { - "node": ">=8.4.0", - "npm": ">=5.3.0" - }, - "scripts": { - "start": "node shimApiDriver.js" - }, - "engine-strict": true, - "engineStrict": true, - "license": "Apache-2.0", - "dependencies": { - "fabric-shim": "unstable" - } -} diff --git a/app/platform/fabric/e2e-test/chaincodes/shimApiDriver/node/shimApiDriver.js b/app/platform/fabric/e2e-test/chaincodes/shimApiDriver/node/shimApiDriver.js deleted file mode 100644 index a1f52b5a8..000000000 --- a/app/platform/fabric/e2e-test/chaincodes/shimApiDriver/node/shimApiDriver.js +++ /dev/null @@ -1,190 +0,0 @@ -/* -# Copyright IBM Corp. All Rights Reserved. -# -# SPDX-License-Identifier: Apache-2.0 -*/ - -// This chaincode demonstrates the following ChaincodeStubInterface APIs -// More details can be obtained from the following link: -// https://github.com/hyperledger/fabric-chaincode-node/blob/master/src/lib/stub.js -// -// getArgs -- arguments intended for the chaincode Init and Invoke -// as an array of byte arrays -// getStringArgs -- arguments intended for the chaincode Init and -// Invoke as a string array -// getFunctionAndParameters -- returns the first argument as the function -// name and the rest of the arguments as parameters -// in a string array -// getTxID -- the Transaction ID of the transaction proposal -// invokeChaincode -- call the specified chaincode `Invoke` using the -// same transaction context -// getCreator -- returns `SignatureHeader.Creator` (e.g. an identity) -// of the `SignedProposal` -// getTransient -- It is a map that contains data (e.g. 
cryptographic material) -// that might be used to implement some form of application-level -// confidentiality. -// getBinding -- returns the transaction binding -// getSignedProposal -- returns the SignedProposal object, which contains all -// data elements part of a transaction proposal -// getTxTimestamp -- returns the timestamp when the transaction was created -// This is extracted from transaction ChannelHeader -// setEvent -- If the transaction is validated and successfully committed, -// the event will be delivered to the current event listeners - -const shim = require('fabric-shim'); -const util = require('util'); -const path = require('path'); - -var Chaincode = class { - - // Initialize the chaincode - async Init(stub) { - let fileName = __filename.slice(__filename.lastIndexOf(path.sep)+1, __filename.length -3); - console.info('========= Instantiated chaincode '+fileName+' ========='); - return shim.success(); - } - - // Invoke, to update or query the ledger in a proposal transaction. - async Invoke(stub) { - let ret = stub.getFunctionAndParameters(); - let method = this[ret.fcn]; - if (!method) { - console.log('no method of name:' + ret.fcn + ' found'); - return shim.success(); - } - try { - let payload = await method(stub, this, ret.params); - return shim.success(payload); - } catch (err) { - console.log(err); - return shim.error(err); - } - } - - // ======================================================== - // Input Sanitation - input checking, look for empty strings - // ======================================================== - sanitizeArgs(args, count) { - if (args.length != count){ - throw new Error('Incorrect number of arguments. Expecting '+count); - } - for (let i=0;i 0 { - out.FieldByName("XXX_unrecognized").SetBytes(append([]byte(nil), uin...)) - } -} - -// mergeAny performs a merge between two values of the same type. -// viaPtr indicates whether the values were indirected through a pointer (implying proto2). 
-// prop is set if this is a struct field (it may be nil). -func mergeAny(out, in reflect.Value, viaPtr bool, prop *Properties) { - if in.Type() == protoMessageType { - if !in.IsNil() { - if out.IsNil() { - out.Set(reflect.ValueOf(Clone(in.Interface().(Message)))) - } else { - Merge(out.Interface().(Message), in.Interface().(Message)) - } - } - return - } - switch in.Kind() { - case reflect.Bool, reflect.Float32, reflect.Float64, reflect.Int32, reflect.Int64, - reflect.String, reflect.Uint32, reflect.Uint64: - if !viaPtr && isProto3Zero(in) { - return - } - out.Set(in) - case reflect.Interface: - // Probably a oneof field; copy non-nil values. - if in.IsNil() { - return - } - // Allocate destination if it is not set, or set to a different type. - // Otherwise we will merge as normal. - if out.IsNil() || out.Elem().Type() != in.Elem().Type() { - out.Set(reflect.New(in.Elem().Elem().Type())) // interface -> *T -> T -> new(T) - } - mergeAny(out.Elem(), in.Elem(), false, nil) - case reflect.Map: - if in.Len() == 0 { - return - } - if out.IsNil() { - out.Set(reflect.MakeMap(in.Type())) - } - // For maps with value types of *T or []byte we need to deep copy each value. - elemKind := in.Type().Elem().Kind() - for _, key := range in.MapKeys() { - var val reflect.Value - switch elemKind { - case reflect.Ptr: - val = reflect.New(in.Type().Elem().Elem()) - mergeAny(val, in.MapIndex(key), false, nil) - case reflect.Slice: - val = in.MapIndex(key) - val = reflect.ValueOf(append([]byte{}, val.Bytes()...)) - default: - val = in.MapIndex(key) - } - out.SetMapIndex(key, val) - } - case reflect.Ptr: - if in.IsNil() { - return - } - if out.IsNil() { - out.Set(reflect.New(in.Elem().Type())) - } - mergeAny(out.Elem(), in.Elem(), true, nil) - case reflect.Slice: - if in.IsNil() { - return - } - if in.Type().Elem().Kind() == reflect.Uint8 { - // []byte is a scalar bytes field, not a repeated field. 
- - // Edge case: if this is in a proto3 message, a zero length - // bytes field is considered the zero value, and should not - // be merged. - if prop != nil && prop.proto3 && in.Len() == 0 { - return - } - - // Make a deep copy. - // Append to []byte{} instead of []byte(nil) so that we never end up - // with a nil result. - out.SetBytes(append([]byte{}, in.Bytes()...)) - return - } - n := in.Len() - if out.IsNil() { - out.Set(reflect.MakeSlice(in.Type(), 0, n)) - } - switch in.Type().Elem().Kind() { - case reflect.Bool, reflect.Float32, reflect.Float64, reflect.Int32, reflect.Int64, - reflect.String, reflect.Uint32, reflect.Uint64: - out.Set(reflect.AppendSlice(out, in)) - default: - for i := 0; i < n; i++ { - x := reflect.Indirect(reflect.New(in.Type().Elem())) - mergeAny(x, in.Index(i), false, nil) - out.Set(reflect.Append(out, x)) - } - } - case reflect.Struct: - mergeStruct(out, in) - default: - // unknown type, so not a protocol buffer - log.Printf("proto: don't know how to copy %v", in) - } -} - -func mergeExtension(out, in map[int32]Extension) { - for extNum, eIn := range in { - eOut := Extension{desc: eIn.desc} - if eIn.value != nil { - v := reflect.New(reflect.TypeOf(eIn.value)).Elem() - mergeAny(v, reflect.ValueOf(eIn.value), false, nil) - eOut.value = v.Interface() - } - if eIn.enc != nil { - eOut.enc = make([]byte, len(eIn.enc)) - copy(eOut.enc, eIn.enc) - } - - out[extNum] = eOut - } -} diff --git a/app/platform/fabric/e2e-test/fabric-samples/chaincode/abac/go/vendor/github.com/golang/protobuf/proto/decode.go b/app/platform/fabric/e2e-test/fabric-samples/chaincode/abac/go/vendor/github.com/golang/protobuf/proto/decode.go deleted file mode 100644 index 63b0f08be..000000000 --- a/app/platform/fabric/e2e-test/fabric-samples/chaincode/abac/go/vendor/github.com/golang/protobuf/proto/decode.go +++ /dev/null @@ -1,427 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2010 The Go Authors. All rights reserved. 
-// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package proto - -/* - * Routines for decoding protocol buffer data to construct in-memory representations. - */ - -import ( - "errors" - "fmt" - "io" -) - -// errOverflow is returned when an integer is too large to be represented. -var errOverflow = errors.New("proto: integer overflow") - -// ErrInternalBadWireType is returned by generated code when an incorrect -// wire type is encountered. It does not get returned to user code. 
-var ErrInternalBadWireType = errors.New("proto: internal error: bad wiretype for oneof") - -// DecodeVarint reads a varint-encoded integer from the slice. -// It returns the integer and the number of bytes consumed, or -// zero if there is not enough. -// This is the format for the -// int32, int64, uint32, uint64, bool, and enum -// protocol buffer types. -func DecodeVarint(buf []byte) (x uint64, n int) { - for shift := uint(0); shift < 64; shift += 7 { - if n >= len(buf) { - return 0, 0 - } - b := uint64(buf[n]) - n++ - x |= (b & 0x7F) << shift - if (b & 0x80) == 0 { - return x, n - } - } - - // The number is too large to represent in a 64-bit value. - return 0, 0 -} - -func (p *Buffer) decodeVarintSlow() (x uint64, err error) { - i := p.index - l := len(p.buf) - - for shift := uint(0); shift < 64; shift += 7 { - if i >= l { - err = io.ErrUnexpectedEOF - return - } - b := p.buf[i] - i++ - x |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - p.index = i - return - } - } - - // The number is too large to represent in a 64-bit value. - err = errOverflow - return -} - -// DecodeVarint reads a varint-encoded integer from the Buffer. -// This is the format for the -// int32, int64, uint32, uint64, bool, and enum -// protocol buffer types. 
-func (p *Buffer) DecodeVarint() (x uint64, err error) { - i := p.index - buf := p.buf - - if i >= len(buf) { - return 0, io.ErrUnexpectedEOF - } else if buf[i] < 0x80 { - p.index++ - return uint64(buf[i]), nil - } else if len(buf)-i < 10 { - return p.decodeVarintSlow() - } - - var b uint64 - // we already checked the first byte - x = uint64(buf[i]) - 0x80 - i++ - - b = uint64(buf[i]) - i++ - x += b << 7 - if b&0x80 == 0 { - goto done - } - x -= 0x80 << 7 - - b = uint64(buf[i]) - i++ - x += b << 14 - if b&0x80 == 0 { - goto done - } - x -= 0x80 << 14 - - b = uint64(buf[i]) - i++ - x += b << 21 - if b&0x80 == 0 { - goto done - } - x -= 0x80 << 21 - - b = uint64(buf[i]) - i++ - x += b << 28 - if b&0x80 == 0 { - goto done - } - x -= 0x80 << 28 - - b = uint64(buf[i]) - i++ - x += b << 35 - if b&0x80 == 0 { - goto done - } - x -= 0x80 << 35 - - b = uint64(buf[i]) - i++ - x += b << 42 - if b&0x80 == 0 { - goto done - } - x -= 0x80 << 42 - - b = uint64(buf[i]) - i++ - x += b << 49 - if b&0x80 == 0 { - goto done - } - x -= 0x80 << 49 - - b = uint64(buf[i]) - i++ - x += b << 56 - if b&0x80 == 0 { - goto done - } - x -= 0x80 << 56 - - b = uint64(buf[i]) - i++ - x += b << 63 - if b&0x80 == 0 { - goto done - } - - return 0, errOverflow - -done: - p.index = i - return x, nil -} - -// DecodeFixed64 reads a 64-bit integer from the Buffer. -// This is the format for the -// fixed64, sfixed64, and double protocol buffer types. -func (p *Buffer) DecodeFixed64() (x uint64, err error) { - // x, err already 0 - i := p.index + 8 - if i < 0 || i > len(p.buf) { - err = io.ErrUnexpectedEOF - return - } - p.index = i - - x = uint64(p.buf[i-8]) - x |= uint64(p.buf[i-7]) << 8 - x |= uint64(p.buf[i-6]) << 16 - x |= uint64(p.buf[i-5]) << 24 - x |= uint64(p.buf[i-4]) << 32 - x |= uint64(p.buf[i-3]) << 40 - x |= uint64(p.buf[i-2]) << 48 - x |= uint64(p.buf[i-1]) << 56 - return -} - -// DecodeFixed32 reads a 32-bit integer from the Buffer. 
-// This is the format for the -// fixed32, sfixed32, and float protocol buffer types. -func (p *Buffer) DecodeFixed32() (x uint64, err error) { - // x, err already 0 - i := p.index + 4 - if i < 0 || i > len(p.buf) { - err = io.ErrUnexpectedEOF - return - } - p.index = i - - x = uint64(p.buf[i-4]) - x |= uint64(p.buf[i-3]) << 8 - x |= uint64(p.buf[i-2]) << 16 - x |= uint64(p.buf[i-1]) << 24 - return -} - -// DecodeZigzag64 reads a zigzag-encoded 64-bit integer -// from the Buffer. -// This is the format used for the sint64 protocol buffer type. -func (p *Buffer) DecodeZigzag64() (x uint64, err error) { - x, err = p.DecodeVarint() - if err != nil { - return - } - x = (x >> 1) ^ uint64((int64(x&1)<<63)>>63) - return -} - -// DecodeZigzag32 reads a zigzag-encoded 32-bit integer -// from the Buffer. -// This is the format used for the sint32 protocol buffer type. -func (p *Buffer) DecodeZigzag32() (x uint64, err error) { - x, err = p.DecodeVarint() - if err != nil { - return - } - x = uint64((uint32(x) >> 1) ^ uint32((int32(x&1)<<31)>>31)) - return -} - -// DecodeRawBytes reads a count-delimited byte buffer from the Buffer. -// This is the format used for the bytes protocol buffer -// type and for embedded messages. -func (p *Buffer) DecodeRawBytes(alloc bool) (buf []byte, err error) { - n, err := p.DecodeVarint() - if err != nil { - return nil, err - } - - nb := int(n) - if nb < 0 { - return nil, fmt.Errorf("proto: bad byte length %d", nb) - } - end := p.index + nb - if end < p.index || end > len(p.buf) { - return nil, io.ErrUnexpectedEOF - } - - if !alloc { - // todo: check if can get more uses of alloc=false - buf = p.buf[p.index:end] - p.index += nb - return - } - - buf = make([]byte, nb) - copy(buf, p.buf[p.index:]) - p.index += nb - return -} - -// DecodeStringBytes reads an encoded string from the Buffer. -// This is the format used for the proto2 string type. 
-func (p *Buffer) DecodeStringBytes() (s string, err error) { - buf, err := p.DecodeRawBytes(false) - if err != nil { - return - } - return string(buf), nil -} - -// Unmarshaler is the interface representing objects that can -// unmarshal themselves. The argument points to data that may be -// overwritten, so implementations should not keep references to the -// buffer. -// Unmarshal implementations should not clear the receiver. -// Any unmarshaled data should be merged into the receiver. -// Callers of Unmarshal that do not want to retain existing data -// should Reset the receiver before calling Unmarshal. -type Unmarshaler interface { - Unmarshal([]byte) error -} - -// newUnmarshaler is the interface representing objects that can -// unmarshal themselves. The semantics are identical to Unmarshaler. -// -// This exists to support protoc-gen-go generated messages. -// The proto package will stop type-asserting to this interface in the future. -// -// DO NOT DEPEND ON THIS. -type newUnmarshaler interface { - XXX_Unmarshal([]byte) error -} - -// Unmarshal parses the protocol buffer representation in buf and places the -// decoded result in pb. If the struct underlying pb does not match -// the data in buf, the results can be unpredictable. -// -// Unmarshal resets pb before starting to unmarshal, so any -// existing data in pb is always removed. Use UnmarshalMerge -// to preserve and append to existing data. -func Unmarshal(buf []byte, pb Message) error { - pb.Reset() - if u, ok := pb.(newUnmarshaler); ok { - return u.XXX_Unmarshal(buf) - } - if u, ok := pb.(Unmarshaler); ok { - return u.Unmarshal(buf) - } - return NewBuffer(buf).Unmarshal(pb) -} - -// UnmarshalMerge parses the protocol buffer representation in buf and -// writes the decoded result to pb. If the struct underlying pb does not match -// the data in buf, the results can be unpredictable. -// -// UnmarshalMerge merges into existing data in pb. -// Most code should use Unmarshal instead. 
-func UnmarshalMerge(buf []byte, pb Message) error { - if u, ok := pb.(newUnmarshaler); ok { - return u.XXX_Unmarshal(buf) - } - if u, ok := pb.(Unmarshaler); ok { - // NOTE: The history of proto have unfortunately been inconsistent - // whether Unmarshaler should or should not implicitly clear itself. - // Some implementations do, most do not. - // Thus, calling this here may or may not do what people want. - // - // See https://github.com/golang/protobuf/issues/424 - return u.Unmarshal(buf) - } - return NewBuffer(buf).Unmarshal(pb) -} - -// DecodeMessage reads a count-delimited message from the Buffer. -func (p *Buffer) DecodeMessage(pb Message) error { - enc, err := p.DecodeRawBytes(false) - if err != nil { - return err - } - return NewBuffer(enc).Unmarshal(pb) -} - -// DecodeGroup reads a tag-delimited group from the Buffer. -// StartGroup tag is already consumed. This function consumes -// EndGroup tag. -func (p *Buffer) DecodeGroup(pb Message) error { - b := p.buf[p.index:] - x, y := findEndGroup(b) - if x < 0 { - return io.ErrUnexpectedEOF - } - err := Unmarshal(b[:x], pb) - p.index += y - return err -} - -// Unmarshal parses the protocol buffer representation in the -// Buffer and places the decoded result in pb. If the struct -// underlying pb does not match the data in the buffer, the results can be -// unpredictable. -// -// Unlike proto.Unmarshal, this does not reset pb before starting to unmarshal. -func (p *Buffer) Unmarshal(pb Message) error { - // If the object can unmarshal itself, let it. - if u, ok := pb.(newUnmarshaler); ok { - err := u.XXX_Unmarshal(p.buf[p.index:]) - p.index = len(p.buf) - return err - } - if u, ok := pb.(Unmarshaler); ok { - // NOTE: The history of proto have unfortunately been inconsistent - // whether Unmarshaler should or should not implicitly clear itself. - // Some implementations do, most do not. - // Thus, calling this here may or may not do what people want. 
- // - // See https://github.com/golang/protobuf/issues/424 - err := u.Unmarshal(p.buf[p.index:]) - p.index = len(p.buf) - return err - } - - // Slow workaround for messages that aren't Unmarshalers. - // This includes some hand-coded .pb.go files and - // bootstrap protos. - // TODO: fix all of those and then add Unmarshal to - // the Message interface. Then: - // The cast above and code below can be deleted. - // The old unmarshaler can be deleted. - // Clients can call Unmarshal directly (can already do that, actually). - var info InternalMessageInfo - err := info.Unmarshal(pb, p.buf[p.index:]) - p.index = len(p.buf) - return err -} diff --git a/app/platform/fabric/e2e-test/fabric-samples/chaincode/abac/go/vendor/github.com/golang/protobuf/proto/deprecated.go b/app/platform/fabric/e2e-test/fabric-samples/chaincode/abac/go/vendor/github.com/golang/protobuf/proto/deprecated.go deleted file mode 100644 index 69de0ea0e..000000000 --- a/app/platform/fabric/e2e-test/fabric-samples/chaincode/abac/go/vendor/github.com/golang/protobuf/proto/deprecated.go +++ /dev/null @@ -1,38 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2018 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. 
-// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package proto - -// Deprecated: do not use. -type Stats struct{ Emalloc, Dmalloc, Encode, Decode, Chit, Cmiss, Size uint64 } - -// Deprecated: do not use. -func GetStats() Stats { return Stats{} } diff --git a/app/platform/fabric/e2e-test/fabric-samples/chaincode/abac/go/vendor/github.com/golang/protobuf/proto/discard.go b/app/platform/fabric/e2e-test/fabric-samples/chaincode/abac/go/vendor/github.com/golang/protobuf/proto/discard.go deleted file mode 100644 index dea2617ce..000000000 --- a/app/platform/fabric/e2e-test/fabric-samples/chaincode/abac/go/vendor/github.com/golang/protobuf/proto/discard.go +++ /dev/null @@ -1,350 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2017 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. 
-// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package proto - -import ( - "fmt" - "reflect" - "strings" - "sync" - "sync/atomic" -) - -type generatedDiscarder interface { - XXX_DiscardUnknown() -} - -// DiscardUnknown recursively discards all unknown fields from this message -// and all embedded messages. -// -// When unmarshaling a message with unrecognized fields, the tags and values -// of such fields are preserved in the Message. This allows a later call to -// marshal to be able to produce a message that continues to have those -// unrecognized fields. To avoid this, DiscardUnknown is used to -// explicitly clear the unknown fields after unmarshaling. -// -// For proto2 messages, the unknown fields of message extensions are only -// discarded from messages that have been accessed via GetExtension. 
-func DiscardUnknown(m Message) { - if m, ok := m.(generatedDiscarder); ok { - m.XXX_DiscardUnknown() - return - } - // TODO: Dynamically populate a InternalMessageInfo for legacy messages, - // but the master branch has no implementation for InternalMessageInfo, - // so it would be more work to replicate that approach. - discardLegacy(m) -} - -// DiscardUnknown recursively discards all unknown fields. -func (a *InternalMessageInfo) DiscardUnknown(m Message) { - di := atomicLoadDiscardInfo(&a.discard) - if di == nil { - di = getDiscardInfo(reflect.TypeOf(m).Elem()) - atomicStoreDiscardInfo(&a.discard, di) - } - di.discard(toPointer(&m)) -} - -type discardInfo struct { - typ reflect.Type - - initialized int32 // 0: only typ is valid, 1: everything is valid - lock sync.Mutex - - fields []discardFieldInfo - unrecognized field -} - -type discardFieldInfo struct { - field field // Offset of field, guaranteed to be valid - discard func(src pointer) -} - -var ( - discardInfoMap = map[reflect.Type]*discardInfo{} - discardInfoLock sync.Mutex -) - -func getDiscardInfo(t reflect.Type) *discardInfo { - discardInfoLock.Lock() - defer discardInfoLock.Unlock() - di := discardInfoMap[t] - if di == nil { - di = &discardInfo{typ: t} - discardInfoMap[t] = di - } - return di -} - -func (di *discardInfo) discard(src pointer) { - if src.isNil() { - return // Nothing to do. - } - - if atomic.LoadInt32(&di.initialized) == 0 { - di.computeDiscardInfo() - } - - for _, fi := range di.fields { - sfp := src.offset(fi.field) - fi.discard(sfp) - } - - // For proto2 messages, only discard unknown fields in message extensions - // that have been accessed via GetExtension. - if em, err := extendable(src.asPointerTo(di.typ).Interface()); err == nil { - // Ignore lock since DiscardUnknown is not concurrency safe. 
- emm, _ := em.extensionsRead() - for _, mx := range emm { - if m, ok := mx.value.(Message); ok { - DiscardUnknown(m) - } - } - } - - if di.unrecognized.IsValid() { - *src.offset(di.unrecognized).toBytes() = nil - } -} - -func (di *discardInfo) computeDiscardInfo() { - di.lock.Lock() - defer di.lock.Unlock() - if di.initialized != 0 { - return - } - t := di.typ - n := t.NumField() - - for i := 0; i < n; i++ { - f := t.Field(i) - if strings.HasPrefix(f.Name, "XXX_") { - continue - } - - dfi := discardFieldInfo{field: toField(&f)} - tf := f.Type - - // Unwrap tf to get its most basic type. - var isPointer, isSlice bool - if tf.Kind() == reflect.Slice && tf.Elem().Kind() != reflect.Uint8 { - isSlice = true - tf = tf.Elem() - } - if tf.Kind() == reflect.Ptr { - isPointer = true - tf = tf.Elem() - } - if isPointer && isSlice && tf.Kind() != reflect.Struct { - panic(fmt.Sprintf("%v.%s cannot be a slice of pointers to primitive types", t, f.Name)) - } - - switch tf.Kind() { - case reflect.Struct: - switch { - case !isPointer: - panic(fmt.Sprintf("%v.%s cannot be a direct struct value", t, f.Name)) - case isSlice: // E.g., []*pb.T - di := getDiscardInfo(tf) - dfi.discard = func(src pointer) { - sps := src.getPointerSlice() - for _, sp := range sps { - if !sp.isNil() { - di.discard(sp) - } - } - } - default: // E.g., *pb.T - di := getDiscardInfo(tf) - dfi.discard = func(src pointer) { - sp := src.getPointer() - if !sp.isNil() { - di.discard(sp) - } - } - } - case reflect.Map: - switch { - case isPointer || isSlice: - panic(fmt.Sprintf("%v.%s cannot be a pointer to a map or a slice of map values", t, f.Name)) - default: // E.g., map[K]V - if tf.Elem().Kind() == reflect.Ptr { // Proto struct (e.g., *T) - dfi.discard = func(src pointer) { - sm := src.asPointerTo(tf).Elem() - if sm.Len() == 0 { - return - } - for _, key := range sm.MapKeys() { - val := sm.MapIndex(key) - DiscardUnknown(val.Interface().(Message)) - } - } - } else { - dfi.discard = func(pointer) {} // Noop - } - 
} - case reflect.Interface: - // Must be oneof field. - switch { - case isPointer || isSlice: - panic(fmt.Sprintf("%v.%s cannot be a pointer to a interface or a slice of interface values", t, f.Name)) - default: // E.g., interface{} - // TODO: Make this faster? - dfi.discard = func(src pointer) { - su := src.asPointerTo(tf).Elem() - if !su.IsNil() { - sv := su.Elem().Elem().Field(0) - if sv.Kind() == reflect.Ptr && sv.IsNil() { - return - } - switch sv.Type().Kind() { - case reflect.Ptr: // Proto struct (e.g., *T) - DiscardUnknown(sv.Interface().(Message)) - } - } - } - } - default: - continue - } - di.fields = append(di.fields, dfi) - } - - di.unrecognized = invalidField - if f, ok := t.FieldByName("XXX_unrecognized"); ok { - if f.Type != reflect.TypeOf([]byte{}) { - panic("expected XXX_unrecognized to be of type []byte") - } - di.unrecognized = toField(&f) - } - - atomic.StoreInt32(&di.initialized, 1) -} - -func discardLegacy(m Message) { - v := reflect.ValueOf(m) - if v.Kind() != reflect.Ptr || v.IsNil() { - return - } - v = v.Elem() - if v.Kind() != reflect.Struct { - return - } - t := v.Type() - - for i := 0; i < v.NumField(); i++ { - f := t.Field(i) - if strings.HasPrefix(f.Name, "XXX_") { - continue - } - vf := v.Field(i) - tf := f.Type - - // Unwrap tf to get its most basic type. 
- var isPointer, isSlice bool - if tf.Kind() == reflect.Slice && tf.Elem().Kind() != reflect.Uint8 { - isSlice = true - tf = tf.Elem() - } - if tf.Kind() == reflect.Ptr { - isPointer = true - tf = tf.Elem() - } - if isPointer && isSlice && tf.Kind() != reflect.Struct { - panic(fmt.Sprintf("%T.%s cannot be a slice of pointers to primitive types", m, f.Name)) - } - - switch tf.Kind() { - case reflect.Struct: - switch { - case !isPointer: - panic(fmt.Sprintf("%T.%s cannot be a direct struct value", m, f.Name)) - case isSlice: // E.g., []*pb.T - for j := 0; j < vf.Len(); j++ { - discardLegacy(vf.Index(j).Interface().(Message)) - } - default: // E.g., *pb.T - discardLegacy(vf.Interface().(Message)) - } - case reflect.Map: - switch { - case isPointer || isSlice: - panic(fmt.Sprintf("%T.%s cannot be a pointer to a map or a slice of map values", m, f.Name)) - default: // E.g., map[K]V - tv := vf.Type().Elem() - if tv.Kind() == reflect.Ptr && tv.Implements(protoMessageType) { // Proto struct (e.g., *T) - for _, key := range vf.MapKeys() { - val := vf.MapIndex(key) - discardLegacy(val.Interface().(Message)) - } - } - } - case reflect.Interface: - // Must be oneof field. 
- switch { - case isPointer || isSlice: - panic(fmt.Sprintf("%T.%s cannot be a pointer to a interface or a slice of interface values", m, f.Name)) - default: // E.g., test_proto.isCommunique_Union interface - if !vf.IsNil() && f.Tag.Get("protobuf_oneof") != "" { - vf = vf.Elem() // E.g., *test_proto.Communique_Msg - if !vf.IsNil() { - vf = vf.Elem() // E.g., test_proto.Communique_Msg - vf = vf.Field(0) // E.g., Proto struct (e.g., *T) or primitive value - if vf.Kind() == reflect.Ptr { - discardLegacy(vf.Interface().(Message)) - } - } - } - } - } - } - - if vf := v.FieldByName("XXX_unrecognized"); vf.IsValid() { - if vf.Type() != reflect.TypeOf([]byte{}) { - panic("expected XXX_unrecognized to be of type []byte") - } - vf.Set(reflect.ValueOf([]byte(nil))) - } - - // For proto2 messages, only discard unknown fields in message extensions - // that have been accessed via GetExtension. - if em, err := extendable(m); err == nil { - // Ignore lock since discardLegacy is not concurrency safe. - emm, _ := em.extensionsRead() - for _, mx := range emm { - if m, ok := mx.value.(Message); ok { - discardLegacy(m) - } - } - } -} diff --git a/app/platform/fabric/e2e-test/fabric-samples/chaincode/abac/go/vendor/github.com/golang/protobuf/proto/encode.go b/app/platform/fabric/e2e-test/fabric-samples/chaincode/abac/go/vendor/github.com/golang/protobuf/proto/encode.go deleted file mode 100644 index 3abfed2cf..000000000 --- a/app/platform/fabric/e2e-test/fabric-samples/chaincode/abac/go/vendor/github.com/golang/protobuf/proto/encode.go +++ /dev/null @@ -1,203 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2010 The Go Authors. All rights reserved. 
-// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package proto - -/* - * Routines for encoding data into the wire format for protocol buffers. - */ - -import ( - "errors" - "reflect" -) - -var ( - // errRepeatedHasNil is the error returned if Marshal is called with - // a struct with a repeated field containing a nil element. 
- errRepeatedHasNil = errors.New("proto: repeated field has nil element") - - // errOneofHasNil is the error returned if Marshal is called with - // a struct with a oneof field containing a nil element. - errOneofHasNil = errors.New("proto: oneof field has nil value") - - // ErrNil is the error returned if Marshal is called with nil. - ErrNil = errors.New("proto: Marshal called with nil") - - // ErrTooLarge is the error returned if Marshal is called with a - // message that encodes to >2GB. - ErrTooLarge = errors.New("proto: message encodes to over 2 GB") -) - -// The fundamental encoders that put bytes on the wire. -// Those that take integer types all accept uint64 and are -// therefore of type valueEncoder. - -const maxVarintBytes = 10 // maximum length of a varint - -// EncodeVarint returns the varint encoding of x. -// This is the format for the -// int32, int64, uint32, uint64, bool, and enum -// protocol buffer types. -// Not used by the package itself, but helpful to clients -// wishing to use the same encoding. -func EncodeVarint(x uint64) []byte { - var buf [maxVarintBytes]byte - var n int - for n = 0; x > 127; n++ { - buf[n] = 0x80 | uint8(x&0x7F) - x >>= 7 - } - buf[n] = uint8(x) - n++ - return buf[0:n] -} - -// EncodeVarint writes a varint-encoded integer to the Buffer. -// This is the format for the -// int32, int64, uint32, uint64, bool, and enum -// protocol buffer types. -func (p *Buffer) EncodeVarint(x uint64) error { - for x >= 1<<7 { - p.buf = append(p.buf, uint8(x&0x7f|0x80)) - x >>= 7 - } - p.buf = append(p.buf, uint8(x)) - return nil -} - -// SizeVarint returns the varint encoding size of an integer. 
-func SizeVarint(x uint64) int { - switch { - case x < 1<<7: - return 1 - case x < 1<<14: - return 2 - case x < 1<<21: - return 3 - case x < 1<<28: - return 4 - case x < 1<<35: - return 5 - case x < 1<<42: - return 6 - case x < 1<<49: - return 7 - case x < 1<<56: - return 8 - case x < 1<<63: - return 9 - } - return 10 -} - -// EncodeFixed64 writes a 64-bit integer to the Buffer. -// This is the format for the -// fixed64, sfixed64, and double protocol buffer types. -func (p *Buffer) EncodeFixed64(x uint64) error { - p.buf = append(p.buf, - uint8(x), - uint8(x>>8), - uint8(x>>16), - uint8(x>>24), - uint8(x>>32), - uint8(x>>40), - uint8(x>>48), - uint8(x>>56)) - return nil -} - -// EncodeFixed32 writes a 32-bit integer to the Buffer. -// This is the format for the -// fixed32, sfixed32, and float protocol buffer types. -func (p *Buffer) EncodeFixed32(x uint64) error { - p.buf = append(p.buf, - uint8(x), - uint8(x>>8), - uint8(x>>16), - uint8(x>>24)) - return nil -} - -// EncodeZigzag64 writes a zigzag-encoded 64-bit integer -// to the Buffer. -// This is the format used for the sint64 protocol buffer type. -func (p *Buffer) EncodeZigzag64(x uint64) error { - // use signed number to get arithmetic right shift. - return p.EncodeVarint(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} - -// EncodeZigzag32 writes a zigzag-encoded 32-bit integer -// to the Buffer. -// This is the format used for the sint32 protocol buffer type. -func (p *Buffer) EncodeZigzag32(x uint64) error { - // use signed number to get arithmetic right shift. - return p.EncodeVarint(uint64((uint32(x) << 1) ^ uint32((int32(x) >> 31)))) -} - -// EncodeRawBytes writes a count-delimited byte buffer to the Buffer. -// This is the format used for the bytes protocol buffer -// type and for embedded messages. -func (p *Buffer) EncodeRawBytes(b []byte) error { - p.EncodeVarint(uint64(len(b))) - p.buf = append(p.buf, b...) - return nil -} - -// EncodeStringBytes writes an encoded string to the Buffer. 
-// This is the format used for the proto2 string type. -func (p *Buffer) EncodeStringBytes(s string) error { - p.EncodeVarint(uint64(len(s))) - p.buf = append(p.buf, s...) - return nil -} - -// Marshaler is the interface representing objects that can marshal themselves. -type Marshaler interface { - Marshal() ([]byte, error) -} - -// EncodeMessage writes the protocol buffer to the Buffer, -// prefixed by a varint-encoded length. -func (p *Buffer) EncodeMessage(pb Message) error { - siz := Size(pb) - p.EncodeVarint(uint64(siz)) - return p.Marshal(pb) -} - -// All protocol buffer fields are nillable, but be careful. -func isNil(v reflect.Value) bool { - switch v.Kind() { - case reflect.Interface, reflect.Map, reflect.Ptr, reflect.Slice: - return v.IsNil() - } - return false -} diff --git a/app/platform/fabric/e2e-test/fabric-samples/chaincode/abac/go/vendor/github.com/golang/protobuf/proto/equal.go b/app/platform/fabric/e2e-test/fabric-samples/chaincode/abac/go/vendor/github.com/golang/protobuf/proto/equal.go deleted file mode 100644 index d4db5a1c1..000000000 --- a/app/platform/fabric/e2e-test/fabric-samples/chaincode/abac/go/vendor/github.com/golang/protobuf/proto/equal.go +++ /dev/null @@ -1,300 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2011 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. 
nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -// Protocol buffer comparison. - -package proto - -import ( - "bytes" - "log" - "reflect" - "strings" -) - -/* -Equal returns true iff protocol buffers a and b are equal. -The arguments must both be pointers to protocol buffer structs. - -Equality is defined in this way: - - Two messages are equal iff they are the same type, - corresponding fields are equal, unknown field sets - are equal, and extensions sets are equal. - - Two set scalar fields are equal iff their values are equal. - If the fields are of a floating-point type, remember that - NaN != x for all x, including NaN. If the message is defined - in a proto3 .proto file, fields are not "set"; specifically, - zero length proto3 "bytes" fields are equal (nil == {}). - - Two repeated fields are equal iff their lengths are the same, - and their corresponding elements are equal. Note a "bytes" field, - although represented by []byte, is not a repeated field and the - rule for the scalar fields described above applies. - - Two unset fields are equal. 
- - Two unknown field sets are equal if their current - encoded state is equal. - - Two extension sets are equal iff they have corresponding - elements that are pairwise equal. - - Two map fields are equal iff their lengths are the same, - and they contain the same set of elements. Zero-length map - fields are equal. - - Every other combination of things are not equal. - -The return value is undefined if a and b are not protocol buffers. -*/ -func Equal(a, b Message) bool { - if a == nil || b == nil { - return a == b - } - v1, v2 := reflect.ValueOf(a), reflect.ValueOf(b) - if v1.Type() != v2.Type() { - return false - } - if v1.Kind() == reflect.Ptr { - if v1.IsNil() { - return v2.IsNil() - } - if v2.IsNil() { - return false - } - v1, v2 = v1.Elem(), v2.Elem() - } - if v1.Kind() != reflect.Struct { - return false - } - return equalStruct(v1, v2) -} - -// v1 and v2 are known to have the same type. -func equalStruct(v1, v2 reflect.Value) bool { - sprop := GetProperties(v1.Type()) - for i := 0; i < v1.NumField(); i++ { - f := v1.Type().Field(i) - if strings.HasPrefix(f.Name, "XXX_") { - continue - } - f1, f2 := v1.Field(i), v2.Field(i) - if f.Type.Kind() == reflect.Ptr { - if n1, n2 := f1.IsNil(), f2.IsNil(); n1 && n2 { - // both unset - continue - } else if n1 != n2 { - // set/unset mismatch - return false - } - f1, f2 = f1.Elem(), f2.Elem() - } - if !equalAny(f1, f2, sprop.Prop[i]) { - return false - } - } - - if em1 := v1.FieldByName("XXX_InternalExtensions"); em1.IsValid() { - em2 := v2.FieldByName("XXX_InternalExtensions") - if !equalExtensions(v1.Type(), em1.Interface().(XXX_InternalExtensions), em2.Interface().(XXX_InternalExtensions)) { - return false - } - } - - if em1 := v1.FieldByName("XXX_extensions"); em1.IsValid() { - em2 := v2.FieldByName("XXX_extensions") - if !equalExtMap(v1.Type(), em1.Interface().(map[int32]Extension), em2.Interface().(map[int32]Extension)) { - return false - } - } - - uf := v1.FieldByName("XXX_unrecognized") - if !uf.IsValid() { - 
return true - } - - u1 := uf.Bytes() - u2 := v2.FieldByName("XXX_unrecognized").Bytes() - return bytes.Equal(u1, u2) -} - -// v1 and v2 are known to have the same type. -// prop may be nil. -func equalAny(v1, v2 reflect.Value, prop *Properties) bool { - if v1.Type() == protoMessageType { - m1, _ := v1.Interface().(Message) - m2, _ := v2.Interface().(Message) - return Equal(m1, m2) - } - switch v1.Kind() { - case reflect.Bool: - return v1.Bool() == v2.Bool() - case reflect.Float32, reflect.Float64: - return v1.Float() == v2.Float() - case reflect.Int32, reflect.Int64: - return v1.Int() == v2.Int() - case reflect.Interface: - // Probably a oneof field; compare the inner values. - n1, n2 := v1.IsNil(), v2.IsNil() - if n1 || n2 { - return n1 == n2 - } - e1, e2 := v1.Elem(), v2.Elem() - if e1.Type() != e2.Type() { - return false - } - return equalAny(e1, e2, nil) - case reflect.Map: - if v1.Len() != v2.Len() { - return false - } - for _, key := range v1.MapKeys() { - val2 := v2.MapIndex(key) - if !val2.IsValid() { - // This key was not found in the second map. - return false - } - if !equalAny(v1.MapIndex(key), val2, nil) { - return false - } - } - return true - case reflect.Ptr: - // Maps may have nil values in them, so check for nil. - if v1.IsNil() && v2.IsNil() { - return true - } - if v1.IsNil() != v2.IsNil() { - return false - } - return equalAny(v1.Elem(), v2.Elem(), prop) - case reflect.Slice: - if v1.Type().Elem().Kind() == reflect.Uint8 { - // short circuit: []byte - - // Edge case: if this is in a proto3 message, a zero length - // bytes field is considered the zero value. 
- if prop != nil && prop.proto3 && v1.Len() == 0 && v2.Len() == 0 { - return true - } - if v1.IsNil() != v2.IsNil() { - return false - } - return bytes.Equal(v1.Interface().([]byte), v2.Interface().([]byte)) - } - - if v1.Len() != v2.Len() { - return false - } - for i := 0; i < v1.Len(); i++ { - if !equalAny(v1.Index(i), v2.Index(i), prop) { - return false - } - } - return true - case reflect.String: - return v1.Interface().(string) == v2.Interface().(string) - case reflect.Struct: - return equalStruct(v1, v2) - case reflect.Uint32, reflect.Uint64: - return v1.Uint() == v2.Uint() - } - - // unknown type, so not a protocol buffer - log.Printf("proto: don't know how to compare %v", v1) - return false -} - -// base is the struct type that the extensions are based on. -// x1 and x2 are InternalExtensions. -func equalExtensions(base reflect.Type, x1, x2 XXX_InternalExtensions) bool { - em1, _ := x1.extensionsRead() - em2, _ := x2.extensionsRead() - return equalExtMap(base, em1, em2) -} - -func equalExtMap(base reflect.Type, em1, em2 map[int32]Extension) bool { - if len(em1) != len(em2) { - return false - } - - for extNum, e1 := range em1 { - e2, ok := em2[extNum] - if !ok { - return false - } - - m1, m2 := e1.value, e2.value - - if m1 == nil && m2 == nil { - // Both have only encoded form. - if bytes.Equal(e1.enc, e2.enc) { - continue - } - // The bytes are different, but the extensions might still be - // equal. We need to decode them to compare. - } - - if m1 != nil && m2 != nil { - // Both are unencoded. - if !equalAny(reflect.ValueOf(m1), reflect.ValueOf(m2), nil) { - return false - } - continue - } - - // At least one is encoded. To do a semantically correct comparison - // we need to unmarshal them first. - var desc *ExtensionDesc - if m := extensionMaps[base]; m != nil { - desc = m[extNum] - } - if desc == nil { - // If both have only encoded form and the bytes are the same, - // it is handled above. We get here when the bytes are different. 
- // We don't know how to decode it, so just compare them as byte - // slices. - log.Printf("proto: don't know how to compare extension %d of %v", extNum, base) - return false - } - var err error - if m1 == nil { - m1, err = decodeExtension(e1.enc, desc) - } - if m2 == nil && err == nil { - m2, err = decodeExtension(e2.enc, desc) - } - if err != nil { - // The encoded form is invalid. - log.Printf("proto: badly encoded extension %d of %v: %v", extNum, base, err) - return false - } - if !equalAny(reflect.ValueOf(m1), reflect.ValueOf(m2), nil) { - return false - } - } - - return true -} diff --git a/app/platform/fabric/e2e-test/fabric-samples/chaincode/abac/go/vendor/github.com/golang/protobuf/proto/extensions.go b/app/platform/fabric/e2e-test/fabric-samples/chaincode/abac/go/vendor/github.com/golang/protobuf/proto/extensions.go deleted file mode 100644 index dacdd22d2..000000000 --- a/app/platform/fabric/e2e-test/fabric-samples/chaincode/abac/go/vendor/github.com/golang/protobuf/proto/extensions.go +++ /dev/null @@ -1,543 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2010 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. 
-// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package proto - -/* - * Types and routines for supporting protocol buffer extensions. - */ - -import ( - "errors" - "fmt" - "io" - "reflect" - "strconv" - "sync" -) - -// ErrMissingExtension is the error returned by GetExtension if the named extension is not in the message. -var ErrMissingExtension = errors.New("proto: missing extension") - -// ExtensionRange represents a range of message extensions for a protocol buffer. -// Used in code generated by the protocol compiler. -type ExtensionRange struct { - Start, End int32 // both inclusive -} - -// extendableProto is an interface implemented by any protocol buffer generated by the current -// proto compiler that may be extended. -type extendableProto interface { - Message - ExtensionRangeArray() []ExtensionRange - extensionsWrite() map[int32]Extension - extensionsRead() (map[int32]Extension, sync.Locker) -} - -// extendableProtoV1 is an interface implemented by a protocol buffer generated by the previous -// version of the proto compiler that may be extended. 
-type extendableProtoV1 interface { - Message - ExtensionRangeArray() []ExtensionRange - ExtensionMap() map[int32]Extension -} - -// extensionAdapter is a wrapper around extendableProtoV1 that implements extendableProto. -type extensionAdapter struct { - extendableProtoV1 -} - -func (e extensionAdapter) extensionsWrite() map[int32]Extension { - return e.ExtensionMap() -} - -func (e extensionAdapter) extensionsRead() (map[int32]Extension, sync.Locker) { - return e.ExtensionMap(), notLocker{} -} - -// notLocker is a sync.Locker whose Lock and Unlock methods are nops. -type notLocker struct{} - -func (n notLocker) Lock() {} -func (n notLocker) Unlock() {} - -// extendable returns the extendableProto interface for the given generated proto message. -// If the proto message has the old extension format, it returns a wrapper that implements -// the extendableProto interface. -func extendable(p interface{}) (extendableProto, error) { - switch p := p.(type) { - case extendableProto: - if isNilPtr(p) { - return nil, fmt.Errorf("proto: nil %T is not extendable", p) - } - return p, nil - case extendableProtoV1: - if isNilPtr(p) { - return nil, fmt.Errorf("proto: nil %T is not extendable", p) - } - return extensionAdapter{p}, nil - } - // Don't allocate a specific error containing %T: - // this is the hot path for Clone and MarshalText. - return nil, errNotExtendable -} - -var errNotExtendable = errors.New("proto: not an extendable proto.Message") - -func isNilPtr(x interface{}) bool { - v := reflect.ValueOf(x) - return v.Kind() == reflect.Ptr && v.IsNil() -} - -// XXX_InternalExtensions is an internal representation of proto extensions. -// -// Each generated message struct type embeds an anonymous XXX_InternalExtensions field, -// thus gaining the unexported 'extensions' method, which can be called only from the proto package. 
-// -// The methods of XXX_InternalExtensions are not concurrency safe in general, -// but calls to logically read-only methods such as has and get may be executed concurrently. -type XXX_InternalExtensions struct { - // The struct must be indirect so that if a user inadvertently copies a - // generated message and its embedded XXX_InternalExtensions, they - // avoid the mayhem of a copied mutex. - // - // The mutex serializes all logically read-only operations to p.extensionMap. - // It is up to the client to ensure that write operations to p.extensionMap are - // mutually exclusive with other accesses. - p *struct { - mu sync.Mutex - extensionMap map[int32]Extension - } -} - -// extensionsWrite returns the extension map, creating it on first use. -func (e *XXX_InternalExtensions) extensionsWrite() map[int32]Extension { - if e.p == nil { - e.p = new(struct { - mu sync.Mutex - extensionMap map[int32]Extension - }) - e.p.extensionMap = make(map[int32]Extension) - } - return e.p.extensionMap -} - -// extensionsRead returns the extensions map for read-only use. It may be nil. -// The caller must hold the returned mutex's lock when accessing Elements within the map. -func (e *XXX_InternalExtensions) extensionsRead() (map[int32]Extension, sync.Locker) { - if e.p == nil { - return nil, nil - } - return e.p.extensionMap, &e.p.mu -} - -// ExtensionDesc represents an extension specification. -// Used in generated code from the protocol compiler. 
-type ExtensionDesc struct { - ExtendedType Message // nil pointer to the type that is being extended - ExtensionType interface{} // nil pointer to the extension type - Field int32 // field number - Name string // fully-qualified name of extension, for text formatting - Tag string // protobuf tag style - Filename string // name of the file in which the extension is defined -} - -func (ed *ExtensionDesc) repeated() bool { - t := reflect.TypeOf(ed.ExtensionType) - return t.Kind() == reflect.Slice && t.Elem().Kind() != reflect.Uint8 -} - -// Extension represents an extension in a message. -type Extension struct { - // When an extension is stored in a message using SetExtension - // only desc and value are set. When the message is marshaled - // enc will be set to the encoded form of the message. - // - // When a message is unmarshaled and contains extensions, each - // extension will have only enc set. When such an extension is - // accessed using GetExtension (or GetExtensions) desc and value - // will be set. - desc *ExtensionDesc - value interface{} - enc []byte -} - -// SetRawExtension is for testing only. -func SetRawExtension(base Message, id int32, b []byte) { - epb, err := extendable(base) - if err != nil { - return - } - extmap := epb.extensionsWrite() - extmap[id] = Extension{enc: b} -} - -// isExtensionField returns true iff the given field number is in an extension range. -func isExtensionField(pb extendableProto, field int32) bool { - for _, er := range pb.ExtensionRangeArray() { - if er.Start <= field && field <= er.End { - return true - } - } - return false -} - -// checkExtensionTypes checks that the given extension is valid for pb. -func checkExtensionTypes(pb extendableProto, extension *ExtensionDesc) error { - var pbi interface{} = pb - // Check the extended type. 
- if ea, ok := pbi.(extensionAdapter); ok { - pbi = ea.extendableProtoV1 - } - if a, b := reflect.TypeOf(pbi), reflect.TypeOf(extension.ExtendedType); a != b { - return fmt.Errorf("proto: bad extended type; %v does not extend %v", b, a) - } - // Check the range. - if !isExtensionField(pb, extension.Field) { - return errors.New("proto: bad extension number; not in declared ranges") - } - return nil -} - -// extPropKey is sufficient to uniquely identify an extension. -type extPropKey struct { - base reflect.Type - field int32 -} - -var extProp = struct { - sync.RWMutex - m map[extPropKey]*Properties -}{ - m: make(map[extPropKey]*Properties), -} - -func extensionProperties(ed *ExtensionDesc) *Properties { - key := extPropKey{base: reflect.TypeOf(ed.ExtendedType), field: ed.Field} - - extProp.RLock() - if prop, ok := extProp.m[key]; ok { - extProp.RUnlock() - return prop - } - extProp.RUnlock() - - extProp.Lock() - defer extProp.Unlock() - // Check again. - if prop, ok := extProp.m[key]; ok { - return prop - } - - prop := new(Properties) - prop.Init(reflect.TypeOf(ed.ExtensionType), "unknown_name", ed.Tag, nil) - extProp.m[key] = prop - return prop -} - -// HasExtension returns whether the given extension is present in pb. -func HasExtension(pb Message, extension *ExtensionDesc) bool { - // TODO: Check types, field numbers, etc.? - epb, err := extendable(pb) - if err != nil { - return false - } - extmap, mu := epb.extensionsRead() - if extmap == nil { - return false - } - mu.Lock() - _, ok := extmap[extension.Field] - mu.Unlock() - return ok -} - -// ClearExtension removes the given extension from pb. -func ClearExtension(pb Message, extension *ExtensionDesc) { - epb, err := extendable(pb) - if err != nil { - return - } - // TODO: Check types, field numbers, etc.? - extmap := epb.extensionsWrite() - delete(extmap, extension.Field) -} - -// GetExtension retrieves a proto2 extended field from pb. 
-// -// If the descriptor is type complete (i.e., ExtensionDesc.ExtensionType is non-nil), -// then GetExtension parses the encoded field and returns a Go value of the specified type. -// If the field is not present, then the default value is returned (if one is specified), -// otherwise ErrMissingExtension is reported. -// -// If the descriptor is not type complete (i.e., ExtensionDesc.ExtensionType is nil), -// then GetExtension returns the raw encoded bytes of the field extension. -func GetExtension(pb Message, extension *ExtensionDesc) (interface{}, error) { - epb, err := extendable(pb) - if err != nil { - return nil, err - } - - if extension.ExtendedType != nil { - // can only check type if this is a complete descriptor - if err := checkExtensionTypes(epb, extension); err != nil { - return nil, err - } - } - - emap, mu := epb.extensionsRead() - if emap == nil { - return defaultExtensionValue(extension) - } - mu.Lock() - defer mu.Unlock() - e, ok := emap[extension.Field] - if !ok { - // defaultExtensionValue returns the default value or - // ErrMissingExtension if there is no default. - return defaultExtensionValue(extension) - } - - if e.value != nil { - // Already decoded. Check the descriptor, though. - if e.desc != extension { - // This shouldn't happen. If it does, it means that - // GetExtension was called twice with two different - // descriptors with the same field number. - return nil, errors.New("proto: descriptor conflict") - } - return e.value, nil - } - - if extension.ExtensionType == nil { - // incomplete descriptor - return e.enc, nil - } - - v, err := decodeExtension(e.enc, extension) - if err != nil { - return nil, err - } - - // Remember the decoded version and drop the encoded version. - // That way it is safe to mutate what we return. - e.value = v - e.desc = extension - e.enc = nil - emap[extension.Field] = e - return e.value, nil -} - -// defaultExtensionValue returns the default value for extension. 
-// If no default for an extension is defined ErrMissingExtension is returned. -func defaultExtensionValue(extension *ExtensionDesc) (interface{}, error) { - if extension.ExtensionType == nil { - // incomplete descriptor, so no default - return nil, ErrMissingExtension - } - - t := reflect.TypeOf(extension.ExtensionType) - props := extensionProperties(extension) - - sf, _, err := fieldDefault(t, props) - if err != nil { - return nil, err - } - - if sf == nil || sf.value == nil { - // There is no default value. - return nil, ErrMissingExtension - } - - if t.Kind() != reflect.Ptr { - // We do not need to return a Ptr, we can directly return sf.value. - return sf.value, nil - } - - // We need to return an interface{} that is a pointer to sf.value. - value := reflect.New(t).Elem() - value.Set(reflect.New(value.Type().Elem())) - if sf.kind == reflect.Int32 { - // We may have an int32 or an enum, but the underlying data is int32. - // Since we can't set an int32 into a non int32 reflect.value directly - // set it as a int32. - value.Elem().SetInt(int64(sf.value.(int32))) - } else { - value.Elem().Set(reflect.ValueOf(sf.value)) - } - return value.Interface(), nil -} - -// decodeExtension decodes an extension encoded in b. -func decodeExtension(b []byte, extension *ExtensionDesc) (interface{}, error) { - t := reflect.TypeOf(extension.ExtensionType) - unmarshal := typeUnmarshaler(t, extension.Tag) - - // t is a pointer to a struct, pointer to basic type or a slice. - // Allocate space to store the pointer/slice. - value := reflect.New(t).Elem() - - var err error - for { - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - wire := int(x) & 7 - - b, err = unmarshal(b, valToPointer(value.Addr()), wire) - if err != nil { - return nil, err - } - - if len(b) == 0 { - break - } - } - return value.Interface(), nil -} - -// GetExtensions returns a slice of the extensions present in pb that are also listed in es. 
-// The returned slice has the same length as es; missing extensions will appear as nil elements. -func GetExtensions(pb Message, es []*ExtensionDesc) (extensions []interface{}, err error) { - epb, err := extendable(pb) - if err != nil { - return nil, err - } - extensions = make([]interface{}, len(es)) - for i, e := range es { - extensions[i], err = GetExtension(epb, e) - if err == ErrMissingExtension { - err = nil - } - if err != nil { - return - } - } - return -} - -// ExtensionDescs returns a new slice containing pb's extension descriptors, in undefined order. -// For non-registered extensions, ExtensionDescs returns an incomplete descriptor containing -// just the Field field, which defines the extension's field number. -func ExtensionDescs(pb Message) ([]*ExtensionDesc, error) { - epb, err := extendable(pb) - if err != nil { - return nil, err - } - registeredExtensions := RegisteredExtensions(pb) - - emap, mu := epb.extensionsRead() - if emap == nil { - return nil, nil - } - mu.Lock() - defer mu.Unlock() - extensions := make([]*ExtensionDesc, 0, len(emap)) - for extid, e := range emap { - desc := e.desc - if desc == nil { - desc = registeredExtensions[extid] - if desc == nil { - desc = &ExtensionDesc{Field: extid} - } - } - - extensions = append(extensions, desc) - } - return extensions, nil -} - -// SetExtension sets the specified extension of pb to the specified value. -func SetExtension(pb Message, extension *ExtensionDesc, value interface{}) error { - epb, err := extendable(pb) - if err != nil { - return err - } - if err := checkExtensionTypes(epb, extension); err != nil { - return err - } - typ := reflect.TypeOf(extension.ExtensionType) - if typ != reflect.TypeOf(value) { - return fmt.Errorf("proto: bad extension value type. got: %T, want: %T", value, extension.ExtensionType) - } - // nil extension values need to be caught early, because the - // encoder can't distinguish an ErrNil due to a nil extension - // from an ErrNil due to a missing field. 
Extensions are - // always optional, so the encoder would just swallow the error - // and drop all the extensions from the encoded message. - if reflect.ValueOf(value).IsNil() { - return fmt.Errorf("proto: SetExtension called with nil value of type %T", value) - } - - extmap := epb.extensionsWrite() - extmap[extension.Field] = Extension{desc: extension, value: value} - return nil -} - -// ClearAllExtensions clears all extensions from pb. -func ClearAllExtensions(pb Message) { - epb, err := extendable(pb) - if err != nil { - return - } - m := epb.extensionsWrite() - for k := range m { - delete(m, k) - } -} - -// A global registry of extensions. -// The generated code will register the generated descriptors by calling RegisterExtension. - -var extensionMaps = make(map[reflect.Type]map[int32]*ExtensionDesc) - -// RegisterExtension is called from the generated code. -func RegisterExtension(desc *ExtensionDesc) { - st := reflect.TypeOf(desc.ExtendedType).Elem() - m := extensionMaps[st] - if m == nil { - m = make(map[int32]*ExtensionDesc) - extensionMaps[st] = m - } - if _, ok := m[desc.Field]; ok { - panic("proto: duplicate extension registered: " + st.String() + " " + strconv.Itoa(int(desc.Field))) - } - m[desc.Field] = desc -} - -// RegisteredExtensions returns a map of the registered extensions of a -// protocol buffer struct, indexed by the extension number. -// The argument pb should be a nil pointer to the struct type. 
-func RegisteredExtensions(pb Message) map[int32]*ExtensionDesc { - return extensionMaps[reflect.TypeOf(pb).Elem()] -} diff --git a/app/platform/fabric/e2e-test/fabric-samples/chaincode/abac/go/vendor/github.com/golang/protobuf/proto/lib.go b/app/platform/fabric/e2e-test/fabric-samples/chaincode/abac/go/vendor/github.com/golang/protobuf/proto/lib.go deleted file mode 100644 index c076dbdb9..000000000 --- a/app/platform/fabric/e2e-test/fabric-samples/chaincode/abac/go/vendor/github.com/golang/protobuf/proto/lib.go +++ /dev/null @@ -1,959 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2010 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -/* -Package proto converts data structures to and from the wire format of -protocol buffers. It works in concert with the Go source code generated -for .proto files by the protocol compiler. - -A summary of the properties of the protocol buffer interface -for a protocol buffer variable v: - - - Names are turned from camel_case to CamelCase for export. - - There are no methods on v to set fields; just treat - them as structure fields. - - There are getters that return a field's value if set, - and return the field's default value if unset. - The getters work even if the receiver is a nil message. - - The zero value for a struct is its correct initialization state. - All desired fields must be set before marshaling. - - A Reset() method will restore a protobuf struct to its zero state. - - Non-repeated fields are pointers to the values; nil means unset. - That is, optional or required field int32 f becomes F *int32. - - Repeated fields are slices. - - Helper functions are available to aid the setting of fields. - msg.Foo = proto.String("hello") // set field - - Constants are defined to hold the default values of all fields that - have them. They have the form Default_StructName_FieldName. - Because the getter methods handle defaulted values, - direct use of these constants should be rare. - - Enums are given type names and maps from names to values. 
- Enum values are prefixed by the enclosing message's name, or by the - enum's type name if it is a top-level enum. Enum types have a String - method, and a Enum method to assist in message construction. - - Nested messages, groups and enums have type names prefixed with the name of - the surrounding message type. - - Extensions are given descriptor names that start with E_, - followed by an underscore-delimited list of the nested messages - that contain it (if any) followed by the CamelCased name of the - extension field itself. HasExtension, ClearExtension, GetExtension - and SetExtension are functions for manipulating extensions. - - Oneof field sets are given a single field in their message, - with distinguished wrapper types for each possible field value. - - Marshal and Unmarshal are functions to encode and decode the wire format. - -When the .proto file specifies `syntax="proto3"`, there are some differences: - - - Non-repeated fields of non-message type are values instead of pointers. - - Enum types do not get an Enum method. - -The simplest way to describe this is to see an example. 
-Given file test.proto, containing - - package example; - - enum FOO { X = 17; } - - message Test { - required string label = 1; - optional int32 type = 2 [default=77]; - repeated int64 reps = 3; - optional group OptionalGroup = 4 { - required string RequiredField = 5; - } - oneof union { - int32 number = 6; - string name = 7; - } - } - -The resulting file, test.pb.go, is: - - package example - - import proto "github.com/golang/protobuf/proto" - import math "math" - - type FOO int32 - const ( - FOO_X FOO = 17 - ) - var FOO_name = map[int32]string{ - 17: "X", - } - var FOO_value = map[string]int32{ - "X": 17, - } - - func (x FOO) Enum() *FOO { - p := new(FOO) - *p = x - return p - } - func (x FOO) String() string { - return proto.EnumName(FOO_name, int32(x)) - } - func (x *FOO) UnmarshalJSON(data []byte) error { - value, err := proto.UnmarshalJSONEnum(FOO_value, data) - if err != nil { - return err - } - *x = FOO(value) - return nil - } - - type Test struct { - Label *string `protobuf:"bytes,1,req,name=label" json:"label,omitempty"` - Type *int32 `protobuf:"varint,2,opt,name=type,def=77" json:"type,omitempty"` - Reps []int64 `protobuf:"varint,3,rep,name=reps" json:"reps,omitempty"` - Optionalgroup *Test_OptionalGroup `protobuf:"group,4,opt,name=OptionalGroup" json:"optionalgroup,omitempty"` - // Types that are valid to be assigned to Union: - // *Test_Number - // *Test_Name - Union isTest_Union `protobuf_oneof:"union"` - XXX_unrecognized []byte `json:"-"` - } - func (m *Test) Reset() { *m = Test{} } - func (m *Test) String() string { return proto.CompactTextString(m) } - func (*Test) ProtoMessage() {} - - type isTest_Union interface { - isTest_Union() - } - - type Test_Number struct { - Number int32 `protobuf:"varint,6,opt,name=number"` - } - type Test_Name struct { - Name string `protobuf:"bytes,7,opt,name=name"` - } - - func (*Test_Number) isTest_Union() {} - func (*Test_Name) isTest_Union() {} - - func (m *Test) GetUnion() isTest_Union { - if m != nil { - return 
m.Union - } - return nil - } - const Default_Test_Type int32 = 77 - - func (m *Test) GetLabel() string { - if m != nil && m.Label != nil { - return *m.Label - } - return "" - } - - func (m *Test) GetType() int32 { - if m != nil && m.Type != nil { - return *m.Type - } - return Default_Test_Type - } - - func (m *Test) GetOptionalgroup() *Test_OptionalGroup { - if m != nil { - return m.Optionalgroup - } - return nil - } - - type Test_OptionalGroup struct { - RequiredField *string `protobuf:"bytes,5,req" json:"RequiredField,omitempty"` - } - func (m *Test_OptionalGroup) Reset() { *m = Test_OptionalGroup{} } - func (m *Test_OptionalGroup) String() string { return proto.CompactTextString(m) } - - func (m *Test_OptionalGroup) GetRequiredField() string { - if m != nil && m.RequiredField != nil { - return *m.RequiredField - } - return "" - } - - func (m *Test) GetNumber() int32 { - if x, ok := m.GetUnion().(*Test_Number); ok { - return x.Number - } - return 0 - } - - func (m *Test) GetName() string { - if x, ok := m.GetUnion().(*Test_Name); ok { - return x.Name - } - return "" - } - - func init() { - proto.RegisterEnum("example.FOO", FOO_name, FOO_value) - } - -To create and play with a Test object: - - package main - - import ( - "log" - - "github.com/golang/protobuf/proto" - pb "./example.pb" - ) - - func main() { - test := &pb.Test{ - Label: proto.String("hello"), - Type: proto.Int32(17), - Reps: []int64{1, 2, 3}, - Optionalgroup: &pb.Test_OptionalGroup{ - RequiredField: proto.String("good bye"), - }, - Union: &pb.Test_Name{"fred"}, - } - data, err := proto.Marshal(test) - if err != nil { - log.Fatal("marshaling error: ", err) - } - newTest := &pb.Test{} - err = proto.Unmarshal(data, newTest) - if err != nil { - log.Fatal("unmarshaling error: ", err) - } - // Now test and newTest contain the same data. 
- if test.GetLabel() != newTest.GetLabel() { - log.Fatalf("data mismatch %q != %q", test.GetLabel(), newTest.GetLabel()) - } - // Use a type switch to determine which oneof was set. - switch u := test.Union.(type) { - case *pb.Test_Number: // u.Number contains the number. - case *pb.Test_Name: // u.Name contains the string. - } - // etc. - } -*/ -package proto - -import ( - "encoding/json" - "fmt" - "log" - "reflect" - "sort" - "strconv" - "sync" -) - -// RequiredNotSetError is an error type returned by either Marshal or Unmarshal. -// Marshal reports this when a required field is not initialized. -// Unmarshal reports this when a required field is missing from the wire data. -type RequiredNotSetError struct{ field string } - -func (e *RequiredNotSetError) Error() string { - if e.field == "" { - return fmt.Sprintf("proto: required field not set") - } - return fmt.Sprintf("proto: required field %q not set", e.field) -} -func (e *RequiredNotSetError) RequiredNotSet() bool { - return true -} - -type invalidUTF8Error struct{ field string } - -func (e *invalidUTF8Error) Error() string { - if e.field == "" { - return "proto: invalid UTF-8 detected" - } - return fmt.Sprintf("proto: field %q contains invalid UTF-8", e.field) -} -func (e *invalidUTF8Error) InvalidUTF8() bool { - return true -} - -// errInvalidUTF8 is a sentinel error to identify fields with invalid UTF-8. -// This error should not be exposed to the external API as such errors should -// be recreated with the field information. -var errInvalidUTF8 = &invalidUTF8Error{} - -// isNonFatal reports whether the error is either a RequiredNotSet error -// or a InvalidUTF8 error. 
-func isNonFatal(err error) bool { - if re, ok := err.(interface{ RequiredNotSet() bool }); ok && re.RequiredNotSet() { - return true - } - if re, ok := err.(interface{ InvalidUTF8() bool }); ok && re.InvalidUTF8() { - return true - } - return false -} - -type nonFatal struct{ E error } - -// Merge merges err into nf and reports whether it was successful. -// Otherwise it returns false for any fatal non-nil errors. -func (nf *nonFatal) Merge(err error) (ok bool) { - if err == nil { - return true // not an error - } - if !isNonFatal(err) { - return false // fatal error - } - if nf.E == nil { - nf.E = err // store first instance of non-fatal error - } - return true -} - -// Message is implemented by generated protocol buffer messages. -type Message interface { - Reset() - String() string - ProtoMessage() -} - -// A Buffer is a buffer manager for marshaling and unmarshaling -// protocol buffers. It may be reused between invocations to -// reduce memory usage. It is not necessary to use a Buffer; -// the global functions Marshal and Unmarshal create a -// temporary Buffer and are fine for most applications. -type Buffer struct { - buf []byte // encode/decode byte stream - index int // read point - - deterministic bool -} - -// NewBuffer allocates a new Buffer and initializes its internal data to -// the contents of the argument slice. -func NewBuffer(e []byte) *Buffer { - return &Buffer{buf: e} -} - -// Reset resets the Buffer, ready for marshaling a new protocol buffer. -func (p *Buffer) Reset() { - p.buf = p.buf[0:0] // for reading/writing - p.index = 0 // for reading -} - -// SetBuf replaces the internal buffer with the slice, -// ready for unmarshaling the contents of the slice. -func (p *Buffer) SetBuf(s []byte) { - p.buf = s - p.index = 0 -} - -// Bytes returns the contents of the Buffer. -func (p *Buffer) Bytes() []byte { return p.buf } - -// SetDeterministic sets whether to use deterministic serialization. 
-// -// Deterministic serialization guarantees that for a given binary, equal -// messages will always be serialized to the same bytes. This implies: -// -// - Repeated serialization of a message will return the same bytes. -// - Different processes of the same binary (which may be executing on -// different machines) will serialize equal messages to the same bytes. -// -// Note that the deterministic serialization is NOT canonical across -// languages. It is not guaranteed to remain stable over time. It is unstable -// across different builds with schema changes due to unknown fields. -// Users who need canonical serialization (e.g., persistent storage in a -// canonical form, fingerprinting, etc.) should define their own -// canonicalization specification and implement their own serializer rather -// than relying on this API. -// -// If deterministic serialization is requested, map entries will be sorted -// by keys in lexographical order. This is an implementation detail and -// subject to change. -func (p *Buffer) SetDeterministic(deterministic bool) { - p.deterministic = deterministic -} - -/* - * Helper routines for simplifying the creation of optional fields of basic type. - */ - -// Bool is a helper routine that allocates a new bool value -// to store v and returns a pointer to it. -func Bool(v bool) *bool { - return &v -} - -// Int32 is a helper routine that allocates a new int32 value -// to store v and returns a pointer to it. -func Int32(v int32) *int32 { - return &v -} - -// Int is a helper routine that allocates a new int32 value -// to store v and returns a pointer to it, but unlike Int32 -// its argument value is an int. -func Int(v int) *int32 { - p := new(int32) - *p = int32(v) - return p -} - -// Int64 is a helper routine that allocates a new int64 value -// to store v and returns a pointer to it. 
-func Int64(v int64) *int64 { - return &v -} - -// Float32 is a helper routine that allocates a new float32 value -// to store v and returns a pointer to it. -func Float32(v float32) *float32 { - return &v -} - -// Float64 is a helper routine that allocates a new float64 value -// to store v and returns a pointer to it. -func Float64(v float64) *float64 { - return &v -} - -// Uint32 is a helper routine that allocates a new uint32 value -// to store v and returns a pointer to it. -func Uint32(v uint32) *uint32 { - return &v -} - -// Uint64 is a helper routine that allocates a new uint64 value -// to store v and returns a pointer to it. -func Uint64(v uint64) *uint64 { - return &v -} - -// String is a helper routine that allocates a new string value -// to store v and returns a pointer to it. -func String(v string) *string { - return &v -} - -// EnumName is a helper function to simplify printing protocol buffer enums -// by name. Given an enum map and a value, it returns a useful string. -func EnumName(m map[int32]string, v int32) string { - s, ok := m[v] - if ok { - return s - } - return strconv.Itoa(int(v)) -} - -// UnmarshalJSONEnum is a helper function to simplify recovering enum int values -// from their JSON-encoded representation. Given a map from the enum's symbolic -// names to its int values, and a byte buffer containing the JSON-encoded -// value, it returns an int32 that can be cast to the enum type by the caller. -// -// The function can deal with both JSON representations, numeric and symbolic. -func UnmarshalJSONEnum(m map[string]int32, data []byte, enumName string) (int32, error) { - if data[0] == '"' { - // New style: enums are strings. - var repr string - if err := json.Unmarshal(data, &repr); err != nil { - return -1, err - } - val, ok := m[repr] - if !ok { - return 0, fmt.Errorf("unrecognized enum %s value %q", enumName, repr) - } - return val, nil - } - // Old style: enums are ints. 
- var val int32 - if err := json.Unmarshal(data, &val); err != nil { - return 0, fmt.Errorf("cannot unmarshal %#q into enum %s", data, enumName) - } - return val, nil -} - -// DebugPrint dumps the encoded data in b in a debugging format with a header -// including the string s. Used in testing but made available for general debugging. -func (p *Buffer) DebugPrint(s string, b []byte) { - var u uint64 - - obuf := p.buf - index := p.index - p.buf = b - p.index = 0 - depth := 0 - - fmt.Printf("\n--- %s ---\n", s) - -out: - for { - for i := 0; i < depth; i++ { - fmt.Print(" ") - } - - index := p.index - if index == len(p.buf) { - break - } - - op, err := p.DecodeVarint() - if err != nil { - fmt.Printf("%3d: fetching op err %v\n", index, err) - break out - } - tag := op >> 3 - wire := op & 7 - - switch wire { - default: - fmt.Printf("%3d: t=%3d unknown wire=%d\n", - index, tag, wire) - break out - - case WireBytes: - var r []byte - - r, err = p.DecodeRawBytes(false) - if err != nil { - break out - } - fmt.Printf("%3d: t=%3d bytes [%d]", index, tag, len(r)) - if len(r) <= 6 { - for i := 0; i < len(r); i++ { - fmt.Printf(" %.2x", r[i]) - } - } else { - for i := 0; i < 3; i++ { - fmt.Printf(" %.2x", r[i]) - } - fmt.Printf(" ..") - for i := len(r) - 3; i < len(r); i++ { - fmt.Printf(" %.2x", r[i]) - } - } - fmt.Printf("\n") - - case WireFixed32: - u, err = p.DecodeFixed32() - if err != nil { - fmt.Printf("%3d: t=%3d fix32 err %v\n", index, tag, err) - break out - } - fmt.Printf("%3d: t=%3d fix32 %d\n", index, tag, u) - - case WireFixed64: - u, err = p.DecodeFixed64() - if err != nil { - fmt.Printf("%3d: t=%3d fix64 err %v\n", index, tag, err) - break out - } - fmt.Printf("%3d: t=%3d fix64 %d\n", index, tag, u) - - case WireVarint: - u, err = p.DecodeVarint() - if err != nil { - fmt.Printf("%3d: t=%3d varint err %v\n", index, tag, err) - break out - } - fmt.Printf("%3d: t=%3d varint %d\n", index, tag, u) - - case WireStartGroup: - fmt.Printf("%3d: t=%3d start\n", index, tag) 
- depth++ - - case WireEndGroup: - depth-- - fmt.Printf("%3d: t=%3d end\n", index, tag) - } - } - - if depth != 0 { - fmt.Printf("%3d: start-end not balanced %d\n", p.index, depth) - } - fmt.Printf("\n") - - p.buf = obuf - p.index = index -} - -// SetDefaults sets unset protocol buffer fields to their default values. -// It only modifies fields that are both unset and have defined defaults. -// It recursively sets default values in any non-nil sub-messages. -func SetDefaults(pb Message) { - setDefaults(reflect.ValueOf(pb), true, false) -} - -// v is a pointer to a struct. -func setDefaults(v reflect.Value, recur, zeros bool) { - v = v.Elem() - - defaultMu.RLock() - dm, ok := defaults[v.Type()] - defaultMu.RUnlock() - if !ok { - dm = buildDefaultMessage(v.Type()) - defaultMu.Lock() - defaults[v.Type()] = dm - defaultMu.Unlock() - } - - for _, sf := range dm.scalars { - f := v.Field(sf.index) - if !f.IsNil() { - // field already set - continue - } - dv := sf.value - if dv == nil && !zeros { - // no explicit default, and don't want to set zeros - continue - } - fptr := f.Addr().Interface() // **T - // TODO: Consider batching the allocations we do here. 
- switch sf.kind { - case reflect.Bool: - b := new(bool) - if dv != nil { - *b = dv.(bool) - } - *(fptr.(**bool)) = b - case reflect.Float32: - f := new(float32) - if dv != nil { - *f = dv.(float32) - } - *(fptr.(**float32)) = f - case reflect.Float64: - f := new(float64) - if dv != nil { - *f = dv.(float64) - } - *(fptr.(**float64)) = f - case reflect.Int32: - // might be an enum - if ft := f.Type(); ft != int32PtrType { - // enum - f.Set(reflect.New(ft.Elem())) - if dv != nil { - f.Elem().SetInt(int64(dv.(int32))) - } - } else { - // int32 field - i := new(int32) - if dv != nil { - *i = dv.(int32) - } - *(fptr.(**int32)) = i - } - case reflect.Int64: - i := new(int64) - if dv != nil { - *i = dv.(int64) - } - *(fptr.(**int64)) = i - case reflect.String: - s := new(string) - if dv != nil { - *s = dv.(string) - } - *(fptr.(**string)) = s - case reflect.Uint8: - // exceptional case: []byte - var b []byte - if dv != nil { - db := dv.([]byte) - b = make([]byte, len(db)) - copy(b, db) - } else { - b = []byte{} - } - *(fptr.(*[]byte)) = b - case reflect.Uint32: - u := new(uint32) - if dv != nil { - *u = dv.(uint32) - } - *(fptr.(**uint32)) = u - case reflect.Uint64: - u := new(uint64) - if dv != nil { - *u = dv.(uint64) - } - *(fptr.(**uint64)) = u - default: - log.Printf("proto: can't set default for field %v (sf.kind=%v)", f, sf.kind) - } - } - - for _, ni := range dm.nested { - f := v.Field(ni) - // f is *T or []*T or map[T]*T - switch f.Kind() { - case reflect.Ptr: - if f.IsNil() { - continue - } - setDefaults(f, recur, zeros) - - case reflect.Slice: - for i := 0; i < f.Len(); i++ { - e := f.Index(i) - if e.IsNil() { - continue - } - setDefaults(e, recur, zeros) - } - - case reflect.Map: - for _, k := range f.MapKeys() { - e := f.MapIndex(k) - if e.IsNil() { - continue - } - setDefaults(e, recur, zeros) - } - } - } -} - -var ( - // defaults maps a protocol buffer struct type to a slice of the fields, - // with its scalar fields set to their proto-declared non-zero 
default values. - defaultMu sync.RWMutex - defaults = make(map[reflect.Type]defaultMessage) - - int32PtrType = reflect.TypeOf((*int32)(nil)) -) - -// defaultMessage represents information about the default values of a message. -type defaultMessage struct { - scalars []scalarField - nested []int // struct field index of nested messages -} - -type scalarField struct { - index int // struct field index - kind reflect.Kind // element type (the T in *T or []T) - value interface{} // the proto-declared default value, or nil -} - -// t is a struct type. -func buildDefaultMessage(t reflect.Type) (dm defaultMessage) { - sprop := GetProperties(t) - for _, prop := range sprop.Prop { - fi, ok := sprop.decoderTags.get(prop.Tag) - if !ok { - // XXX_unrecognized - continue - } - ft := t.Field(fi).Type - - sf, nested, err := fieldDefault(ft, prop) - switch { - case err != nil: - log.Print(err) - case nested: - dm.nested = append(dm.nested, fi) - case sf != nil: - sf.index = fi - dm.scalars = append(dm.scalars, *sf) - } - } - - return dm -} - -// fieldDefault returns the scalarField for field type ft. -// sf will be nil if the field can not have a default. -// nestedMessage will be true if this is a nested message. -// Note that sf.index is not set on return. 
-func fieldDefault(ft reflect.Type, prop *Properties) (sf *scalarField, nestedMessage bool, err error) { - var canHaveDefault bool - switch ft.Kind() { - case reflect.Ptr: - if ft.Elem().Kind() == reflect.Struct { - nestedMessage = true - } else { - canHaveDefault = true // proto2 scalar field - } - - case reflect.Slice: - switch ft.Elem().Kind() { - case reflect.Ptr: - nestedMessage = true // repeated message - case reflect.Uint8: - canHaveDefault = true // bytes field - } - - case reflect.Map: - if ft.Elem().Kind() == reflect.Ptr { - nestedMessage = true // map with message values - } - } - - if !canHaveDefault { - if nestedMessage { - return nil, true, nil - } - return nil, false, nil - } - - // We now know that ft is a pointer or slice. - sf = &scalarField{kind: ft.Elem().Kind()} - - // scalar fields without defaults - if !prop.HasDefault { - return sf, false, nil - } - - // a scalar field: either *T or []byte - switch ft.Elem().Kind() { - case reflect.Bool: - x, err := strconv.ParseBool(prop.Default) - if err != nil { - return nil, false, fmt.Errorf("proto: bad default bool %q: %v", prop.Default, err) - } - sf.value = x - case reflect.Float32: - x, err := strconv.ParseFloat(prop.Default, 32) - if err != nil { - return nil, false, fmt.Errorf("proto: bad default float32 %q: %v", prop.Default, err) - } - sf.value = float32(x) - case reflect.Float64: - x, err := strconv.ParseFloat(prop.Default, 64) - if err != nil { - return nil, false, fmt.Errorf("proto: bad default float64 %q: %v", prop.Default, err) - } - sf.value = x - case reflect.Int32: - x, err := strconv.ParseInt(prop.Default, 10, 32) - if err != nil { - return nil, false, fmt.Errorf("proto: bad default int32 %q: %v", prop.Default, err) - } - sf.value = int32(x) - case reflect.Int64: - x, err := strconv.ParseInt(prop.Default, 10, 64) - if err != nil { - return nil, false, fmt.Errorf("proto: bad default int64 %q: %v", prop.Default, err) - } - sf.value = x - case reflect.String: - sf.value = prop.Default - 
case reflect.Uint8: - // []byte (not *uint8) - sf.value = []byte(prop.Default) - case reflect.Uint32: - x, err := strconv.ParseUint(prop.Default, 10, 32) - if err != nil { - return nil, false, fmt.Errorf("proto: bad default uint32 %q: %v", prop.Default, err) - } - sf.value = uint32(x) - case reflect.Uint64: - x, err := strconv.ParseUint(prop.Default, 10, 64) - if err != nil { - return nil, false, fmt.Errorf("proto: bad default uint64 %q: %v", prop.Default, err) - } - sf.value = x - default: - return nil, false, fmt.Errorf("proto: unhandled def kind %v", ft.Elem().Kind()) - } - - return sf, false, nil -} - -// mapKeys returns a sort.Interface to be used for sorting the map keys. -// Map fields may have key types of non-float scalars, strings and enums. -func mapKeys(vs []reflect.Value) sort.Interface { - s := mapKeySorter{vs: vs} - - // Type specialization per https://developers.google.com/protocol-buffers/docs/proto#maps. - if len(vs) == 0 { - return s - } - switch vs[0].Kind() { - case reflect.Int32, reflect.Int64: - s.less = func(a, b reflect.Value) bool { return a.Int() < b.Int() } - case reflect.Uint32, reflect.Uint64: - s.less = func(a, b reflect.Value) bool { return a.Uint() < b.Uint() } - case reflect.Bool: - s.less = func(a, b reflect.Value) bool { return !a.Bool() && b.Bool() } // false < true - case reflect.String: - s.less = func(a, b reflect.Value) bool { return a.String() < b.String() } - default: - panic(fmt.Sprintf("unsupported map key type: %v", vs[0].Kind())) - } - - return s -} - -type mapKeySorter struct { - vs []reflect.Value - less func(a, b reflect.Value) bool -} - -func (s mapKeySorter) Len() int { return len(s.vs) } -func (s mapKeySorter) Swap(i, j int) { s.vs[i], s.vs[j] = s.vs[j], s.vs[i] } -func (s mapKeySorter) Less(i, j int) bool { - return s.less(s.vs[i], s.vs[j]) -} - -// isProto3Zero reports whether v is a zero proto3 value. 
-func isProto3Zero(v reflect.Value) bool { - switch v.Kind() { - case reflect.Bool: - return !v.Bool() - case reflect.Int32, reflect.Int64: - return v.Int() == 0 - case reflect.Uint32, reflect.Uint64: - return v.Uint() == 0 - case reflect.Float32, reflect.Float64: - return v.Float() == 0 - case reflect.String: - return v.String() == "" - } - return false -} - -// ProtoPackageIsVersion2 is referenced from generated protocol buffer files -// to assert that that code is compatible with this version of the proto package. -const ProtoPackageIsVersion2 = true - -// ProtoPackageIsVersion1 is referenced from generated protocol buffer files -// to assert that that code is compatible with this version of the proto package. -const ProtoPackageIsVersion1 = true - -// InternalMessageInfo is a type used internally by generated .pb.go files. -// This type is not intended to be used by non-generated code. -// This type is not subject to any compatibility guarantee. -type InternalMessageInfo struct { - marshal *marshalInfo - unmarshal *unmarshalInfo - merge *mergeInfo - discard *discardInfo -} diff --git a/app/platform/fabric/e2e-test/fabric-samples/chaincode/abac/go/vendor/github.com/golang/protobuf/proto/message_set.go b/app/platform/fabric/e2e-test/fabric-samples/chaincode/abac/go/vendor/github.com/golang/protobuf/proto/message_set.go deleted file mode 100644 index 3b6ca41d5..000000000 --- a/app/platform/fabric/e2e-test/fabric-samples/chaincode/abac/go/vendor/github.com/golang/protobuf/proto/message_set.go +++ /dev/null @@ -1,314 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2010 The Go Authors. All rights reserved. 
-// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package proto - -/* - * Support for message sets. - */ - -import ( - "bytes" - "encoding/json" - "errors" - "fmt" - "reflect" - "sort" - "sync" -) - -// errNoMessageTypeID occurs when a protocol buffer does not have a message type ID. -// A message type ID is required for storing a protocol buffer in a message set. 
-var errNoMessageTypeID = errors.New("proto does not have a message type ID") - -// The first two types (_MessageSet_Item and messageSet) -// model what the protocol compiler produces for the following protocol message: -// message MessageSet { -// repeated group Item = 1 { -// required int32 type_id = 2; -// required string message = 3; -// }; -// } -// That is the MessageSet wire format. We can't use a proto to generate these -// because that would introduce a circular dependency between it and this package. - -type _MessageSet_Item struct { - TypeId *int32 `protobuf:"varint,2,req,name=type_id"` - Message []byte `protobuf:"bytes,3,req,name=message"` -} - -type messageSet struct { - Item []*_MessageSet_Item `protobuf:"group,1,rep"` - XXX_unrecognized []byte - // TODO: caching? -} - -// Make sure messageSet is a Message. -var _ Message = (*messageSet)(nil) - -// messageTypeIder is an interface satisfied by a protocol buffer type -// that may be stored in a MessageSet. -type messageTypeIder interface { - MessageTypeId() int32 -} - -func (ms *messageSet) find(pb Message) *_MessageSet_Item { - mti, ok := pb.(messageTypeIder) - if !ok { - return nil - } - id := mti.MessageTypeId() - for _, item := range ms.Item { - if *item.TypeId == id { - return item - } - } - return nil -} - -func (ms *messageSet) Has(pb Message) bool { - return ms.find(pb) != nil -} - -func (ms *messageSet) Unmarshal(pb Message) error { - if item := ms.find(pb); item != nil { - return Unmarshal(item.Message, pb) - } - if _, ok := pb.(messageTypeIder); !ok { - return errNoMessageTypeID - } - return nil // TODO: return error instead? 
-} - -func (ms *messageSet) Marshal(pb Message) error { - msg, err := Marshal(pb) - if err != nil { - return err - } - if item := ms.find(pb); item != nil { - // reuse existing item - item.Message = msg - return nil - } - - mti, ok := pb.(messageTypeIder) - if !ok { - return errNoMessageTypeID - } - - mtid := mti.MessageTypeId() - ms.Item = append(ms.Item, &_MessageSet_Item{ - TypeId: &mtid, - Message: msg, - }) - return nil -} - -func (ms *messageSet) Reset() { *ms = messageSet{} } -func (ms *messageSet) String() string { return CompactTextString(ms) } -func (*messageSet) ProtoMessage() {} - -// Support for the message_set_wire_format message option. - -func skipVarint(buf []byte) []byte { - i := 0 - for ; buf[i]&0x80 != 0; i++ { - } - return buf[i+1:] -} - -// MarshalMessageSet encodes the extension map represented by m in the message set wire format. -// It is called by generated Marshal methods on protocol buffer messages with the message_set_wire_format option. -func MarshalMessageSet(exts interface{}) ([]byte, error) { - return marshalMessageSet(exts, false) -} - -// marshaMessageSet implements above function, with the opt to turn on / off deterministic during Marshal. -func marshalMessageSet(exts interface{}, deterministic bool) ([]byte, error) { - switch exts := exts.(type) { - case *XXX_InternalExtensions: - var u marshalInfo - siz := u.sizeMessageSet(exts) - b := make([]byte, 0, siz) - return u.appendMessageSet(b, exts, deterministic) - - case map[int32]Extension: - // This is an old-style extension map. - // Wrap it in a new-style XXX_InternalExtensions. 
- ie := XXX_InternalExtensions{ - p: &struct { - mu sync.Mutex - extensionMap map[int32]Extension - }{ - extensionMap: exts, - }, - } - - var u marshalInfo - siz := u.sizeMessageSet(&ie) - b := make([]byte, 0, siz) - return u.appendMessageSet(b, &ie, deterministic) - - default: - return nil, errors.New("proto: not an extension map") - } -} - -// UnmarshalMessageSet decodes the extension map encoded in buf in the message set wire format. -// It is called by Unmarshal methods on protocol buffer messages with the message_set_wire_format option. -func UnmarshalMessageSet(buf []byte, exts interface{}) error { - var m map[int32]Extension - switch exts := exts.(type) { - case *XXX_InternalExtensions: - m = exts.extensionsWrite() - case map[int32]Extension: - m = exts - default: - return errors.New("proto: not an extension map") - } - - ms := new(messageSet) - if err := Unmarshal(buf, ms); err != nil { - return err - } - for _, item := range ms.Item { - id := *item.TypeId - msg := item.Message - - // Restore wire type and field number varint, plus length varint. - // Be careful to preserve duplicate items. - b := EncodeVarint(uint64(id)<<3 | WireBytes) - if ext, ok := m[id]; ok { - // Existing data; rip off the tag and length varint - // so we join the new data correctly. - // We can assume that ext.enc is set because we are unmarshaling. - o := ext.enc[len(b):] // skip wire type and field number - _, n := DecodeVarint(o) // calculate length of length varint - o = o[n:] // skip length varint - msg = append(o, msg...) // join old data and new data - } - b = append(b, EncodeVarint(uint64(len(msg)))...) - b = append(b, msg...) - - m[id] = Extension{enc: b} - } - return nil -} - -// MarshalMessageSetJSON encodes the extension map represented by m in JSON format. -// It is called by generated MarshalJSON methods on protocol buffer messages with the message_set_wire_format option. 
-func MarshalMessageSetJSON(exts interface{}) ([]byte, error) { - var m map[int32]Extension - switch exts := exts.(type) { - case *XXX_InternalExtensions: - var mu sync.Locker - m, mu = exts.extensionsRead() - if m != nil { - // Keep the extensions map locked until we're done marshaling to prevent - // races between marshaling and unmarshaling the lazily-{en,de}coded - // values. - mu.Lock() - defer mu.Unlock() - } - case map[int32]Extension: - m = exts - default: - return nil, errors.New("proto: not an extension map") - } - var b bytes.Buffer - b.WriteByte('{') - - // Process the map in key order for deterministic output. - ids := make([]int32, 0, len(m)) - for id := range m { - ids = append(ids, id) - } - sort.Sort(int32Slice(ids)) // int32Slice defined in text.go - - for i, id := range ids { - ext := m[id] - msd, ok := messageSetMap[id] - if !ok { - // Unknown type; we can't render it, so skip it. - continue - } - - if i > 0 && b.Len() > 1 { - b.WriteByte(',') - } - - fmt.Fprintf(&b, `"[%s]":`, msd.name) - - x := ext.value - if x == nil { - x = reflect.New(msd.t.Elem()).Interface() - if err := Unmarshal(ext.enc, x.(Message)); err != nil { - return nil, err - } - } - d, err := json.Marshal(x) - if err != nil { - return nil, err - } - b.Write(d) - } - b.WriteByte('}') - return b.Bytes(), nil -} - -// UnmarshalMessageSetJSON decodes the extension map encoded in buf in JSON format. -// It is called by generated UnmarshalJSON methods on protocol buffer messages with the message_set_wire_format option. -func UnmarshalMessageSetJSON(buf []byte, exts interface{}) error { - // Common-case fast path. - if len(buf) == 0 || bytes.Equal(buf, []byte("{}")) { - return nil - } - - // This is fairly tricky, and it's not clear that it is needed. - return errors.New("TODO: UnmarshalMessageSetJSON not yet implemented") -} - -// A global registry of types that can be used in a MessageSet. 
- -var messageSetMap = make(map[int32]messageSetDesc) - -type messageSetDesc struct { - t reflect.Type // pointer to struct - name string -} - -// RegisterMessageSetType is called from the generated code. -func RegisterMessageSetType(m Message, fieldNum int32, name string) { - messageSetMap[fieldNum] = messageSetDesc{ - t: reflect.TypeOf(m), - name: name, - } -} diff --git a/app/platform/fabric/e2e-test/fabric-samples/chaincode/abac/go/vendor/github.com/golang/protobuf/proto/pointer_reflect.go b/app/platform/fabric/e2e-test/fabric-samples/chaincode/abac/go/vendor/github.com/golang/protobuf/proto/pointer_reflect.go deleted file mode 100644 index b6cad9083..000000000 --- a/app/platform/fabric/e2e-test/fabric-samples/chaincode/abac/go/vendor/github.com/golang/protobuf/proto/pointer_reflect.go +++ /dev/null @@ -1,357 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2012 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -// +build purego appengine js - -// This file contains an implementation of proto field accesses using package reflect. -// It is slower than the code in pointer_unsafe.go but it avoids package unsafe and can -// be used on App Engine. - -package proto - -import ( - "reflect" - "sync" -) - -const unsafeAllowed = false - -// A field identifies a field in a struct, accessible from a pointer. -// In this implementation, a field is identified by the sequence of field indices -// passed to reflect's FieldByIndex. -type field []int - -// toField returns a field equivalent to the given reflect field. -func toField(f *reflect.StructField) field { - return f.Index -} - -// invalidField is an invalid field identifier. -var invalidField = field(nil) - -// zeroField is a noop when calling pointer.offset. -var zeroField = field([]int{}) - -// IsValid reports whether the field identifier is valid. -func (f field) IsValid() bool { return f != nil } - -// The pointer type is for the table-driven decoder. -// The implementation here uses a reflect.Value of pointer type to -// create a generic pointer. In pointer_unsafe.go we use unsafe -// instead of reflect to implement the same (but faster) interface. -type pointer struct { - v reflect.Value -} - -// toPointer converts an interface of pointer type to a pointer -// that points to the same target. 
-func toPointer(i *Message) pointer { - return pointer{v: reflect.ValueOf(*i)} -} - -// toAddrPointer converts an interface to a pointer that points to -// the interface data. -func toAddrPointer(i *interface{}, isptr bool) pointer { - v := reflect.ValueOf(*i) - u := reflect.New(v.Type()) - u.Elem().Set(v) - return pointer{v: u} -} - -// valToPointer converts v to a pointer. v must be of pointer type. -func valToPointer(v reflect.Value) pointer { - return pointer{v: v} -} - -// offset converts from a pointer to a structure to a pointer to -// one of its fields. -func (p pointer) offset(f field) pointer { - return pointer{v: p.v.Elem().FieldByIndex(f).Addr()} -} - -func (p pointer) isNil() bool { - return p.v.IsNil() -} - -// grow updates the slice s in place to make it one element longer. -// s must be addressable. -// Returns the (addressable) new element. -func grow(s reflect.Value) reflect.Value { - n, m := s.Len(), s.Cap() - if n < m { - s.SetLen(n + 1) - } else { - s.Set(reflect.Append(s, reflect.Zero(s.Type().Elem()))) - } - return s.Index(n) -} - -func (p pointer) toInt64() *int64 { - return p.v.Interface().(*int64) -} -func (p pointer) toInt64Ptr() **int64 { - return p.v.Interface().(**int64) -} -func (p pointer) toInt64Slice() *[]int64 { - return p.v.Interface().(*[]int64) -} - -var int32ptr = reflect.TypeOf((*int32)(nil)) - -func (p pointer) toInt32() *int32 { - return p.v.Convert(int32ptr).Interface().(*int32) -} - -// The toInt32Ptr/Slice methods don't work because of enums. -// Instead, we must use set/get methods for the int32ptr/slice case. 
-/* - func (p pointer) toInt32Ptr() **int32 { - return p.v.Interface().(**int32) -} - func (p pointer) toInt32Slice() *[]int32 { - return p.v.Interface().(*[]int32) -} -*/ -func (p pointer) getInt32Ptr() *int32 { - if p.v.Type().Elem().Elem() == reflect.TypeOf(int32(0)) { - // raw int32 type - return p.v.Elem().Interface().(*int32) - } - // an enum - return p.v.Elem().Convert(int32PtrType).Interface().(*int32) -} -func (p pointer) setInt32Ptr(v int32) { - // Allocate value in a *int32. Possibly convert that to a *enum. - // Then assign it to a **int32 or **enum. - // Note: we can convert *int32 to *enum, but we can't convert - // **int32 to **enum! - p.v.Elem().Set(reflect.ValueOf(&v).Convert(p.v.Type().Elem())) -} - -// getInt32Slice copies []int32 from p as a new slice. -// This behavior differs from the implementation in pointer_unsafe.go. -func (p pointer) getInt32Slice() []int32 { - if p.v.Type().Elem().Elem() == reflect.TypeOf(int32(0)) { - // raw int32 type - return p.v.Elem().Interface().([]int32) - } - // an enum - // Allocate a []int32, then assign []enum's values into it. - // Note: we can't convert []enum to []int32. - slice := p.v.Elem() - s := make([]int32, slice.Len()) - for i := 0; i < slice.Len(); i++ { - s[i] = int32(slice.Index(i).Int()) - } - return s -} - -// setInt32Slice copies []int32 into p as a new slice. -// This behavior differs from the implementation in pointer_unsafe.go. -func (p pointer) setInt32Slice(v []int32) { - if p.v.Type().Elem().Elem() == reflect.TypeOf(int32(0)) { - // raw int32 type - p.v.Elem().Set(reflect.ValueOf(v)) - return - } - // an enum - // Allocate a []enum, then assign []int32's values into it. - // Note: we can't convert []enum to []int32. 
- slice := reflect.MakeSlice(p.v.Type().Elem(), len(v), cap(v)) - for i, x := range v { - slice.Index(i).SetInt(int64(x)) - } - p.v.Elem().Set(slice) -} -func (p pointer) appendInt32Slice(v int32) { - grow(p.v.Elem()).SetInt(int64(v)) -} - -func (p pointer) toUint64() *uint64 { - return p.v.Interface().(*uint64) -} -func (p pointer) toUint64Ptr() **uint64 { - return p.v.Interface().(**uint64) -} -func (p pointer) toUint64Slice() *[]uint64 { - return p.v.Interface().(*[]uint64) -} -func (p pointer) toUint32() *uint32 { - return p.v.Interface().(*uint32) -} -func (p pointer) toUint32Ptr() **uint32 { - return p.v.Interface().(**uint32) -} -func (p pointer) toUint32Slice() *[]uint32 { - return p.v.Interface().(*[]uint32) -} -func (p pointer) toBool() *bool { - return p.v.Interface().(*bool) -} -func (p pointer) toBoolPtr() **bool { - return p.v.Interface().(**bool) -} -func (p pointer) toBoolSlice() *[]bool { - return p.v.Interface().(*[]bool) -} -func (p pointer) toFloat64() *float64 { - return p.v.Interface().(*float64) -} -func (p pointer) toFloat64Ptr() **float64 { - return p.v.Interface().(**float64) -} -func (p pointer) toFloat64Slice() *[]float64 { - return p.v.Interface().(*[]float64) -} -func (p pointer) toFloat32() *float32 { - return p.v.Interface().(*float32) -} -func (p pointer) toFloat32Ptr() **float32 { - return p.v.Interface().(**float32) -} -func (p pointer) toFloat32Slice() *[]float32 { - return p.v.Interface().(*[]float32) -} -func (p pointer) toString() *string { - return p.v.Interface().(*string) -} -func (p pointer) toStringPtr() **string { - return p.v.Interface().(**string) -} -func (p pointer) toStringSlice() *[]string { - return p.v.Interface().(*[]string) -} -func (p pointer) toBytes() *[]byte { - return p.v.Interface().(*[]byte) -} -func (p pointer) toBytesSlice() *[][]byte { - return p.v.Interface().(*[][]byte) -} -func (p pointer) toExtensions() *XXX_InternalExtensions { - return p.v.Interface().(*XXX_InternalExtensions) -} -func (p 
pointer) toOldExtensions() *map[int32]Extension { - return p.v.Interface().(*map[int32]Extension) -} -func (p pointer) getPointer() pointer { - return pointer{v: p.v.Elem()} -} -func (p pointer) setPointer(q pointer) { - p.v.Elem().Set(q.v) -} -func (p pointer) appendPointer(q pointer) { - grow(p.v.Elem()).Set(q.v) -} - -// getPointerSlice copies []*T from p as a new []pointer. -// This behavior differs from the implementation in pointer_unsafe.go. -func (p pointer) getPointerSlice() []pointer { - if p.v.IsNil() { - return nil - } - n := p.v.Elem().Len() - s := make([]pointer, n) - for i := 0; i < n; i++ { - s[i] = pointer{v: p.v.Elem().Index(i)} - } - return s -} - -// setPointerSlice copies []pointer into p as a new []*T. -// This behavior differs from the implementation in pointer_unsafe.go. -func (p pointer) setPointerSlice(v []pointer) { - if v == nil { - p.v.Elem().Set(reflect.New(p.v.Elem().Type()).Elem()) - return - } - s := reflect.MakeSlice(p.v.Elem().Type(), 0, len(v)) - for _, p := range v { - s = reflect.Append(s, p.v) - } - p.v.Elem().Set(s) -} - -// getInterfacePointer returns a pointer that points to the -// interface data of the interface pointed by p. -func (p pointer) getInterfacePointer() pointer { - if p.v.Elem().IsNil() { - return pointer{v: p.v.Elem()} - } - return pointer{v: p.v.Elem().Elem().Elem().Field(0).Addr()} // *interface -> interface -> *struct -> struct -} - -func (p pointer) asPointerTo(t reflect.Type) reflect.Value { - // TODO: check that p.v.Type().Elem() == t? 
- return p.v -} - -func atomicLoadUnmarshalInfo(p **unmarshalInfo) *unmarshalInfo { - atomicLock.Lock() - defer atomicLock.Unlock() - return *p -} -func atomicStoreUnmarshalInfo(p **unmarshalInfo, v *unmarshalInfo) { - atomicLock.Lock() - defer atomicLock.Unlock() - *p = v -} -func atomicLoadMarshalInfo(p **marshalInfo) *marshalInfo { - atomicLock.Lock() - defer atomicLock.Unlock() - return *p -} -func atomicStoreMarshalInfo(p **marshalInfo, v *marshalInfo) { - atomicLock.Lock() - defer atomicLock.Unlock() - *p = v -} -func atomicLoadMergeInfo(p **mergeInfo) *mergeInfo { - atomicLock.Lock() - defer atomicLock.Unlock() - return *p -} -func atomicStoreMergeInfo(p **mergeInfo, v *mergeInfo) { - atomicLock.Lock() - defer atomicLock.Unlock() - *p = v -} -func atomicLoadDiscardInfo(p **discardInfo) *discardInfo { - atomicLock.Lock() - defer atomicLock.Unlock() - return *p -} -func atomicStoreDiscardInfo(p **discardInfo, v *discardInfo) { - atomicLock.Lock() - defer atomicLock.Unlock() - *p = v -} - -var atomicLock sync.Mutex diff --git a/app/platform/fabric/e2e-test/fabric-samples/chaincode/abac/go/vendor/github.com/golang/protobuf/proto/pointer_unsafe.go b/app/platform/fabric/e2e-test/fabric-samples/chaincode/abac/go/vendor/github.com/golang/protobuf/proto/pointer_unsafe.go deleted file mode 100644 index d55a335d9..000000000 --- a/app/platform/fabric/e2e-test/fabric-samples/chaincode/abac/go/vendor/github.com/golang/protobuf/proto/pointer_unsafe.go +++ /dev/null @@ -1,308 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2012 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. 
-// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -// +build !purego,!appengine,!js - -// This file contains the implementation of the proto field accesses using package unsafe. - -package proto - -import ( - "reflect" - "sync/atomic" - "unsafe" -) - -const unsafeAllowed = true - -// A field identifies a field in a struct, accessible from a pointer. -// In this implementation, a field is identified by its byte offset from the start of the struct. -type field uintptr - -// toField returns a field equivalent to the given reflect field. -func toField(f *reflect.StructField) field { - return field(f.Offset) -} - -// invalidField is an invalid field identifier. -const invalidField = ^field(0) - -// zeroField is a noop when calling pointer.offset. 
-const zeroField = field(0) - -// IsValid reports whether the field identifier is valid. -func (f field) IsValid() bool { - return f != invalidField -} - -// The pointer type below is for the new table-driven encoder/decoder. -// The implementation here uses unsafe.Pointer to create a generic pointer. -// In pointer_reflect.go we use reflect instead of unsafe to implement -// the same (but slower) interface. -type pointer struct { - p unsafe.Pointer -} - -// size of pointer -var ptrSize = unsafe.Sizeof(uintptr(0)) - -// toPointer converts an interface of pointer type to a pointer -// that points to the same target. -func toPointer(i *Message) pointer { - // Super-tricky - read pointer out of data word of interface value. - // Saves ~25ns over the equivalent: - // return valToPointer(reflect.ValueOf(*i)) - return pointer{p: (*[2]unsafe.Pointer)(unsafe.Pointer(i))[1]} -} - -// toAddrPointer converts an interface to a pointer that points to -// the interface data. -func toAddrPointer(i *interface{}, isptr bool) pointer { - // Super-tricky - read or get the address of data word of interface value. - if isptr { - // The interface is of pointer type, thus it is a direct interface. - // The data word is the pointer data itself. We take its address. - return pointer{p: unsafe.Pointer(uintptr(unsafe.Pointer(i)) + ptrSize)} - } - // The interface is not of pointer type. The data word is the pointer - // to the data. - return pointer{p: (*[2]unsafe.Pointer)(unsafe.Pointer(i))[1]} -} - -// valToPointer converts v to a pointer. v must be of pointer type. -func valToPointer(v reflect.Value) pointer { - return pointer{p: unsafe.Pointer(v.Pointer())} -} - -// offset converts from a pointer to a structure to a pointer to -// one of its fields. -func (p pointer) offset(f field) pointer { - // For safety, we should panic if !f.IsValid, however calling panic causes - // this to no longer be inlineable, which is a serious performance cost. 
- /* - if !f.IsValid() { - panic("invalid field") - } - */ - return pointer{p: unsafe.Pointer(uintptr(p.p) + uintptr(f))} -} - -func (p pointer) isNil() bool { - return p.p == nil -} - -func (p pointer) toInt64() *int64 { - return (*int64)(p.p) -} -func (p pointer) toInt64Ptr() **int64 { - return (**int64)(p.p) -} -func (p pointer) toInt64Slice() *[]int64 { - return (*[]int64)(p.p) -} -func (p pointer) toInt32() *int32 { - return (*int32)(p.p) -} - -// See pointer_reflect.go for why toInt32Ptr/Slice doesn't exist. -/* - func (p pointer) toInt32Ptr() **int32 { - return (**int32)(p.p) - } - func (p pointer) toInt32Slice() *[]int32 { - return (*[]int32)(p.p) - } -*/ -func (p pointer) getInt32Ptr() *int32 { - return *(**int32)(p.p) -} -func (p pointer) setInt32Ptr(v int32) { - *(**int32)(p.p) = &v -} - -// getInt32Slice loads a []int32 from p. -// The value returned is aliased with the original slice. -// This behavior differs from the implementation in pointer_reflect.go. -func (p pointer) getInt32Slice() []int32 { - return *(*[]int32)(p.p) -} - -// setInt32Slice stores a []int32 to p. -// The value set is aliased with the input slice. -// This behavior differs from the implementation in pointer_reflect.go. -func (p pointer) setInt32Slice(v []int32) { - *(*[]int32)(p.p) = v -} - -// TODO: Can we get rid of appendInt32Slice and use setInt32Slice instead? 
-func (p pointer) appendInt32Slice(v int32) { - s := (*[]int32)(p.p) - *s = append(*s, v) -} - -func (p pointer) toUint64() *uint64 { - return (*uint64)(p.p) -} -func (p pointer) toUint64Ptr() **uint64 { - return (**uint64)(p.p) -} -func (p pointer) toUint64Slice() *[]uint64 { - return (*[]uint64)(p.p) -} -func (p pointer) toUint32() *uint32 { - return (*uint32)(p.p) -} -func (p pointer) toUint32Ptr() **uint32 { - return (**uint32)(p.p) -} -func (p pointer) toUint32Slice() *[]uint32 { - return (*[]uint32)(p.p) -} -func (p pointer) toBool() *bool { - return (*bool)(p.p) -} -func (p pointer) toBoolPtr() **bool { - return (**bool)(p.p) -} -func (p pointer) toBoolSlice() *[]bool { - return (*[]bool)(p.p) -} -func (p pointer) toFloat64() *float64 { - return (*float64)(p.p) -} -func (p pointer) toFloat64Ptr() **float64 { - return (**float64)(p.p) -} -func (p pointer) toFloat64Slice() *[]float64 { - return (*[]float64)(p.p) -} -func (p pointer) toFloat32() *float32 { - return (*float32)(p.p) -} -func (p pointer) toFloat32Ptr() **float32 { - return (**float32)(p.p) -} -func (p pointer) toFloat32Slice() *[]float32 { - return (*[]float32)(p.p) -} -func (p pointer) toString() *string { - return (*string)(p.p) -} -func (p pointer) toStringPtr() **string { - return (**string)(p.p) -} -func (p pointer) toStringSlice() *[]string { - return (*[]string)(p.p) -} -func (p pointer) toBytes() *[]byte { - return (*[]byte)(p.p) -} -func (p pointer) toBytesSlice() *[][]byte { - return (*[][]byte)(p.p) -} -func (p pointer) toExtensions() *XXX_InternalExtensions { - return (*XXX_InternalExtensions)(p.p) -} -func (p pointer) toOldExtensions() *map[int32]Extension { - return (*map[int32]Extension)(p.p) -} - -// getPointerSlice loads []*T from p as a []pointer. -// The value returned is aliased with the original slice. -// This behavior differs from the implementation in pointer_reflect.go. 
-func (p pointer) getPointerSlice() []pointer { - // Super-tricky - p should point to a []*T where T is a - // message type. We load it as []pointer. - return *(*[]pointer)(p.p) -} - -// setPointerSlice stores []pointer into p as a []*T. -// The value set is aliased with the input slice. -// This behavior differs from the implementation in pointer_reflect.go. -func (p pointer) setPointerSlice(v []pointer) { - // Super-tricky - p should point to a []*T where T is a - // message type. We store it as []pointer. - *(*[]pointer)(p.p) = v -} - -// getPointer loads the pointer at p and returns it. -func (p pointer) getPointer() pointer { - return pointer{p: *(*unsafe.Pointer)(p.p)} -} - -// setPointer stores the pointer q at p. -func (p pointer) setPointer(q pointer) { - *(*unsafe.Pointer)(p.p) = q.p -} - -// append q to the slice pointed to by p. -func (p pointer) appendPointer(q pointer) { - s := (*[]unsafe.Pointer)(p.p) - *s = append(*s, q.p) -} - -// getInterfacePointer returns a pointer that points to the -// interface data of the interface pointed by p. -func (p pointer) getInterfacePointer() pointer { - // Super-tricky - read pointer out of data word of interface value. - return pointer{p: (*(*[2]unsafe.Pointer)(p.p))[1]} -} - -// asPointerTo returns a reflect.Value that is a pointer to an -// object of type t stored at p. 
-func (p pointer) asPointerTo(t reflect.Type) reflect.Value { - return reflect.NewAt(t, p.p) -} - -func atomicLoadUnmarshalInfo(p **unmarshalInfo) *unmarshalInfo { - return (*unmarshalInfo)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(p)))) -} -func atomicStoreUnmarshalInfo(p **unmarshalInfo, v *unmarshalInfo) { - atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(p)), unsafe.Pointer(v)) -} -func atomicLoadMarshalInfo(p **marshalInfo) *marshalInfo { - return (*marshalInfo)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(p)))) -} -func atomicStoreMarshalInfo(p **marshalInfo, v *marshalInfo) { - atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(p)), unsafe.Pointer(v)) -} -func atomicLoadMergeInfo(p **mergeInfo) *mergeInfo { - return (*mergeInfo)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(p)))) -} -func atomicStoreMergeInfo(p **mergeInfo, v *mergeInfo) { - atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(p)), unsafe.Pointer(v)) -} -func atomicLoadDiscardInfo(p **discardInfo) *discardInfo { - return (*discardInfo)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(p)))) -} -func atomicStoreDiscardInfo(p **discardInfo, v *discardInfo) { - atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(p)), unsafe.Pointer(v)) -} diff --git a/app/platform/fabric/e2e-test/fabric-samples/chaincode/abac/go/vendor/github.com/golang/protobuf/proto/properties.go b/app/platform/fabric/e2e-test/fabric-samples/chaincode/abac/go/vendor/github.com/golang/protobuf/proto/properties.go deleted file mode 100644 index dce098e6e..000000000 --- a/app/platform/fabric/e2e-test/fabric-samples/chaincode/abac/go/vendor/github.com/golang/protobuf/proto/properties.go +++ /dev/null @@ -1,535 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2010 The Go Authors. All rights reserved. 
-// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package proto - -/* - * Routines for encoding data into the wire format for protocol buffers. - */ - -import ( - "fmt" - "log" - "os" - "reflect" - "sort" - "strconv" - "strings" - "sync" -) - -const debug bool = false - -// Constants that identify the encoding of a value on the wire. 
-const ( - WireVarint = 0 - WireFixed64 = 1 - WireBytes = 2 - WireStartGroup = 3 - WireEndGroup = 4 - WireFixed32 = 5 -) - -// tagMap is an optimization over map[int]int for typical protocol buffer -// use-cases. Encoded protocol buffers are often in tag order with small tag -// numbers. -type tagMap struct { - fastTags []int - slowTags map[int]int -} - -// tagMapFastLimit is the upper bound on the tag number that will be stored in -// the tagMap slice rather than its map. -const tagMapFastLimit = 1024 - -func (p *tagMap) get(t int) (int, bool) { - if t > 0 && t < tagMapFastLimit { - if t >= len(p.fastTags) { - return 0, false - } - fi := p.fastTags[t] - return fi, fi >= 0 - } - fi, ok := p.slowTags[t] - return fi, ok -} - -func (p *tagMap) put(t int, fi int) { - if t > 0 && t < tagMapFastLimit { - for len(p.fastTags) < t+1 { - p.fastTags = append(p.fastTags, -1) - } - p.fastTags[t] = fi - return - } - if p.slowTags == nil { - p.slowTags = make(map[int]int) - } - p.slowTags[t] = fi -} - -// StructProperties represents properties for all the fields of a struct. -// decoderTags and decoderOrigNames should only be used by the decoder. -type StructProperties struct { - Prop []*Properties // properties for each field - reqCount int // required count - decoderTags tagMap // map from proto tag to struct field number - decoderOrigNames map[string]int // map from original name to struct field number - order []int // list of struct field numbers in tag order - - // OneofTypes contains information about the oneof fields in this message. - // It is keyed by the original name of a field. - OneofTypes map[string]*OneofProperties -} - -// OneofProperties represents information about a specific field in a oneof. 
-type OneofProperties struct { - Type reflect.Type // pointer to generated struct type for this oneof field - Field int // struct field number of the containing oneof in the message - Prop *Properties -} - -// Implement the sorting interface so we can sort the fields in tag order, as recommended by the spec. -// See encode.go, (*Buffer).enc_struct. - -func (sp *StructProperties) Len() int { return len(sp.order) } -func (sp *StructProperties) Less(i, j int) bool { - return sp.Prop[sp.order[i]].Tag < sp.Prop[sp.order[j]].Tag -} -func (sp *StructProperties) Swap(i, j int) { sp.order[i], sp.order[j] = sp.order[j], sp.order[i] } - -// Properties represents the protocol-specific behavior of a single struct field. -type Properties struct { - Name string // name of the field, for error messages - OrigName string // original name before protocol compiler (always set) - JSONName string // name to use for JSON; determined by protoc - Wire string - WireType int - Tag int - Required bool - Optional bool - Repeated bool - Packed bool // relevant for repeated primitives only - Enum string // set for enum types only - proto3 bool // whether this is known to be a proto3 field - oneof bool // whether this is a oneof field - - Default string // default value - HasDefault bool // whether an explicit default was provided - - stype reflect.Type // set for struct types only - sprop *StructProperties // set for struct types only - - mtype reflect.Type // set for map types only - MapKeyProp *Properties // set for map types only - MapValProp *Properties // set for map types only -} - -// String formats the properties in the protobuf struct field tag style. 
-func (p *Properties) String() string { - s := p.Wire - s += "," - s += strconv.Itoa(p.Tag) - if p.Required { - s += ",req" - } - if p.Optional { - s += ",opt" - } - if p.Repeated { - s += ",rep" - } - if p.Packed { - s += ",packed" - } - s += ",name=" + p.OrigName - if p.JSONName != p.OrigName { - s += ",json=" + p.JSONName - } - if p.proto3 { - s += ",proto3" - } - if p.oneof { - s += ",oneof" - } - if len(p.Enum) > 0 { - s += ",enum=" + p.Enum - } - if p.HasDefault { - s += ",def=" + p.Default - } - return s -} - -// Parse populates p by parsing a string in the protobuf struct field tag style. -func (p *Properties) Parse(s string) { - // "bytes,49,opt,name=foo,def=hello!" - fields := strings.Split(s, ",") // breaks def=, but handled below. - if len(fields) < 2 { - fmt.Fprintf(os.Stderr, "proto: tag has too few fields: %q\n", s) - return - } - - p.Wire = fields[0] - switch p.Wire { - case "varint": - p.WireType = WireVarint - case "fixed32": - p.WireType = WireFixed32 - case "fixed64": - p.WireType = WireFixed64 - case "zigzag32": - p.WireType = WireVarint - case "zigzag64": - p.WireType = WireVarint - case "bytes", "group": - p.WireType = WireBytes - // no numeric converter for non-numeric types - default: - fmt.Fprintf(os.Stderr, "proto: tag has unknown wire type: %q\n", s) - return - } - - var err error - p.Tag, err = strconv.Atoi(fields[1]) - if err != nil { - return - } - -outer: - for i := 2; i < len(fields); i++ { - f := fields[i] - switch { - case f == "req": - p.Required = true - case f == "opt": - p.Optional = true - case f == "rep": - p.Repeated = true - case f == "packed": - p.Packed = true - case strings.HasPrefix(f, "name="): - p.OrigName = f[5:] - case strings.HasPrefix(f, "json="): - p.JSONName = f[5:] - case strings.HasPrefix(f, "enum="): - p.Enum = f[5:] - case f == "proto3": - p.proto3 = true - case f == "oneof": - p.oneof = true - case strings.HasPrefix(f, "def="): - p.HasDefault = true - p.Default = f[4:] // rest of string - if i+1 < 
len(fields) { - // Commas aren't escaped, and def is always last. - p.Default += "," + strings.Join(fields[i+1:], ",") - break outer - } - } - } -} - -var protoMessageType = reflect.TypeOf((*Message)(nil)).Elem() - -// setFieldProps initializes the field properties for submessages and maps. -func (p *Properties) setFieldProps(typ reflect.Type, f *reflect.StructField, lockGetProp bool) { - switch t1 := typ; t1.Kind() { - case reflect.Ptr: - if t1.Elem().Kind() == reflect.Struct { - p.stype = t1.Elem() - } - - case reflect.Slice: - if t2 := t1.Elem(); t2.Kind() == reflect.Ptr && t2.Elem().Kind() == reflect.Struct { - p.stype = t2.Elem() - } - - case reflect.Map: - p.mtype = t1 - p.MapKeyProp = &Properties{} - p.MapKeyProp.init(reflect.PtrTo(p.mtype.Key()), "Key", f.Tag.Get("protobuf_key"), nil, lockGetProp) - p.MapValProp = &Properties{} - vtype := p.mtype.Elem() - if vtype.Kind() != reflect.Ptr && vtype.Kind() != reflect.Slice { - // The value type is not a message (*T) or bytes ([]byte), - // so we need encoders for the pointer to this type. - vtype = reflect.PtrTo(vtype) - } - p.MapValProp.init(vtype, "Value", f.Tag.Get("protobuf_val"), nil, lockGetProp) - } - - if p.stype != nil { - if lockGetProp { - p.sprop = GetProperties(p.stype) - } else { - p.sprop = getPropertiesLocked(p.stype) - } - } -} - -var ( - marshalerType = reflect.TypeOf((*Marshaler)(nil)).Elem() -) - -// Init populates the properties from a protocol buffer struct tag. -func (p *Properties) Init(typ reflect.Type, name, tag string, f *reflect.StructField) { - p.init(typ, name, tag, f, true) -} - -func (p *Properties) init(typ reflect.Type, name, tag string, f *reflect.StructField, lockGetProp bool) { - // "bytes,49,opt,def=hello!" 
- p.Name = name - p.OrigName = name - if tag == "" { - return - } - p.Parse(tag) - p.setFieldProps(typ, f, lockGetProp) -} - -var ( - propertiesMu sync.RWMutex - propertiesMap = make(map[reflect.Type]*StructProperties) -) - -// GetProperties returns the list of properties for the type represented by t. -// t must represent a generated struct type of a protocol message. -func GetProperties(t reflect.Type) *StructProperties { - if t.Kind() != reflect.Struct { - panic("proto: type must have kind struct") - } - - // Most calls to GetProperties in a long-running program will be - // retrieving details for types we have seen before. - propertiesMu.RLock() - sprop, ok := propertiesMap[t] - propertiesMu.RUnlock() - if ok { - return sprop - } - - propertiesMu.Lock() - sprop = getPropertiesLocked(t) - propertiesMu.Unlock() - return sprop -} - -// getPropertiesLocked requires that propertiesMu is held. -func getPropertiesLocked(t reflect.Type) *StructProperties { - if prop, ok := propertiesMap[t]; ok { - return prop - } - - prop := new(StructProperties) - // in case of recursive protos, fill this in now. - propertiesMap[t] = prop - - // build properties - prop.Prop = make([]*Properties, t.NumField()) - prop.order = make([]int, t.NumField()) - - for i := 0; i < t.NumField(); i++ { - f := t.Field(i) - p := new(Properties) - name := f.Name - p.init(f.Type, name, f.Tag.Get("protobuf"), &f, false) - - oneof := f.Tag.Get("protobuf_oneof") // special case - if oneof != "" { - // Oneof fields don't use the traditional protobuf tag. - p.OrigName = oneof - } - prop.Prop[i] = p - prop.order[i] = i - if debug { - print(i, " ", f.Name, " ", t.String(), " ") - if p.Tag > 0 { - print(p.String()) - } - print("\n") - } - } - - // Re-order prop.order. 
- sort.Sort(prop) - - type oneofMessage interface { - XXX_OneofFuncs() (func(Message, *Buffer) error, func(Message, int, int, *Buffer) (bool, error), func(Message) int, []interface{}) - } - if om, ok := reflect.Zero(reflect.PtrTo(t)).Interface().(oneofMessage); ok { - var oots []interface{} - _, _, _, oots = om.XXX_OneofFuncs() - - // Interpret oneof metadata. - prop.OneofTypes = make(map[string]*OneofProperties) - for _, oot := range oots { - oop := &OneofProperties{ - Type: reflect.ValueOf(oot).Type(), // *T - Prop: new(Properties), - } - sft := oop.Type.Elem().Field(0) - oop.Prop.Name = sft.Name - oop.Prop.Parse(sft.Tag.Get("protobuf")) - // There will be exactly one interface field that - // this new value is assignable to. - for i := 0; i < t.NumField(); i++ { - f := t.Field(i) - if f.Type.Kind() != reflect.Interface { - continue - } - if !oop.Type.AssignableTo(f.Type) { - continue - } - oop.Field = i - break - } - prop.OneofTypes[oop.Prop.OrigName] = oop - } - } - - // build required counts - // build tags - reqCount := 0 - prop.decoderOrigNames = make(map[string]int) - for i, p := range prop.Prop { - if strings.HasPrefix(p.Name, "XXX_") { - // Internal fields should not appear in tags/origNames maps. - // They are handled specially when encoding and decoding. - continue - } - if p.Required { - reqCount++ - } - prop.decoderTags.put(p.Tag, i) - prop.decoderOrigNames[p.OrigName] = i - } - prop.reqCount = reqCount - - return prop -} - -// A global registry of enum types. -// The generated code will register the generated maps by calling RegisterEnum. - -var enumValueMaps = make(map[string]map[string]int32) - -// RegisterEnum is called from the generated code to install the enum descriptor -// maps into the global table to aid parsing text format protocol buffers. 
-func RegisterEnum(typeName string, unusedNameMap map[int32]string, valueMap map[string]int32) { - if _, ok := enumValueMaps[typeName]; ok { - panic("proto: duplicate enum registered: " + typeName) - } - enumValueMaps[typeName] = valueMap -} - -// EnumValueMap returns the mapping from names to integers of the -// enum type enumType, or a nil if not found. -func EnumValueMap(enumType string) map[string]int32 { - return enumValueMaps[enumType] -} - -// A registry of all linked message types. -// The string is a fully-qualified proto name ("pkg.Message"). -var ( - protoTypedNils = make(map[string]Message) // a map from proto names to typed nil pointers - protoMapTypes = make(map[string]reflect.Type) // a map from proto names to map types - revProtoTypes = make(map[reflect.Type]string) -) - -// RegisterType is called from generated code and maps from the fully qualified -// proto name to the type (pointer to struct) of the protocol buffer. -func RegisterType(x Message, name string) { - if _, ok := protoTypedNils[name]; ok { - // TODO: Some day, make this a panic. - log.Printf("proto: duplicate proto type registered: %s", name) - return - } - t := reflect.TypeOf(x) - if v := reflect.ValueOf(x); v.Kind() == reflect.Ptr && v.Pointer() == 0 { - // Generated code always calls RegisterType with nil x. - // This check is just for extra safety. - protoTypedNils[name] = x - } else { - protoTypedNils[name] = reflect.Zero(t).Interface().(Message) - } - revProtoTypes[t] = name -} - -// RegisterMapType is called from generated code and maps from the fully qualified -// proto name to the native map type of the proto map definition. 
-func RegisterMapType(x interface{}, name string) { - if reflect.TypeOf(x).Kind() != reflect.Map { - panic(fmt.Sprintf("RegisterMapType(%T, %q); want map", x, name)) - } - if _, ok := protoMapTypes[name]; ok { - log.Printf("proto: duplicate proto type registered: %s", name) - return - } - t := reflect.TypeOf(x) - protoMapTypes[name] = t - revProtoTypes[t] = name -} - -// MessageName returns the fully-qualified proto name for the given message type. -func MessageName(x Message) string { - type xname interface { - XXX_MessageName() string - } - if m, ok := x.(xname); ok { - return m.XXX_MessageName() - } - return revProtoTypes[reflect.TypeOf(x)] -} - -// MessageType returns the message type (pointer to struct) for a named message. -// The type is not guaranteed to implement proto.Message if the name refers to a -// map entry. -func MessageType(name string) reflect.Type { - if t, ok := protoTypedNils[name]; ok { - return reflect.TypeOf(t) - } - return protoMapTypes[name] -} - -// A registry of all linked proto files. -var ( - protoFiles = make(map[string][]byte) // file name => fileDescriptor -) - -// RegisterFile is called from generated code and maps from the -// full file name of a .proto file to its compressed FileDescriptorProto. -func RegisterFile(filename string, fileDescriptor []byte) { - protoFiles[filename] = fileDescriptor -} - -// FileDescriptor returns the compressed FileDescriptorProto for a .proto file. 
-func FileDescriptor(filename string) []byte { return protoFiles[filename] } diff --git a/app/platform/fabric/e2e-test/fabric-samples/chaincode/abac/go/vendor/github.com/golang/protobuf/proto/table_marshal.go b/app/platform/fabric/e2e-test/fabric-samples/chaincode/abac/go/vendor/github.com/golang/protobuf/proto/table_marshal.go deleted file mode 100644 index f3a2d16a4..000000000 --- a/app/platform/fabric/e2e-test/fabric-samples/chaincode/abac/go/vendor/github.com/golang/protobuf/proto/table_marshal.go +++ /dev/null @@ -1,2767 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2016 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package proto - -import ( - "errors" - "fmt" - "math" - "reflect" - "sort" - "strconv" - "strings" - "sync" - "sync/atomic" - "unicode/utf8" -) - -// a sizer takes a pointer to a field and the size of its tag, computes the size of -// the encoded data. -type sizer func(pointer, int) int - -// a marshaler takes a byte slice, a pointer to a field, and its tag (in wire format), -// marshals the field to the end of the slice, returns the slice and error (if any). -type marshaler func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) - -// marshalInfo is the information used for marshaling a message. -type marshalInfo struct { - typ reflect.Type - fields []*marshalFieldInfo - unrecognized field // offset of XXX_unrecognized - extensions field // offset of XXX_InternalExtensions - v1extensions field // offset of XXX_extensions - sizecache field // offset of XXX_sizecache - initialized int32 // 0 -- only typ is set, 1 -- fully initialized - messageset bool // uses message set wire format - hasmarshaler bool // has custom marshaler - sync.RWMutex // protect extElems map, also for initialization - extElems map[int32]*marshalElemInfo // info of extension elements -} - -// marshalFieldInfo is the information used for marshaling a field of a message. 
-type marshalFieldInfo struct { - field field - wiretag uint64 // tag in wire format - tagsize int // size of tag in wire format - sizer sizer - marshaler marshaler - isPointer bool - required bool // field is required - name string // name of the field, for error reporting - oneofElems map[reflect.Type]*marshalElemInfo // info of oneof elements -} - -// marshalElemInfo is the information used for marshaling an extension or oneof element. -type marshalElemInfo struct { - wiretag uint64 // tag in wire format - tagsize int // size of tag in wire format - sizer sizer - marshaler marshaler - isptr bool // elem is pointer typed, thus interface of this type is a direct interface (extension only) -} - -var ( - marshalInfoMap = map[reflect.Type]*marshalInfo{} - marshalInfoLock sync.Mutex -) - -// getMarshalInfo returns the information to marshal a given type of message. -// The info it returns may not necessarily initialized. -// t is the type of the message (NOT the pointer to it). -func getMarshalInfo(t reflect.Type) *marshalInfo { - marshalInfoLock.Lock() - u, ok := marshalInfoMap[t] - if !ok { - u = &marshalInfo{typ: t} - marshalInfoMap[t] = u - } - marshalInfoLock.Unlock() - return u -} - -// Size is the entry point from generated code, -// and should be ONLY called by generated code. -// It computes the size of encoded data of msg. -// a is a pointer to a place to store cached marshal info. -func (a *InternalMessageInfo) Size(msg Message) int { - u := getMessageMarshalInfo(msg, a) - ptr := toPointer(&msg) - if ptr.isNil() { - // We get here if msg is a typed nil ((*SomeMessage)(nil)), - // so it satisfies the interface, and msg == nil wouldn't - // catch it. We don't want crash in this case. - return 0 - } - return u.size(ptr) -} - -// Marshal is the entry point from generated code, -// and should be ONLY called by generated code. -// It marshals msg to the end of b. -// a is a pointer to a place to store cached marshal info. 
-func (a *InternalMessageInfo) Marshal(b []byte, msg Message, deterministic bool) ([]byte, error) { - u := getMessageMarshalInfo(msg, a) - ptr := toPointer(&msg) - if ptr.isNil() { - // We get here if msg is a typed nil ((*SomeMessage)(nil)), - // so it satisfies the interface, and msg == nil wouldn't - // catch it. We don't want crash in this case. - return b, ErrNil - } - return u.marshal(b, ptr, deterministic) -} - -func getMessageMarshalInfo(msg interface{}, a *InternalMessageInfo) *marshalInfo { - // u := a.marshal, but atomically. - // We use an atomic here to ensure memory consistency. - u := atomicLoadMarshalInfo(&a.marshal) - if u == nil { - // Get marshal information from type of message. - t := reflect.ValueOf(msg).Type() - if t.Kind() != reflect.Ptr { - panic(fmt.Sprintf("cannot handle non-pointer message type %v", t)) - } - u = getMarshalInfo(t.Elem()) - // Store it in the cache for later users. - // a.marshal = u, but atomically. - atomicStoreMarshalInfo(&a.marshal, u) - } - return u -} - -// size is the main function to compute the size of the encoded data of a message. -// ptr is the pointer to the message. -func (u *marshalInfo) size(ptr pointer) int { - if atomic.LoadInt32(&u.initialized) == 0 { - u.computeMarshalInfo() - } - - // If the message can marshal itself, let it do it, for compatibility. - // NOTE: This is not efficient. 
- if u.hasmarshaler { - m := ptr.asPointerTo(u.typ).Interface().(Marshaler) - b, _ := m.Marshal() - return len(b) - } - - n := 0 - for _, f := range u.fields { - if f.isPointer && ptr.offset(f.field).getPointer().isNil() { - // nil pointer always marshals to nothing - continue - } - n += f.sizer(ptr.offset(f.field), f.tagsize) - } - if u.extensions.IsValid() { - e := ptr.offset(u.extensions).toExtensions() - if u.messageset { - n += u.sizeMessageSet(e) - } else { - n += u.sizeExtensions(e) - } - } - if u.v1extensions.IsValid() { - m := *ptr.offset(u.v1extensions).toOldExtensions() - n += u.sizeV1Extensions(m) - } - if u.unrecognized.IsValid() { - s := *ptr.offset(u.unrecognized).toBytes() - n += len(s) - } - // cache the result for use in marshal - if u.sizecache.IsValid() { - atomic.StoreInt32(ptr.offset(u.sizecache).toInt32(), int32(n)) - } - return n -} - -// cachedsize gets the size from cache. If there is no cache (i.e. message is not generated), -// fall back to compute the size. -func (u *marshalInfo) cachedsize(ptr pointer) int { - if u.sizecache.IsValid() { - return int(atomic.LoadInt32(ptr.offset(u.sizecache).toInt32())) - } - return u.size(ptr) -} - -// marshal is the main function to marshal a message. It takes a byte slice and appends -// the encoded data to the end of the slice, returns the slice and error (if any). -// ptr is the pointer to the message. -// If deterministic is true, map is marshaled in deterministic order. -func (u *marshalInfo) marshal(b []byte, ptr pointer, deterministic bool) ([]byte, error) { - if atomic.LoadInt32(&u.initialized) == 0 { - u.computeMarshalInfo() - } - - // If the message can marshal itself, let it do it, for compatibility. - // NOTE: This is not efficient. - if u.hasmarshaler { - m := ptr.asPointerTo(u.typ).Interface().(Marshaler) - b1, err := m.Marshal() - b = append(b, b1...) - return b, err - } - - var err, errLater error - // The old marshaler encodes extensions at beginning. 
- if u.extensions.IsValid() { - e := ptr.offset(u.extensions).toExtensions() - if u.messageset { - b, err = u.appendMessageSet(b, e, deterministic) - } else { - b, err = u.appendExtensions(b, e, deterministic) - } - if err != nil { - return b, err - } - } - if u.v1extensions.IsValid() { - m := *ptr.offset(u.v1extensions).toOldExtensions() - b, err = u.appendV1Extensions(b, m, deterministic) - if err != nil { - return b, err - } - } - for _, f := range u.fields { - if f.required { - if ptr.offset(f.field).getPointer().isNil() { - // Required field is not set. - // We record the error but keep going, to give a complete marshaling. - if errLater == nil { - errLater = &RequiredNotSetError{f.name} - } - continue - } - } - if f.isPointer && ptr.offset(f.field).getPointer().isNil() { - // nil pointer always marshals to nothing - continue - } - b, err = f.marshaler(b, ptr.offset(f.field), f.wiretag, deterministic) - if err != nil { - if err1, ok := err.(*RequiredNotSetError); ok { - // Required field in submessage is not set. - // We record the error but keep going, to give a complete marshaling. - if errLater == nil { - errLater = &RequiredNotSetError{f.name + "." + err1.field} - } - continue - } - if err == errRepeatedHasNil { - err = errors.New("proto: repeated field " + f.name + " has nil element") - } - if err == errInvalidUTF8 { - if errLater == nil { - fullName := revProtoTypes[reflect.PtrTo(u.typ)] + "." + f.name - errLater = &invalidUTF8Error{fullName} - } - continue - } - return b, err - } - } - if u.unrecognized.IsValid() { - s := *ptr.offset(u.unrecognized).toBytes() - b = append(b, s...) - } - return b, errLater -} - -// computeMarshalInfo initializes the marshal info. 
-func (u *marshalInfo) computeMarshalInfo() { - u.Lock() - defer u.Unlock() - if u.initialized != 0 { // non-atomic read is ok as it is protected by the lock - return - } - - t := u.typ - u.unrecognized = invalidField - u.extensions = invalidField - u.v1extensions = invalidField - u.sizecache = invalidField - - // If the message can marshal itself, let it do it, for compatibility. - // NOTE: This is not efficient. - if reflect.PtrTo(t).Implements(marshalerType) { - u.hasmarshaler = true - atomic.StoreInt32(&u.initialized, 1) - return - } - - // get oneof implementers - var oneofImplementers []interface{} - if m, ok := reflect.Zero(reflect.PtrTo(t)).Interface().(oneofMessage); ok { - _, _, _, oneofImplementers = m.XXX_OneofFuncs() - } - - n := t.NumField() - - // deal with XXX fields first - for i := 0; i < t.NumField(); i++ { - f := t.Field(i) - if !strings.HasPrefix(f.Name, "XXX_") { - continue - } - switch f.Name { - case "XXX_sizecache": - u.sizecache = toField(&f) - case "XXX_unrecognized": - u.unrecognized = toField(&f) - case "XXX_InternalExtensions": - u.extensions = toField(&f) - u.messageset = f.Tag.Get("protobuf_messageset") == "1" - case "XXX_extensions": - u.v1extensions = toField(&f) - case "XXX_NoUnkeyedLiteral": - // nothing to do - default: - panic("unknown XXX field: " + f.Name) - } - n-- - } - - // normal fields - fields := make([]marshalFieldInfo, n) // batch allocation - u.fields = make([]*marshalFieldInfo, 0, n) - for i, j := 0, 0; i < t.NumField(); i++ { - f := t.Field(i) - - if strings.HasPrefix(f.Name, "XXX_") { - continue - } - field := &fields[j] - j++ - field.name = f.Name - u.fields = append(u.fields, field) - if f.Tag.Get("protobuf_oneof") != "" { - field.computeOneofFieldInfo(&f, oneofImplementers) - continue - } - if f.Tag.Get("protobuf") == "" { - // field has no tag (not in generated message), ignore it - u.fields = u.fields[:len(u.fields)-1] - j-- - continue - } - field.computeMarshalFieldInfo(&f) - } - - // fields are marshaled in 
tag order on the wire. - sort.Sort(byTag(u.fields)) - - atomic.StoreInt32(&u.initialized, 1) -} - -// helper for sorting fields by tag -type byTag []*marshalFieldInfo - -func (a byTag) Len() int { return len(a) } -func (a byTag) Swap(i, j int) { a[i], a[j] = a[j], a[i] } -func (a byTag) Less(i, j int) bool { return a[i].wiretag < a[j].wiretag } - -// getExtElemInfo returns the information to marshal an extension element. -// The info it returns is initialized. -func (u *marshalInfo) getExtElemInfo(desc *ExtensionDesc) *marshalElemInfo { - // get from cache first - u.RLock() - e, ok := u.extElems[desc.Field] - u.RUnlock() - if ok { - return e - } - - t := reflect.TypeOf(desc.ExtensionType) // pointer or slice to basic type or struct - tags := strings.Split(desc.Tag, ",") - tag, err := strconv.Atoi(tags[1]) - if err != nil { - panic("tag is not an integer") - } - wt := wiretype(tags[0]) - sizer, marshaler := typeMarshaler(t, tags, false, false) - e = &marshalElemInfo{ - wiretag: uint64(tag)<<3 | wt, - tagsize: SizeVarint(uint64(tag) << 3), - sizer: sizer, - marshaler: marshaler, - isptr: t.Kind() == reflect.Ptr, - } - - // update cache - u.Lock() - if u.extElems == nil { - u.extElems = make(map[int32]*marshalElemInfo) - } - u.extElems[desc.Field] = e - u.Unlock() - return e -} - -// computeMarshalFieldInfo fills up the information to marshal a field. -func (fi *marshalFieldInfo) computeMarshalFieldInfo(f *reflect.StructField) { - // parse protobuf tag of the field. - // tag has format of "bytes,49,opt,name=foo,def=hello!" 
- tags := strings.Split(f.Tag.Get("protobuf"), ",") - if tags[0] == "" { - return - } - tag, err := strconv.Atoi(tags[1]) - if err != nil { - panic("tag is not an integer") - } - wt := wiretype(tags[0]) - if tags[2] == "req" { - fi.required = true - } - fi.setTag(f, tag, wt) - fi.setMarshaler(f, tags) -} - -func (fi *marshalFieldInfo) computeOneofFieldInfo(f *reflect.StructField, oneofImplementers []interface{}) { - fi.field = toField(f) - fi.wiretag = math.MaxInt32 // Use a large tag number, make oneofs sorted at the end. This tag will not appear on the wire. - fi.isPointer = true - fi.sizer, fi.marshaler = makeOneOfMarshaler(fi, f) - fi.oneofElems = make(map[reflect.Type]*marshalElemInfo) - - ityp := f.Type // interface type - for _, o := range oneofImplementers { - t := reflect.TypeOf(o) - if !t.Implements(ityp) { - continue - } - sf := t.Elem().Field(0) // oneof implementer is a struct with a single field - tags := strings.Split(sf.Tag.Get("protobuf"), ",") - tag, err := strconv.Atoi(tags[1]) - if err != nil { - panic("tag is not an integer") - } - wt := wiretype(tags[0]) - sizer, marshaler := typeMarshaler(sf.Type, tags, false, true) // oneof should not omit any zero value - fi.oneofElems[t.Elem()] = &marshalElemInfo{ - wiretag: uint64(tag)<<3 | wt, - tagsize: SizeVarint(uint64(tag) << 3), - sizer: sizer, - marshaler: marshaler, - } - } -} - -type oneofMessage interface { - XXX_OneofFuncs() (func(Message, *Buffer) error, func(Message, int, int, *Buffer) (bool, error), func(Message) int, []interface{}) -} - -// wiretype returns the wire encoding of the type. -func wiretype(encoding string) uint64 { - switch encoding { - case "fixed32": - return WireFixed32 - case "fixed64": - return WireFixed64 - case "varint", "zigzag32", "zigzag64": - return WireVarint - case "bytes": - return WireBytes - case "group": - return WireStartGroup - } - panic("unknown wire type " + encoding) -} - -// setTag fills up the tag (in wire format) and its size in the info of a field. 
-func (fi *marshalFieldInfo) setTag(f *reflect.StructField, tag int, wt uint64) { - fi.field = toField(f) - fi.wiretag = uint64(tag)<<3 | wt - fi.tagsize = SizeVarint(uint64(tag) << 3) -} - -// setMarshaler fills up the sizer and marshaler in the info of a field. -func (fi *marshalFieldInfo) setMarshaler(f *reflect.StructField, tags []string) { - switch f.Type.Kind() { - case reflect.Map: - // map field - fi.isPointer = true - fi.sizer, fi.marshaler = makeMapMarshaler(f) - return - case reflect.Ptr, reflect.Slice: - fi.isPointer = true - } - fi.sizer, fi.marshaler = typeMarshaler(f.Type, tags, true, false) -} - -// typeMarshaler returns the sizer and marshaler of a given field. -// t is the type of the field. -// tags is the generated "protobuf" tag of the field. -// If nozero is true, zero value is not marshaled to the wire. -// If oneof is true, it is a oneof field. -func typeMarshaler(t reflect.Type, tags []string, nozero, oneof bool) (sizer, marshaler) { - encoding := tags[0] - - pointer := false - slice := false - if t.Kind() == reflect.Slice && t.Elem().Kind() != reflect.Uint8 { - slice = true - t = t.Elem() - } - if t.Kind() == reflect.Ptr { - pointer = true - t = t.Elem() - } - - packed := false - proto3 := false - validateUTF8 := true - for i := 2; i < len(tags); i++ { - if tags[i] == "packed" { - packed = true - } - if tags[i] == "proto3" { - proto3 = true - } - } - validateUTF8 = validateUTF8 && proto3 - - switch t.Kind() { - case reflect.Bool: - if pointer { - return sizeBoolPtr, appendBoolPtr - } - if slice { - if packed { - return sizeBoolPackedSlice, appendBoolPackedSlice - } - return sizeBoolSlice, appendBoolSlice - } - if nozero { - return sizeBoolValueNoZero, appendBoolValueNoZero - } - return sizeBoolValue, appendBoolValue - case reflect.Uint32: - switch encoding { - case "fixed32": - if pointer { - return sizeFixed32Ptr, appendFixed32Ptr - } - if slice { - if packed { - return sizeFixed32PackedSlice, appendFixed32PackedSlice - } - return 
sizeFixed32Slice, appendFixed32Slice - } - if nozero { - return sizeFixed32ValueNoZero, appendFixed32ValueNoZero - } - return sizeFixed32Value, appendFixed32Value - case "varint": - if pointer { - return sizeVarint32Ptr, appendVarint32Ptr - } - if slice { - if packed { - return sizeVarint32PackedSlice, appendVarint32PackedSlice - } - return sizeVarint32Slice, appendVarint32Slice - } - if nozero { - return sizeVarint32ValueNoZero, appendVarint32ValueNoZero - } - return sizeVarint32Value, appendVarint32Value - } - case reflect.Int32: - switch encoding { - case "fixed32": - if pointer { - return sizeFixedS32Ptr, appendFixedS32Ptr - } - if slice { - if packed { - return sizeFixedS32PackedSlice, appendFixedS32PackedSlice - } - return sizeFixedS32Slice, appendFixedS32Slice - } - if nozero { - return sizeFixedS32ValueNoZero, appendFixedS32ValueNoZero - } - return sizeFixedS32Value, appendFixedS32Value - case "varint": - if pointer { - return sizeVarintS32Ptr, appendVarintS32Ptr - } - if slice { - if packed { - return sizeVarintS32PackedSlice, appendVarintS32PackedSlice - } - return sizeVarintS32Slice, appendVarintS32Slice - } - if nozero { - return sizeVarintS32ValueNoZero, appendVarintS32ValueNoZero - } - return sizeVarintS32Value, appendVarintS32Value - case "zigzag32": - if pointer { - return sizeZigzag32Ptr, appendZigzag32Ptr - } - if slice { - if packed { - return sizeZigzag32PackedSlice, appendZigzag32PackedSlice - } - return sizeZigzag32Slice, appendZigzag32Slice - } - if nozero { - return sizeZigzag32ValueNoZero, appendZigzag32ValueNoZero - } - return sizeZigzag32Value, appendZigzag32Value - } - case reflect.Uint64: - switch encoding { - case "fixed64": - if pointer { - return sizeFixed64Ptr, appendFixed64Ptr - } - if slice { - if packed { - return sizeFixed64PackedSlice, appendFixed64PackedSlice - } - return sizeFixed64Slice, appendFixed64Slice - } - if nozero { - return sizeFixed64ValueNoZero, appendFixed64ValueNoZero - } - return sizeFixed64Value, 
appendFixed64Value - case "varint": - if pointer { - return sizeVarint64Ptr, appendVarint64Ptr - } - if slice { - if packed { - return sizeVarint64PackedSlice, appendVarint64PackedSlice - } - return sizeVarint64Slice, appendVarint64Slice - } - if nozero { - return sizeVarint64ValueNoZero, appendVarint64ValueNoZero - } - return sizeVarint64Value, appendVarint64Value - } - case reflect.Int64: - switch encoding { - case "fixed64": - if pointer { - return sizeFixedS64Ptr, appendFixedS64Ptr - } - if slice { - if packed { - return sizeFixedS64PackedSlice, appendFixedS64PackedSlice - } - return sizeFixedS64Slice, appendFixedS64Slice - } - if nozero { - return sizeFixedS64ValueNoZero, appendFixedS64ValueNoZero - } - return sizeFixedS64Value, appendFixedS64Value - case "varint": - if pointer { - return sizeVarintS64Ptr, appendVarintS64Ptr - } - if slice { - if packed { - return sizeVarintS64PackedSlice, appendVarintS64PackedSlice - } - return sizeVarintS64Slice, appendVarintS64Slice - } - if nozero { - return sizeVarintS64ValueNoZero, appendVarintS64ValueNoZero - } - return sizeVarintS64Value, appendVarintS64Value - case "zigzag64": - if pointer { - return sizeZigzag64Ptr, appendZigzag64Ptr - } - if slice { - if packed { - return sizeZigzag64PackedSlice, appendZigzag64PackedSlice - } - return sizeZigzag64Slice, appendZigzag64Slice - } - if nozero { - return sizeZigzag64ValueNoZero, appendZigzag64ValueNoZero - } - return sizeZigzag64Value, appendZigzag64Value - } - case reflect.Float32: - if pointer { - return sizeFloat32Ptr, appendFloat32Ptr - } - if slice { - if packed { - return sizeFloat32PackedSlice, appendFloat32PackedSlice - } - return sizeFloat32Slice, appendFloat32Slice - } - if nozero { - return sizeFloat32ValueNoZero, appendFloat32ValueNoZero - } - return sizeFloat32Value, appendFloat32Value - case reflect.Float64: - if pointer { - return sizeFloat64Ptr, appendFloat64Ptr - } - if slice { - if packed { - return sizeFloat64PackedSlice, appendFloat64PackedSlice - } - 
return sizeFloat64Slice, appendFloat64Slice - } - if nozero { - return sizeFloat64ValueNoZero, appendFloat64ValueNoZero - } - return sizeFloat64Value, appendFloat64Value - case reflect.String: - if validateUTF8 { - if pointer { - return sizeStringPtr, appendUTF8StringPtr - } - if slice { - return sizeStringSlice, appendUTF8StringSlice - } - if nozero { - return sizeStringValueNoZero, appendUTF8StringValueNoZero - } - return sizeStringValue, appendUTF8StringValue - } - if pointer { - return sizeStringPtr, appendStringPtr - } - if slice { - return sizeStringSlice, appendStringSlice - } - if nozero { - return sizeStringValueNoZero, appendStringValueNoZero - } - return sizeStringValue, appendStringValue - case reflect.Slice: - if slice { - return sizeBytesSlice, appendBytesSlice - } - if oneof { - // Oneof bytes field may also have "proto3" tag. - // We want to marshal it as a oneof field. Do this - // check before the proto3 check. - return sizeBytesOneof, appendBytesOneof - } - if proto3 { - return sizeBytes3, appendBytes3 - } - return sizeBytes, appendBytes - case reflect.Struct: - switch encoding { - case "group": - if slice { - return makeGroupSliceMarshaler(getMarshalInfo(t)) - } - return makeGroupMarshaler(getMarshalInfo(t)) - case "bytes": - if slice { - return makeMessageSliceMarshaler(getMarshalInfo(t)) - } - return makeMessageMarshaler(getMarshalInfo(t)) - } - } - panic(fmt.Sprintf("unknown or mismatched type: type: %v, wire type: %v", t, encoding)) -} - -// Below are functions to size/marshal a specific type of a field. -// They are stored in the field's info, and called by function pointers. -// They have type sizer or marshaler. 
- -func sizeFixed32Value(_ pointer, tagsize int) int { - return 4 + tagsize -} -func sizeFixed32ValueNoZero(ptr pointer, tagsize int) int { - v := *ptr.toUint32() - if v == 0 { - return 0 - } - return 4 + tagsize -} -func sizeFixed32Ptr(ptr pointer, tagsize int) int { - p := *ptr.toUint32Ptr() - if p == nil { - return 0 - } - return 4 + tagsize -} -func sizeFixed32Slice(ptr pointer, tagsize int) int { - s := *ptr.toUint32Slice() - return (4 + tagsize) * len(s) -} -func sizeFixed32PackedSlice(ptr pointer, tagsize int) int { - s := *ptr.toUint32Slice() - if len(s) == 0 { - return 0 - } - return 4*len(s) + SizeVarint(uint64(4*len(s))) + tagsize -} -func sizeFixedS32Value(_ pointer, tagsize int) int { - return 4 + tagsize -} -func sizeFixedS32ValueNoZero(ptr pointer, tagsize int) int { - v := *ptr.toInt32() - if v == 0 { - return 0 - } - return 4 + tagsize -} -func sizeFixedS32Ptr(ptr pointer, tagsize int) int { - p := ptr.getInt32Ptr() - if p == nil { - return 0 - } - return 4 + tagsize -} -func sizeFixedS32Slice(ptr pointer, tagsize int) int { - s := ptr.getInt32Slice() - return (4 + tagsize) * len(s) -} -func sizeFixedS32PackedSlice(ptr pointer, tagsize int) int { - s := ptr.getInt32Slice() - if len(s) == 0 { - return 0 - } - return 4*len(s) + SizeVarint(uint64(4*len(s))) + tagsize -} -func sizeFloat32Value(_ pointer, tagsize int) int { - return 4 + tagsize -} -func sizeFloat32ValueNoZero(ptr pointer, tagsize int) int { - v := math.Float32bits(*ptr.toFloat32()) - if v == 0 { - return 0 - } - return 4 + tagsize -} -func sizeFloat32Ptr(ptr pointer, tagsize int) int { - p := *ptr.toFloat32Ptr() - if p == nil { - return 0 - } - return 4 + tagsize -} -func sizeFloat32Slice(ptr pointer, tagsize int) int { - s := *ptr.toFloat32Slice() - return (4 + tagsize) * len(s) -} -func sizeFloat32PackedSlice(ptr pointer, tagsize int) int { - s := *ptr.toFloat32Slice() - if len(s) == 0 { - return 0 - } - return 4*len(s) + SizeVarint(uint64(4*len(s))) + tagsize -} -func 
sizeFixed64Value(_ pointer, tagsize int) int { - return 8 + tagsize -} -func sizeFixed64ValueNoZero(ptr pointer, tagsize int) int { - v := *ptr.toUint64() - if v == 0 { - return 0 - } - return 8 + tagsize -} -func sizeFixed64Ptr(ptr pointer, tagsize int) int { - p := *ptr.toUint64Ptr() - if p == nil { - return 0 - } - return 8 + tagsize -} -func sizeFixed64Slice(ptr pointer, tagsize int) int { - s := *ptr.toUint64Slice() - return (8 + tagsize) * len(s) -} -func sizeFixed64PackedSlice(ptr pointer, tagsize int) int { - s := *ptr.toUint64Slice() - if len(s) == 0 { - return 0 - } - return 8*len(s) + SizeVarint(uint64(8*len(s))) + tagsize -} -func sizeFixedS64Value(_ pointer, tagsize int) int { - return 8 + tagsize -} -func sizeFixedS64ValueNoZero(ptr pointer, tagsize int) int { - v := *ptr.toInt64() - if v == 0 { - return 0 - } - return 8 + tagsize -} -func sizeFixedS64Ptr(ptr pointer, tagsize int) int { - p := *ptr.toInt64Ptr() - if p == nil { - return 0 - } - return 8 + tagsize -} -func sizeFixedS64Slice(ptr pointer, tagsize int) int { - s := *ptr.toInt64Slice() - return (8 + tagsize) * len(s) -} -func sizeFixedS64PackedSlice(ptr pointer, tagsize int) int { - s := *ptr.toInt64Slice() - if len(s) == 0 { - return 0 - } - return 8*len(s) + SizeVarint(uint64(8*len(s))) + tagsize -} -func sizeFloat64Value(_ pointer, tagsize int) int { - return 8 + tagsize -} -func sizeFloat64ValueNoZero(ptr pointer, tagsize int) int { - v := math.Float64bits(*ptr.toFloat64()) - if v == 0 { - return 0 - } - return 8 + tagsize -} -func sizeFloat64Ptr(ptr pointer, tagsize int) int { - p := *ptr.toFloat64Ptr() - if p == nil { - return 0 - } - return 8 + tagsize -} -func sizeFloat64Slice(ptr pointer, tagsize int) int { - s := *ptr.toFloat64Slice() - return (8 + tagsize) * len(s) -} -func sizeFloat64PackedSlice(ptr pointer, tagsize int) int { - s := *ptr.toFloat64Slice() - if len(s) == 0 { - return 0 - } - return 8*len(s) + SizeVarint(uint64(8*len(s))) + tagsize -} -func sizeVarint32Value(ptr 
pointer, tagsize int) int { - v := *ptr.toUint32() - return SizeVarint(uint64(v)) + tagsize -} -func sizeVarint32ValueNoZero(ptr pointer, tagsize int) int { - v := *ptr.toUint32() - if v == 0 { - return 0 - } - return SizeVarint(uint64(v)) + tagsize -} -func sizeVarint32Ptr(ptr pointer, tagsize int) int { - p := *ptr.toUint32Ptr() - if p == nil { - return 0 - } - return SizeVarint(uint64(*p)) + tagsize -} -func sizeVarint32Slice(ptr pointer, tagsize int) int { - s := *ptr.toUint32Slice() - n := 0 - for _, v := range s { - n += SizeVarint(uint64(v)) + tagsize - } - return n -} -func sizeVarint32PackedSlice(ptr pointer, tagsize int) int { - s := *ptr.toUint32Slice() - if len(s) == 0 { - return 0 - } - n := 0 - for _, v := range s { - n += SizeVarint(uint64(v)) - } - return n + SizeVarint(uint64(n)) + tagsize -} -func sizeVarintS32Value(ptr pointer, tagsize int) int { - v := *ptr.toInt32() - return SizeVarint(uint64(v)) + tagsize -} -func sizeVarintS32ValueNoZero(ptr pointer, tagsize int) int { - v := *ptr.toInt32() - if v == 0 { - return 0 - } - return SizeVarint(uint64(v)) + tagsize -} -func sizeVarintS32Ptr(ptr pointer, tagsize int) int { - p := ptr.getInt32Ptr() - if p == nil { - return 0 - } - return SizeVarint(uint64(*p)) + tagsize -} -func sizeVarintS32Slice(ptr pointer, tagsize int) int { - s := ptr.getInt32Slice() - n := 0 - for _, v := range s { - n += SizeVarint(uint64(v)) + tagsize - } - return n -} -func sizeVarintS32PackedSlice(ptr pointer, tagsize int) int { - s := ptr.getInt32Slice() - if len(s) == 0 { - return 0 - } - n := 0 - for _, v := range s { - n += SizeVarint(uint64(v)) - } - return n + SizeVarint(uint64(n)) + tagsize -} -func sizeVarint64Value(ptr pointer, tagsize int) int { - v := *ptr.toUint64() - return SizeVarint(v) + tagsize -} -func sizeVarint64ValueNoZero(ptr pointer, tagsize int) int { - v := *ptr.toUint64() - if v == 0 { - return 0 - } - return SizeVarint(v) + tagsize -} -func sizeVarint64Ptr(ptr pointer, tagsize int) int { - p := 
*ptr.toUint64Ptr() - if p == nil { - return 0 - } - return SizeVarint(*p) + tagsize -} -func sizeVarint64Slice(ptr pointer, tagsize int) int { - s := *ptr.toUint64Slice() - n := 0 - for _, v := range s { - n += SizeVarint(v) + tagsize - } - return n -} -func sizeVarint64PackedSlice(ptr pointer, tagsize int) int { - s := *ptr.toUint64Slice() - if len(s) == 0 { - return 0 - } - n := 0 - for _, v := range s { - n += SizeVarint(v) - } - return n + SizeVarint(uint64(n)) + tagsize -} -func sizeVarintS64Value(ptr pointer, tagsize int) int { - v := *ptr.toInt64() - return SizeVarint(uint64(v)) + tagsize -} -func sizeVarintS64ValueNoZero(ptr pointer, tagsize int) int { - v := *ptr.toInt64() - if v == 0 { - return 0 - } - return SizeVarint(uint64(v)) + tagsize -} -func sizeVarintS64Ptr(ptr pointer, tagsize int) int { - p := *ptr.toInt64Ptr() - if p == nil { - return 0 - } - return SizeVarint(uint64(*p)) + tagsize -} -func sizeVarintS64Slice(ptr pointer, tagsize int) int { - s := *ptr.toInt64Slice() - n := 0 - for _, v := range s { - n += SizeVarint(uint64(v)) + tagsize - } - return n -} -func sizeVarintS64PackedSlice(ptr pointer, tagsize int) int { - s := *ptr.toInt64Slice() - if len(s) == 0 { - return 0 - } - n := 0 - for _, v := range s { - n += SizeVarint(uint64(v)) - } - return n + SizeVarint(uint64(n)) + tagsize -} -func sizeZigzag32Value(ptr pointer, tagsize int) int { - v := *ptr.toInt32() - return SizeVarint(uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) + tagsize -} -func sizeZigzag32ValueNoZero(ptr pointer, tagsize int) int { - v := *ptr.toInt32() - if v == 0 { - return 0 - } - return SizeVarint(uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) + tagsize -} -func sizeZigzag32Ptr(ptr pointer, tagsize int) int { - p := ptr.getInt32Ptr() - if p == nil { - return 0 - } - v := *p - return SizeVarint(uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) + tagsize -} -func sizeZigzag32Slice(ptr pointer, tagsize int) int { - s := ptr.getInt32Slice() - n := 0 - for _, v := range s { 
- n += SizeVarint(uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) + tagsize - } - return n -} -func sizeZigzag32PackedSlice(ptr pointer, tagsize int) int { - s := ptr.getInt32Slice() - if len(s) == 0 { - return 0 - } - n := 0 - for _, v := range s { - n += SizeVarint(uint64((uint32(v) << 1) ^ uint32((int32(v) >> 31)))) - } - return n + SizeVarint(uint64(n)) + tagsize -} -func sizeZigzag64Value(ptr pointer, tagsize int) int { - v := *ptr.toInt64() - return SizeVarint(uint64(v<<1)^uint64((int64(v)>>63))) + tagsize -} -func sizeZigzag64ValueNoZero(ptr pointer, tagsize int) int { - v := *ptr.toInt64() - if v == 0 { - return 0 - } - return SizeVarint(uint64(v<<1)^uint64((int64(v)>>63))) + tagsize -} -func sizeZigzag64Ptr(ptr pointer, tagsize int) int { - p := *ptr.toInt64Ptr() - if p == nil { - return 0 - } - v := *p - return SizeVarint(uint64(v<<1)^uint64((int64(v)>>63))) + tagsize -} -func sizeZigzag64Slice(ptr pointer, tagsize int) int { - s := *ptr.toInt64Slice() - n := 0 - for _, v := range s { - n += SizeVarint(uint64(v<<1)^uint64((int64(v)>>63))) + tagsize - } - return n -} -func sizeZigzag64PackedSlice(ptr pointer, tagsize int) int { - s := *ptr.toInt64Slice() - if len(s) == 0 { - return 0 - } - n := 0 - for _, v := range s { - n += SizeVarint(uint64(v<<1) ^ uint64((int64(v) >> 63))) - } - return n + SizeVarint(uint64(n)) + tagsize -} -func sizeBoolValue(_ pointer, tagsize int) int { - return 1 + tagsize -} -func sizeBoolValueNoZero(ptr pointer, tagsize int) int { - v := *ptr.toBool() - if !v { - return 0 - } - return 1 + tagsize -} -func sizeBoolPtr(ptr pointer, tagsize int) int { - p := *ptr.toBoolPtr() - if p == nil { - return 0 - } - return 1 + tagsize -} -func sizeBoolSlice(ptr pointer, tagsize int) int { - s := *ptr.toBoolSlice() - return (1 + tagsize) * len(s) -} -func sizeBoolPackedSlice(ptr pointer, tagsize int) int { - s := *ptr.toBoolSlice() - if len(s) == 0 { - return 0 - } - return len(s) + SizeVarint(uint64(len(s))) + tagsize -} -func 
sizeStringValue(ptr pointer, tagsize int) int { - v := *ptr.toString() - return len(v) + SizeVarint(uint64(len(v))) + tagsize -} -func sizeStringValueNoZero(ptr pointer, tagsize int) int { - v := *ptr.toString() - if v == "" { - return 0 - } - return len(v) + SizeVarint(uint64(len(v))) + tagsize -} -func sizeStringPtr(ptr pointer, tagsize int) int { - p := *ptr.toStringPtr() - if p == nil { - return 0 - } - v := *p - return len(v) + SizeVarint(uint64(len(v))) + tagsize -} -func sizeStringSlice(ptr pointer, tagsize int) int { - s := *ptr.toStringSlice() - n := 0 - for _, v := range s { - n += len(v) + SizeVarint(uint64(len(v))) + tagsize - } - return n -} -func sizeBytes(ptr pointer, tagsize int) int { - v := *ptr.toBytes() - if v == nil { - return 0 - } - return len(v) + SizeVarint(uint64(len(v))) + tagsize -} -func sizeBytes3(ptr pointer, tagsize int) int { - v := *ptr.toBytes() - if len(v) == 0 { - return 0 - } - return len(v) + SizeVarint(uint64(len(v))) + tagsize -} -func sizeBytesOneof(ptr pointer, tagsize int) int { - v := *ptr.toBytes() - return len(v) + SizeVarint(uint64(len(v))) + tagsize -} -func sizeBytesSlice(ptr pointer, tagsize int) int { - s := *ptr.toBytesSlice() - n := 0 - for _, v := range s { - n += len(v) + SizeVarint(uint64(len(v))) + tagsize - } - return n -} - -// appendFixed32 appends an encoded fixed32 to b. -func appendFixed32(b []byte, v uint32) []byte { - b = append(b, - byte(v), - byte(v>>8), - byte(v>>16), - byte(v>>24)) - return b -} - -// appendFixed64 appends an encoded fixed64 to b. -func appendFixed64(b []byte, v uint64) []byte { - b = append(b, - byte(v), - byte(v>>8), - byte(v>>16), - byte(v>>24), - byte(v>>32), - byte(v>>40), - byte(v>>48), - byte(v>>56)) - return b -} - -// appendVarint appends an encoded varint to b. -func appendVarint(b []byte, v uint64) []byte { - // TODO: make 1-byte (maybe 2-byte) case inline-able, once we - // have non-leaf inliner. 
- switch { - case v < 1<<7: - b = append(b, byte(v)) - case v < 1<<14: - b = append(b, - byte(v&0x7f|0x80), - byte(v>>7)) - case v < 1<<21: - b = append(b, - byte(v&0x7f|0x80), - byte((v>>7)&0x7f|0x80), - byte(v>>14)) - case v < 1<<28: - b = append(b, - byte(v&0x7f|0x80), - byte((v>>7)&0x7f|0x80), - byte((v>>14)&0x7f|0x80), - byte(v>>21)) - case v < 1<<35: - b = append(b, - byte(v&0x7f|0x80), - byte((v>>7)&0x7f|0x80), - byte((v>>14)&0x7f|0x80), - byte((v>>21)&0x7f|0x80), - byte(v>>28)) - case v < 1<<42: - b = append(b, - byte(v&0x7f|0x80), - byte((v>>7)&0x7f|0x80), - byte((v>>14)&0x7f|0x80), - byte((v>>21)&0x7f|0x80), - byte((v>>28)&0x7f|0x80), - byte(v>>35)) - case v < 1<<49: - b = append(b, - byte(v&0x7f|0x80), - byte((v>>7)&0x7f|0x80), - byte((v>>14)&0x7f|0x80), - byte((v>>21)&0x7f|0x80), - byte((v>>28)&0x7f|0x80), - byte((v>>35)&0x7f|0x80), - byte(v>>42)) - case v < 1<<56: - b = append(b, - byte(v&0x7f|0x80), - byte((v>>7)&0x7f|0x80), - byte((v>>14)&0x7f|0x80), - byte((v>>21)&0x7f|0x80), - byte((v>>28)&0x7f|0x80), - byte((v>>35)&0x7f|0x80), - byte((v>>42)&0x7f|0x80), - byte(v>>49)) - case v < 1<<63: - b = append(b, - byte(v&0x7f|0x80), - byte((v>>7)&0x7f|0x80), - byte((v>>14)&0x7f|0x80), - byte((v>>21)&0x7f|0x80), - byte((v>>28)&0x7f|0x80), - byte((v>>35)&0x7f|0x80), - byte((v>>42)&0x7f|0x80), - byte((v>>49)&0x7f|0x80), - byte(v>>56)) - default: - b = append(b, - byte(v&0x7f|0x80), - byte((v>>7)&0x7f|0x80), - byte((v>>14)&0x7f|0x80), - byte((v>>21)&0x7f|0x80), - byte((v>>28)&0x7f|0x80), - byte((v>>35)&0x7f|0x80), - byte((v>>42)&0x7f|0x80), - byte((v>>49)&0x7f|0x80), - byte((v>>56)&0x7f|0x80), - 1) - } - return b -} - -func appendFixed32Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toUint32() - b = appendVarint(b, wiretag) - b = appendFixed32(b, v) - return b, nil -} -func appendFixed32ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toUint32() - if v == 0 { - return b, nil - } - b = 
appendVarint(b, wiretag) - b = appendFixed32(b, v) - return b, nil -} -func appendFixed32Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - p := *ptr.toUint32Ptr() - if p == nil { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendFixed32(b, *p) - return b, nil -} -func appendFixed32Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := *ptr.toUint32Slice() - for _, v := range s { - b = appendVarint(b, wiretag) - b = appendFixed32(b, v) - } - return b, nil -} -func appendFixed32PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := *ptr.toUint32Slice() - if len(s) == 0 { - return b, nil - } - b = appendVarint(b, wiretag&^7|WireBytes) - b = appendVarint(b, uint64(4*len(s))) - for _, v := range s { - b = appendFixed32(b, v) - } - return b, nil -} -func appendFixedS32Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toInt32() - b = appendVarint(b, wiretag) - b = appendFixed32(b, uint32(v)) - return b, nil -} -func appendFixedS32ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toInt32() - if v == 0 { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendFixed32(b, uint32(v)) - return b, nil -} -func appendFixedS32Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - p := ptr.getInt32Ptr() - if p == nil { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendFixed32(b, uint32(*p)) - return b, nil -} -func appendFixedS32Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := ptr.getInt32Slice() - for _, v := range s { - b = appendVarint(b, wiretag) - b = appendFixed32(b, uint32(v)) - } - return b, nil -} -func appendFixedS32PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := ptr.getInt32Slice() - if len(s) == 0 { - return b, nil - } - b = appendVarint(b, wiretag&^7|WireBytes) - b = appendVarint(b, uint64(4*len(s))) - for _, v := 
range s { - b = appendFixed32(b, uint32(v)) - } - return b, nil -} -func appendFloat32Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := math.Float32bits(*ptr.toFloat32()) - b = appendVarint(b, wiretag) - b = appendFixed32(b, v) - return b, nil -} -func appendFloat32ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := math.Float32bits(*ptr.toFloat32()) - if v == 0 { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendFixed32(b, v) - return b, nil -} -func appendFloat32Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - p := *ptr.toFloat32Ptr() - if p == nil { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendFixed32(b, math.Float32bits(*p)) - return b, nil -} -func appendFloat32Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := *ptr.toFloat32Slice() - for _, v := range s { - b = appendVarint(b, wiretag) - b = appendFixed32(b, math.Float32bits(v)) - } - return b, nil -} -func appendFloat32PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := *ptr.toFloat32Slice() - if len(s) == 0 { - return b, nil - } - b = appendVarint(b, wiretag&^7|WireBytes) - b = appendVarint(b, uint64(4*len(s))) - for _, v := range s { - b = appendFixed32(b, math.Float32bits(v)) - } - return b, nil -} -func appendFixed64Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toUint64() - b = appendVarint(b, wiretag) - b = appendFixed64(b, v) - return b, nil -} -func appendFixed64ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toUint64() - if v == 0 { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendFixed64(b, v) - return b, nil -} -func appendFixed64Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - p := *ptr.toUint64Ptr() - if p == nil { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendFixed64(b, *p) - return b, nil -} -func 
appendFixed64Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := *ptr.toUint64Slice() - for _, v := range s { - b = appendVarint(b, wiretag) - b = appendFixed64(b, v) - } - return b, nil -} -func appendFixed64PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := *ptr.toUint64Slice() - if len(s) == 0 { - return b, nil - } - b = appendVarint(b, wiretag&^7|WireBytes) - b = appendVarint(b, uint64(8*len(s))) - for _, v := range s { - b = appendFixed64(b, v) - } - return b, nil -} -func appendFixedS64Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toInt64() - b = appendVarint(b, wiretag) - b = appendFixed64(b, uint64(v)) - return b, nil -} -func appendFixedS64ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toInt64() - if v == 0 { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendFixed64(b, uint64(v)) - return b, nil -} -func appendFixedS64Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - p := *ptr.toInt64Ptr() - if p == nil { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendFixed64(b, uint64(*p)) - return b, nil -} -func appendFixedS64Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := *ptr.toInt64Slice() - for _, v := range s { - b = appendVarint(b, wiretag) - b = appendFixed64(b, uint64(v)) - } - return b, nil -} -func appendFixedS64PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := *ptr.toInt64Slice() - if len(s) == 0 { - return b, nil - } - b = appendVarint(b, wiretag&^7|WireBytes) - b = appendVarint(b, uint64(8*len(s))) - for _, v := range s { - b = appendFixed64(b, uint64(v)) - } - return b, nil -} -func appendFloat64Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := math.Float64bits(*ptr.toFloat64()) - b = appendVarint(b, wiretag) - b = appendFixed64(b, v) - return b, nil -} -func 
appendFloat64ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := math.Float64bits(*ptr.toFloat64()) - if v == 0 { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendFixed64(b, v) - return b, nil -} -func appendFloat64Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - p := *ptr.toFloat64Ptr() - if p == nil { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendFixed64(b, math.Float64bits(*p)) - return b, nil -} -func appendFloat64Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := *ptr.toFloat64Slice() - for _, v := range s { - b = appendVarint(b, wiretag) - b = appendFixed64(b, math.Float64bits(v)) - } - return b, nil -} -func appendFloat64PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := *ptr.toFloat64Slice() - if len(s) == 0 { - return b, nil - } - b = appendVarint(b, wiretag&^7|WireBytes) - b = appendVarint(b, uint64(8*len(s))) - for _, v := range s { - b = appendFixed64(b, math.Float64bits(v)) - } - return b, nil -} -func appendVarint32Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toUint32() - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(v)) - return b, nil -} -func appendVarint32ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toUint32() - if v == 0 { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(v)) - return b, nil -} -func appendVarint32Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - p := *ptr.toUint32Ptr() - if p == nil { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(*p)) - return b, nil -} -func appendVarint32Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := *ptr.toUint32Slice() - for _, v := range s { - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(v)) - } - return b, nil -} -func 
appendVarint32PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := *ptr.toUint32Slice() - if len(s) == 0 { - return b, nil - } - b = appendVarint(b, wiretag&^7|WireBytes) - // compute size - n := 0 - for _, v := range s { - n += SizeVarint(uint64(v)) - } - b = appendVarint(b, uint64(n)) - for _, v := range s { - b = appendVarint(b, uint64(v)) - } - return b, nil -} -func appendVarintS32Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toInt32() - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(v)) - return b, nil -} -func appendVarintS32ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toInt32() - if v == 0 { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(v)) - return b, nil -} -func appendVarintS32Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - p := ptr.getInt32Ptr() - if p == nil { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(*p)) - return b, nil -} -func appendVarintS32Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := ptr.getInt32Slice() - for _, v := range s { - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(v)) - } - return b, nil -} -func appendVarintS32PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := ptr.getInt32Slice() - if len(s) == 0 { - return b, nil - } - b = appendVarint(b, wiretag&^7|WireBytes) - // compute size - n := 0 - for _, v := range s { - n += SizeVarint(uint64(v)) - } - b = appendVarint(b, uint64(n)) - for _, v := range s { - b = appendVarint(b, uint64(v)) - } - return b, nil -} -func appendVarint64Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toUint64() - b = appendVarint(b, wiretag) - b = appendVarint(b, v) - return b, nil -} -func appendVarint64ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := 
*ptr.toUint64() - if v == 0 { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendVarint(b, v) - return b, nil -} -func appendVarint64Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - p := *ptr.toUint64Ptr() - if p == nil { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendVarint(b, *p) - return b, nil -} -func appendVarint64Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := *ptr.toUint64Slice() - for _, v := range s { - b = appendVarint(b, wiretag) - b = appendVarint(b, v) - } - return b, nil -} -func appendVarint64PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := *ptr.toUint64Slice() - if len(s) == 0 { - return b, nil - } - b = appendVarint(b, wiretag&^7|WireBytes) - // compute size - n := 0 - for _, v := range s { - n += SizeVarint(v) - } - b = appendVarint(b, uint64(n)) - for _, v := range s { - b = appendVarint(b, v) - } - return b, nil -} -func appendVarintS64Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toInt64() - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(v)) - return b, nil -} -func appendVarintS64ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toInt64() - if v == 0 { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(v)) - return b, nil -} -func appendVarintS64Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - p := *ptr.toInt64Ptr() - if p == nil { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(*p)) - return b, nil -} -func appendVarintS64Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := *ptr.toInt64Slice() - for _, v := range s { - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(v)) - } - return b, nil -} -func appendVarintS64PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := *ptr.toInt64Slice() - if len(s) 
== 0 { - return b, nil - } - b = appendVarint(b, wiretag&^7|WireBytes) - // compute size - n := 0 - for _, v := range s { - n += SizeVarint(uint64(v)) - } - b = appendVarint(b, uint64(n)) - for _, v := range s { - b = appendVarint(b, uint64(v)) - } - return b, nil -} -func appendZigzag32Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toInt32() - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) - return b, nil -} -func appendZigzag32ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toInt32() - if v == 0 { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) - return b, nil -} -func appendZigzag32Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - p := ptr.getInt32Ptr() - if p == nil { - return b, nil - } - b = appendVarint(b, wiretag) - v := *p - b = appendVarint(b, uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) - return b, nil -} -func appendZigzag32Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := ptr.getInt32Slice() - for _, v := range s { - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) - } - return b, nil -} -func appendZigzag32PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := ptr.getInt32Slice() - if len(s) == 0 { - return b, nil - } - b = appendVarint(b, wiretag&^7|WireBytes) - // compute size - n := 0 - for _, v := range s { - n += SizeVarint(uint64((uint32(v) << 1) ^ uint32((int32(v) >> 31)))) - } - b = appendVarint(b, uint64(n)) - for _, v := range s { - b = appendVarint(b, uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) - } - return b, nil -} -func appendZigzag64Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toInt64() - b = appendVarint(b, wiretag) - b = appendVarint(b, 
uint64(v<<1)^uint64((int64(v)>>63))) - return b, nil -} -func appendZigzag64ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toInt64() - if v == 0 { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(v<<1)^uint64((int64(v)>>63))) - return b, nil -} -func appendZigzag64Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - p := *ptr.toInt64Ptr() - if p == nil { - return b, nil - } - b = appendVarint(b, wiretag) - v := *p - b = appendVarint(b, uint64(v<<1)^uint64((int64(v)>>63))) - return b, nil -} -func appendZigzag64Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := *ptr.toInt64Slice() - for _, v := range s { - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(v<<1)^uint64((int64(v)>>63))) - } - return b, nil -} -func appendZigzag64PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := *ptr.toInt64Slice() - if len(s) == 0 { - return b, nil - } - b = appendVarint(b, wiretag&^7|WireBytes) - // compute size - n := 0 - for _, v := range s { - n += SizeVarint(uint64(v<<1) ^ uint64((int64(v) >> 63))) - } - b = appendVarint(b, uint64(n)) - for _, v := range s { - b = appendVarint(b, uint64(v<<1)^uint64((int64(v)>>63))) - } - return b, nil -} -func appendBoolValue(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toBool() - b = appendVarint(b, wiretag) - if v { - b = append(b, 1) - } else { - b = append(b, 0) - } - return b, nil -} -func appendBoolValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toBool() - if !v { - return b, nil - } - b = appendVarint(b, wiretag) - b = append(b, 1) - return b, nil -} - -func appendBoolPtr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - p := *ptr.toBoolPtr() - if p == nil { - return b, nil - } - b = appendVarint(b, wiretag) - if *p { - b = append(b, 1) - } else { - b = append(b, 0) - } - return b, nil -} 
-func appendBoolSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := *ptr.toBoolSlice() - for _, v := range s { - b = appendVarint(b, wiretag) - if v { - b = append(b, 1) - } else { - b = append(b, 0) - } - } - return b, nil -} -func appendBoolPackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := *ptr.toBoolSlice() - if len(s) == 0 { - return b, nil - } - b = appendVarint(b, wiretag&^7|WireBytes) - b = appendVarint(b, uint64(len(s))) - for _, v := range s { - if v { - b = append(b, 1) - } else { - b = append(b, 0) - } - } - return b, nil -} -func appendStringValue(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toString() - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(len(v))) - b = append(b, v...) - return b, nil -} -func appendStringValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toString() - if v == "" { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(len(v))) - b = append(b, v...) - return b, nil -} -func appendStringPtr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - p := *ptr.toStringPtr() - if p == nil { - return b, nil - } - v := *p - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(len(v))) - b = append(b, v...) - return b, nil -} -func appendStringSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := *ptr.toStringSlice() - for _, v := range s { - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(len(v))) - b = append(b, v...) - } - return b, nil -} -func appendUTF8StringValue(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - var invalidUTF8 bool - v := *ptr.toString() - if !utf8.ValidString(v) { - invalidUTF8 = true - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(len(v))) - b = append(b, v...) 
- if invalidUTF8 { - return b, errInvalidUTF8 - } - return b, nil -} -func appendUTF8StringValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - var invalidUTF8 bool - v := *ptr.toString() - if v == "" { - return b, nil - } - if !utf8.ValidString(v) { - invalidUTF8 = true - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(len(v))) - b = append(b, v...) - if invalidUTF8 { - return b, errInvalidUTF8 - } - return b, nil -} -func appendUTF8StringPtr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - var invalidUTF8 bool - p := *ptr.toStringPtr() - if p == nil { - return b, nil - } - v := *p - if !utf8.ValidString(v) { - invalidUTF8 = true - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(len(v))) - b = append(b, v...) - if invalidUTF8 { - return b, errInvalidUTF8 - } - return b, nil -} -func appendUTF8StringSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - var invalidUTF8 bool - s := *ptr.toStringSlice() - for _, v := range s { - if !utf8.ValidString(v) { - invalidUTF8 = true - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(len(v))) - b = append(b, v...) - } - if invalidUTF8 { - return b, errInvalidUTF8 - } - return b, nil -} -func appendBytes(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toBytes() - if v == nil { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(len(v))) - b = append(b, v...) - return b, nil -} -func appendBytes3(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toBytes() - if len(v) == 0 { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(len(v))) - b = append(b, v...) - return b, nil -} -func appendBytesOneof(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toBytes() - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(len(v))) - b = append(b, v...) 
- return b, nil -} -func appendBytesSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := *ptr.toBytesSlice() - for _, v := range s { - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(len(v))) - b = append(b, v...) - } - return b, nil -} - -// makeGroupMarshaler returns the sizer and marshaler for a group. -// u is the marshal info of the underlying message. -func makeGroupMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - p := ptr.getPointer() - if p.isNil() { - return 0 - } - return u.size(p) + 2*tagsize - }, - func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - p := ptr.getPointer() - if p.isNil() { - return b, nil - } - var err error - b = appendVarint(b, wiretag) // start group - b, err = u.marshal(b, p, deterministic) - b = appendVarint(b, wiretag+(WireEndGroup-WireStartGroup)) // end group - return b, err - } -} - -// makeGroupSliceMarshaler returns the sizer and marshaler for a group slice. -// u is the marshal info of the underlying message. -func makeGroupSliceMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - s := ptr.getPointerSlice() - n := 0 - for _, v := range s { - if v.isNil() { - continue - } - n += u.size(v) + 2*tagsize - } - return n - }, - func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - s := ptr.getPointerSlice() - var err error - var nerr nonFatal - for _, v := range s { - if v.isNil() { - return b, errRepeatedHasNil - } - b = appendVarint(b, wiretag) // start group - b, err = u.marshal(b, v, deterministic) - b = appendVarint(b, wiretag+(WireEndGroup-WireStartGroup)) // end group - if !nerr.Merge(err) { - if err == ErrNil { - err = errRepeatedHasNil - } - return b, err - } - } - return b, nerr.E - } -} - -// makeMessageMarshaler returns the sizer and marshaler for a message field. -// u is the marshal info of the message. 
-func makeMessageMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - p := ptr.getPointer() - if p.isNil() { - return 0 - } - siz := u.size(p) - return siz + SizeVarint(uint64(siz)) + tagsize - }, - func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - p := ptr.getPointer() - if p.isNil() { - return b, nil - } - b = appendVarint(b, wiretag) - siz := u.cachedsize(p) - b = appendVarint(b, uint64(siz)) - return u.marshal(b, p, deterministic) - } -} - -// makeMessageSliceMarshaler returns the sizer and marshaler for a message slice. -// u is the marshal info of the message. -func makeMessageSliceMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - s := ptr.getPointerSlice() - n := 0 - for _, v := range s { - if v.isNil() { - continue - } - siz := u.size(v) - n += siz + SizeVarint(uint64(siz)) + tagsize - } - return n - }, - func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - s := ptr.getPointerSlice() - var err error - var nerr nonFatal - for _, v := range s { - if v.isNil() { - return b, errRepeatedHasNil - } - b = appendVarint(b, wiretag) - siz := u.cachedsize(v) - b = appendVarint(b, uint64(siz)) - b, err = u.marshal(b, v, deterministic) - - if !nerr.Merge(err) { - if err == ErrNil { - err = errRepeatedHasNil - } - return b, err - } - } - return b, nerr.E - } -} - -// makeMapMarshaler returns the sizer and marshaler for a map field. -// f is the pointer to the reflect data structure of the field. 
-func makeMapMarshaler(f *reflect.StructField) (sizer, marshaler) { - // figure out key and value type - t := f.Type - keyType := t.Key() - valType := t.Elem() - keyTags := strings.Split(f.Tag.Get("protobuf_key"), ",") - valTags := strings.Split(f.Tag.Get("protobuf_val"), ",") - keySizer, keyMarshaler := typeMarshaler(keyType, keyTags, false, false) // don't omit zero value in map - valSizer, valMarshaler := typeMarshaler(valType, valTags, false, false) // don't omit zero value in map - keyWireTag := 1<<3 | wiretype(keyTags[0]) - valWireTag := 2<<3 | wiretype(valTags[0]) - - // We create an interface to get the addresses of the map key and value. - // If value is pointer-typed, the interface is a direct interface, the - // idata itself is the value. Otherwise, the idata is the pointer to the - // value. - // Key cannot be pointer-typed. - valIsPtr := valType.Kind() == reflect.Ptr - - // If value is a message with nested maps, calling - // valSizer in marshal may be quadratic. We should use - // cached version in marshal (but not in size). - // If value is not message type, we don't have size cache, - // but it cannot be nested either. Just use valSizer. - valCachedSizer := valSizer - if valIsPtr && valType.Elem().Kind() == reflect.Struct { - u := getMarshalInfo(valType.Elem()) - valCachedSizer = func(ptr pointer, tagsize int) int { - // Same as message sizer, but use cache. 
- p := ptr.getPointer() - if p.isNil() { - return 0 - } - siz := u.cachedsize(p) - return siz + SizeVarint(uint64(siz)) + tagsize - } - } - return func(ptr pointer, tagsize int) int { - m := ptr.asPointerTo(t).Elem() // the map - n := 0 - for _, k := range m.MapKeys() { - ki := k.Interface() - vi := m.MapIndex(k).Interface() - kaddr := toAddrPointer(&ki, false) // pointer to key - vaddr := toAddrPointer(&vi, valIsPtr) // pointer to value - siz := keySizer(kaddr, 1) + valSizer(vaddr, 1) // tag of key = 1 (size=1), tag of val = 2 (size=1) - n += siz + SizeVarint(uint64(siz)) + tagsize - } - return n - }, - func(b []byte, ptr pointer, tag uint64, deterministic bool) ([]byte, error) { - m := ptr.asPointerTo(t).Elem() // the map - var err error - keys := m.MapKeys() - if len(keys) > 1 && deterministic { - sort.Sort(mapKeys(keys)) - } - - var nerr nonFatal - for _, k := range keys { - ki := k.Interface() - vi := m.MapIndex(k).Interface() - kaddr := toAddrPointer(&ki, false) // pointer to key - vaddr := toAddrPointer(&vi, valIsPtr) // pointer to value - b = appendVarint(b, tag) - siz := keySizer(kaddr, 1) + valCachedSizer(vaddr, 1) // tag of key = 1 (size=1), tag of val = 2 (size=1) - b = appendVarint(b, uint64(siz)) - b, err = keyMarshaler(b, kaddr, keyWireTag, deterministic) - if !nerr.Merge(err) { - return b, err - } - b, err = valMarshaler(b, vaddr, valWireTag, deterministic) - if err != ErrNil && !nerr.Merge(err) { // allow nil value in map - return b, err - } - } - return b, nerr.E - } -} - -// makeOneOfMarshaler returns the sizer and marshaler for a oneof field. -// fi is the marshal info of the field. -// f is the pointer to the reflect data structure of the field. -func makeOneOfMarshaler(fi *marshalFieldInfo, f *reflect.StructField) (sizer, marshaler) { - // Oneof field is an interface. We need to get the actual data type on the fly. 
- t := f.Type - return func(ptr pointer, _ int) int { - p := ptr.getInterfacePointer() - if p.isNil() { - return 0 - } - v := ptr.asPointerTo(t).Elem().Elem().Elem() // *interface -> interface -> *struct -> struct - telem := v.Type() - e := fi.oneofElems[telem] - return e.sizer(p, e.tagsize) - }, - func(b []byte, ptr pointer, _ uint64, deterministic bool) ([]byte, error) { - p := ptr.getInterfacePointer() - if p.isNil() { - return b, nil - } - v := ptr.asPointerTo(t).Elem().Elem().Elem() // *interface -> interface -> *struct -> struct - telem := v.Type() - if telem.Field(0).Type.Kind() == reflect.Ptr && p.getPointer().isNil() { - return b, errOneofHasNil - } - e := fi.oneofElems[telem] - return e.marshaler(b, p, e.wiretag, deterministic) - } -} - -// sizeExtensions computes the size of encoded data for a XXX_InternalExtensions field. -func (u *marshalInfo) sizeExtensions(ext *XXX_InternalExtensions) int { - m, mu := ext.extensionsRead() - if m == nil { - return 0 - } - mu.Lock() - - n := 0 - for _, e := range m { - if e.value == nil || e.desc == nil { - // Extension is only in its encoded form. - n += len(e.enc) - continue - } - - // We don't skip extensions that have an encoded form set, - // because the extension value may have been mutated after - // the last time this function was called. - ei := u.getExtElemInfo(e.desc) - v := e.value - p := toAddrPointer(&v, ei.isptr) - n += ei.sizer(p, ei.tagsize) - } - mu.Unlock() - return n -} - -// appendExtensions marshals a XXX_InternalExtensions field to the end of byte slice b. -func (u *marshalInfo) appendExtensions(b []byte, ext *XXX_InternalExtensions, deterministic bool) ([]byte, error) { - m, mu := ext.extensionsRead() - if m == nil { - return b, nil - } - mu.Lock() - defer mu.Unlock() - - var err error - var nerr nonFatal - - // Fast-path for common cases: zero or one extensions. - // Don't bother sorting the keys. 
- if len(m) <= 1 { - for _, e := range m { - if e.value == nil || e.desc == nil { - // Extension is only in its encoded form. - b = append(b, e.enc...) - continue - } - - // We don't skip extensions that have an encoded form set, - // because the extension value may have been mutated after - // the last time this function was called. - - ei := u.getExtElemInfo(e.desc) - v := e.value - p := toAddrPointer(&v, ei.isptr) - b, err = ei.marshaler(b, p, ei.wiretag, deterministic) - if !nerr.Merge(err) { - return b, err - } - } - return b, nerr.E - } - - // Sort the keys to provide a deterministic encoding. - // Not sure this is required, but the old code does it. - keys := make([]int, 0, len(m)) - for k := range m { - keys = append(keys, int(k)) - } - sort.Ints(keys) - - for _, k := range keys { - e := m[int32(k)] - if e.value == nil || e.desc == nil { - // Extension is only in its encoded form. - b = append(b, e.enc...) - continue - } - - // We don't skip extensions that have an encoded form set, - // because the extension value may have been mutated after - // the last time this function was called. - - ei := u.getExtElemInfo(e.desc) - v := e.value - p := toAddrPointer(&v, ei.isptr) - b, err = ei.marshaler(b, p, ei.wiretag, deterministic) - if !nerr.Merge(err) { - return b, err - } - } - return b, nerr.E -} - -// message set format is: -// message MessageSet { -// repeated group Item = 1 { -// required int32 type_id = 2; -// required string message = 3; -// }; -// } - -// sizeMessageSet computes the size of encoded data for a XXX_InternalExtensions field -// in message set format (above). -func (u *marshalInfo) sizeMessageSet(ext *XXX_InternalExtensions) int { - m, mu := ext.extensionsRead() - if m == nil { - return 0 - } - mu.Lock() - - n := 0 - for id, e := range m { - n += 2 // start group, end group. tag = 1 (size=1) - n += SizeVarint(uint64(id)) + 1 // type_id, tag = 2 (size=1) - - if e.value == nil || e.desc == nil { - // Extension is only in its encoded form. 
- msgWithLen := skipVarint(e.enc) // skip old tag, but leave the length varint - siz := len(msgWithLen) - n += siz + 1 // message, tag = 3 (size=1) - continue - } - - // We don't skip extensions that have an encoded form set, - // because the extension value may have been mutated after - // the last time this function was called. - - ei := u.getExtElemInfo(e.desc) - v := e.value - p := toAddrPointer(&v, ei.isptr) - n += ei.sizer(p, 1) // message, tag = 3 (size=1) - } - mu.Unlock() - return n -} - -// appendMessageSet marshals a XXX_InternalExtensions field in message set format (above) -// to the end of byte slice b. -func (u *marshalInfo) appendMessageSet(b []byte, ext *XXX_InternalExtensions, deterministic bool) ([]byte, error) { - m, mu := ext.extensionsRead() - if m == nil { - return b, nil - } - mu.Lock() - defer mu.Unlock() - - var err error - var nerr nonFatal - - // Fast-path for common cases: zero or one extensions. - // Don't bother sorting the keys. - if len(m) <= 1 { - for id, e := range m { - b = append(b, 1<<3|WireStartGroup) - b = append(b, 2<<3|WireVarint) - b = appendVarint(b, uint64(id)) - - if e.value == nil || e.desc == nil { - // Extension is only in its encoded form. - msgWithLen := skipVarint(e.enc) // skip old tag, but leave the length varint - b = append(b, 3<<3|WireBytes) - b = append(b, msgWithLen...) - b = append(b, 1<<3|WireEndGroup) - continue - } - - // We don't skip extensions that have an encoded form set, - // because the extension value may have been mutated after - // the last time this function was called. - - ei := u.getExtElemInfo(e.desc) - v := e.value - p := toAddrPointer(&v, ei.isptr) - b, err = ei.marshaler(b, p, 3<<3|WireBytes, deterministic) - if !nerr.Merge(err) { - return b, err - } - b = append(b, 1<<3|WireEndGroup) - } - return b, nerr.E - } - - // Sort the keys to provide a deterministic encoding. 
- keys := make([]int, 0, len(m)) - for k := range m { - keys = append(keys, int(k)) - } - sort.Ints(keys) - - for _, id := range keys { - e := m[int32(id)] - b = append(b, 1<<3|WireStartGroup) - b = append(b, 2<<3|WireVarint) - b = appendVarint(b, uint64(id)) - - if e.value == nil || e.desc == nil { - // Extension is only in its encoded form. - msgWithLen := skipVarint(e.enc) // skip old tag, but leave the length varint - b = append(b, 3<<3|WireBytes) - b = append(b, msgWithLen...) - b = append(b, 1<<3|WireEndGroup) - continue - } - - // We don't skip extensions that have an encoded form set, - // because the extension value may have been mutated after - // the last time this function was called. - - ei := u.getExtElemInfo(e.desc) - v := e.value - p := toAddrPointer(&v, ei.isptr) - b, err = ei.marshaler(b, p, 3<<3|WireBytes, deterministic) - b = append(b, 1<<3|WireEndGroup) - if !nerr.Merge(err) { - return b, err - } - } - return b, nerr.E -} - -// sizeV1Extensions computes the size of encoded data for a V1-API extension field. -func (u *marshalInfo) sizeV1Extensions(m map[int32]Extension) int { - if m == nil { - return 0 - } - - n := 0 - for _, e := range m { - if e.value == nil || e.desc == nil { - // Extension is only in its encoded form. - n += len(e.enc) - continue - } - - // We don't skip extensions that have an encoded form set, - // because the extension value may have been mutated after - // the last time this function was called. - - ei := u.getExtElemInfo(e.desc) - v := e.value - p := toAddrPointer(&v, ei.isptr) - n += ei.sizer(p, ei.tagsize) - } - return n -} - -// appendV1Extensions marshals a V1-API extension field to the end of byte slice b. -func (u *marshalInfo) appendV1Extensions(b []byte, m map[int32]Extension, deterministic bool) ([]byte, error) { - if m == nil { - return b, nil - } - - // Sort the keys to provide a deterministic encoding. 
- keys := make([]int, 0, len(m)) - for k := range m { - keys = append(keys, int(k)) - } - sort.Ints(keys) - - var err error - var nerr nonFatal - for _, k := range keys { - e := m[int32(k)] - if e.value == nil || e.desc == nil { - // Extension is only in its encoded form. - b = append(b, e.enc...) - continue - } - - // We don't skip extensions that have an encoded form set, - // because the extension value may have been mutated after - // the last time this function was called. - - ei := u.getExtElemInfo(e.desc) - v := e.value - p := toAddrPointer(&v, ei.isptr) - b, err = ei.marshaler(b, p, ei.wiretag, deterministic) - if !nerr.Merge(err) { - return b, err - } - } - return b, nerr.E -} - -// newMarshaler is the interface representing objects that can marshal themselves. -// -// This exists to support protoc-gen-go generated messages. -// The proto package will stop type-asserting to this interface in the future. -// -// DO NOT DEPEND ON THIS. -type newMarshaler interface { - XXX_Size() int - XXX_Marshal(b []byte, deterministic bool) ([]byte, error) -} - -// Size returns the encoded size of a protocol buffer message. -// This is the main entry point. -func Size(pb Message) int { - if m, ok := pb.(newMarshaler); ok { - return m.XXX_Size() - } - if m, ok := pb.(Marshaler); ok { - // If the message can marshal itself, let it do it, for compatibility. - // NOTE: This is not efficient. - b, _ := m.Marshal() - return len(b) - } - // in case somehow we didn't generate the wrapper - if pb == nil { - return 0 - } - var info InternalMessageInfo - return info.Size(pb) -} - -// Marshal takes a protocol buffer message -// and encodes it into the wire format, returning the data. -// This is the main entry point. 
-func Marshal(pb Message) ([]byte, error) { - if m, ok := pb.(newMarshaler); ok { - siz := m.XXX_Size() - b := make([]byte, 0, siz) - return m.XXX_Marshal(b, false) - } - if m, ok := pb.(Marshaler); ok { - // If the message can marshal itself, let it do it, for compatibility. - // NOTE: This is not efficient. - return m.Marshal() - } - // in case somehow we didn't generate the wrapper - if pb == nil { - return nil, ErrNil - } - var info InternalMessageInfo - siz := info.Size(pb) - b := make([]byte, 0, siz) - return info.Marshal(b, pb, false) -} - -// Marshal takes a protocol buffer message -// and encodes it into the wire format, writing the result to the -// Buffer. -// This is an alternative entry point. It is not necessary to use -// a Buffer for most applications. -func (p *Buffer) Marshal(pb Message) error { - var err error - if m, ok := pb.(newMarshaler); ok { - siz := m.XXX_Size() - p.grow(siz) // make sure buf has enough capacity - p.buf, err = m.XXX_Marshal(p.buf, p.deterministic) - return err - } - if m, ok := pb.(Marshaler); ok { - // If the message can marshal itself, let it do it, for compatibility. - // NOTE: This is not efficient. - b, err := m.Marshal() - p.buf = append(p.buf, b...) - return err - } - // in case somehow we didn't generate the wrapper - if pb == nil { - return ErrNil - } - var info InternalMessageInfo - siz := info.Size(pb) - p.grow(siz) // make sure buf has enough capacity - p.buf, err = info.Marshal(p.buf, pb, p.deterministic) - return err -} - -// grow grows the buffer's capacity, if necessary, to guarantee space for -// another n bytes. After grow(n), at least n bytes can be written to the -// buffer without another allocation. -func (p *Buffer) grow(n int) { - need := len(p.buf) + n - if need <= cap(p.buf) { - return - } - newCap := len(p.buf) * 2 - if newCap < need { - newCap = need - } - p.buf = append(make([]byte, 0, newCap), p.buf...) 
-} diff --git a/app/platform/fabric/e2e-test/fabric-samples/chaincode/abac/go/vendor/github.com/golang/protobuf/proto/table_merge.go b/app/platform/fabric/e2e-test/fabric-samples/chaincode/abac/go/vendor/github.com/golang/protobuf/proto/table_merge.go deleted file mode 100644 index 5525def6a..000000000 --- a/app/platform/fabric/e2e-test/fabric-samples/chaincode/abac/go/vendor/github.com/golang/protobuf/proto/table_merge.go +++ /dev/null @@ -1,654 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2016 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package proto - -import ( - "fmt" - "reflect" - "strings" - "sync" - "sync/atomic" -) - -// Merge merges the src message into dst. -// This assumes that dst and src of the same type and are non-nil. -func (a *InternalMessageInfo) Merge(dst, src Message) { - mi := atomicLoadMergeInfo(&a.merge) - if mi == nil { - mi = getMergeInfo(reflect.TypeOf(dst).Elem()) - atomicStoreMergeInfo(&a.merge, mi) - } - mi.merge(toPointer(&dst), toPointer(&src)) -} - -type mergeInfo struct { - typ reflect.Type - - initialized int32 // 0: only typ is valid, 1: everything is valid - lock sync.Mutex - - fields []mergeFieldInfo - unrecognized field // Offset of XXX_unrecognized -} - -type mergeFieldInfo struct { - field field // Offset of field, guaranteed to be valid - - // isPointer reports whether the value in the field is a pointer. - // This is true for the following situations: - // * Pointer to struct - // * Pointer to basic type (proto2 only) - // * Slice (first value in slice header is a pointer) - // * String (first value in string header is a pointer) - isPointer bool - - // basicWidth reports the width of the field assuming that it is directly - // embedded in the struct (as is the case for basic types in proto3). - // The possible values are: - // 0: invalid - // 1: bool - // 4: int32, uint32, float32 - // 8: int64, uint64, float64 - basicWidth int - - // Where dst and src are pointers to the types being merged. 
- merge func(dst, src pointer) -} - -var ( - mergeInfoMap = map[reflect.Type]*mergeInfo{} - mergeInfoLock sync.Mutex -) - -func getMergeInfo(t reflect.Type) *mergeInfo { - mergeInfoLock.Lock() - defer mergeInfoLock.Unlock() - mi := mergeInfoMap[t] - if mi == nil { - mi = &mergeInfo{typ: t} - mergeInfoMap[t] = mi - } - return mi -} - -// merge merges src into dst assuming they are both of type *mi.typ. -func (mi *mergeInfo) merge(dst, src pointer) { - if dst.isNil() { - panic("proto: nil destination") - } - if src.isNil() { - return // Nothing to do. - } - - if atomic.LoadInt32(&mi.initialized) == 0 { - mi.computeMergeInfo() - } - - for _, fi := range mi.fields { - sfp := src.offset(fi.field) - - // As an optimization, we can avoid the merge function call cost - // if we know for sure that the source will have no effect - // by checking if it is the zero value. - if unsafeAllowed { - if fi.isPointer && sfp.getPointer().isNil() { // Could be slice or string - continue - } - if fi.basicWidth > 0 { - switch { - case fi.basicWidth == 1 && !*sfp.toBool(): - continue - case fi.basicWidth == 4 && *sfp.toUint32() == 0: - continue - case fi.basicWidth == 8 && *sfp.toUint64() == 0: - continue - } - } - } - - dfp := dst.offset(fi.field) - fi.merge(dfp, sfp) - } - - // TODO: Make this faster? - out := dst.asPointerTo(mi.typ).Elem() - in := src.asPointerTo(mi.typ).Elem() - if emIn, err := extendable(in.Addr().Interface()); err == nil { - emOut, _ := extendable(out.Addr().Interface()) - mIn, muIn := emIn.extensionsRead() - if mIn != nil { - mOut := emOut.extensionsWrite() - muIn.Lock() - mergeExtension(mOut, mIn) - muIn.Unlock() - } - } - - if mi.unrecognized.IsValid() { - if b := *src.offset(mi.unrecognized).toBytes(); len(b) > 0 { - *dst.offset(mi.unrecognized).toBytes() = append([]byte(nil), b...) 
- } - } -} - -func (mi *mergeInfo) computeMergeInfo() { - mi.lock.Lock() - defer mi.lock.Unlock() - if mi.initialized != 0 { - return - } - t := mi.typ - n := t.NumField() - - props := GetProperties(t) - for i := 0; i < n; i++ { - f := t.Field(i) - if strings.HasPrefix(f.Name, "XXX_") { - continue - } - - mfi := mergeFieldInfo{field: toField(&f)} - tf := f.Type - - // As an optimization, we can avoid the merge function call cost - // if we know for sure that the source will have no effect - // by checking if it is the zero value. - if unsafeAllowed { - switch tf.Kind() { - case reflect.Ptr, reflect.Slice, reflect.String: - // As a special case, we assume slices and strings are pointers - // since we know that the first field in the SliceSlice or - // StringHeader is a data pointer. - mfi.isPointer = true - case reflect.Bool: - mfi.basicWidth = 1 - case reflect.Int32, reflect.Uint32, reflect.Float32: - mfi.basicWidth = 4 - case reflect.Int64, reflect.Uint64, reflect.Float64: - mfi.basicWidth = 8 - } - } - - // Unwrap tf to get at its most basic type. - var isPointer, isSlice bool - if tf.Kind() == reflect.Slice && tf.Elem().Kind() != reflect.Uint8 { - isSlice = true - tf = tf.Elem() - } - if tf.Kind() == reflect.Ptr { - isPointer = true - tf = tf.Elem() - } - if isPointer && isSlice && tf.Kind() != reflect.Struct { - panic("both pointer and slice for basic type in " + tf.Name()) - } - - switch tf.Kind() { - case reflect.Int32: - switch { - case isSlice: // E.g., []int32 - mfi.merge = func(dst, src pointer) { - // NOTE: toInt32Slice is not defined (see pointer_reflect.go). - /* - sfsp := src.toInt32Slice() - if *sfsp != nil { - dfsp := dst.toInt32Slice() - *dfsp = append(*dfsp, *sfsp...) - if *dfsp == nil { - *dfsp = []int64{} - } - } - */ - sfs := src.getInt32Slice() - if sfs != nil { - dfs := dst.getInt32Slice() - dfs = append(dfs, sfs...) 
- if dfs == nil { - dfs = []int32{} - } - dst.setInt32Slice(dfs) - } - } - case isPointer: // E.g., *int32 - mfi.merge = func(dst, src pointer) { - // NOTE: toInt32Ptr is not defined (see pointer_reflect.go). - /* - sfpp := src.toInt32Ptr() - if *sfpp != nil { - dfpp := dst.toInt32Ptr() - if *dfpp == nil { - *dfpp = Int32(**sfpp) - } else { - **dfpp = **sfpp - } - } - */ - sfp := src.getInt32Ptr() - if sfp != nil { - dfp := dst.getInt32Ptr() - if dfp == nil { - dst.setInt32Ptr(*sfp) - } else { - *dfp = *sfp - } - } - } - default: // E.g., int32 - mfi.merge = func(dst, src pointer) { - if v := *src.toInt32(); v != 0 { - *dst.toInt32() = v - } - } - } - case reflect.Int64: - switch { - case isSlice: // E.g., []int64 - mfi.merge = func(dst, src pointer) { - sfsp := src.toInt64Slice() - if *sfsp != nil { - dfsp := dst.toInt64Slice() - *dfsp = append(*dfsp, *sfsp...) - if *dfsp == nil { - *dfsp = []int64{} - } - } - } - case isPointer: // E.g., *int64 - mfi.merge = func(dst, src pointer) { - sfpp := src.toInt64Ptr() - if *sfpp != nil { - dfpp := dst.toInt64Ptr() - if *dfpp == nil { - *dfpp = Int64(**sfpp) - } else { - **dfpp = **sfpp - } - } - } - default: // E.g., int64 - mfi.merge = func(dst, src pointer) { - if v := *src.toInt64(); v != 0 { - *dst.toInt64() = v - } - } - } - case reflect.Uint32: - switch { - case isSlice: // E.g., []uint32 - mfi.merge = func(dst, src pointer) { - sfsp := src.toUint32Slice() - if *sfsp != nil { - dfsp := dst.toUint32Slice() - *dfsp = append(*dfsp, *sfsp...) 
- if *dfsp == nil { - *dfsp = []uint32{} - } - } - } - case isPointer: // E.g., *uint32 - mfi.merge = func(dst, src pointer) { - sfpp := src.toUint32Ptr() - if *sfpp != nil { - dfpp := dst.toUint32Ptr() - if *dfpp == nil { - *dfpp = Uint32(**sfpp) - } else { - **dfpp = **sfpp - } - } - } - default: // E.g., uint32 - mfi.merge = func(dst, src pointer) { - if v := *src.toUint32(); v != 0 { - *dst.toUint32() = v - } - } - } - case reflect.Uint64: - switch { - case isSlice: // E.g., []uint64 - mfi.merge = func(dst, src pointer) { - sfsp := src.toUint64Slice() - if *sfsp != nil { - dfsp := dst.toUint64Slice() - *dfsp = append(*dfsp, *sfsp...) - if *dfsp == nil { - *dfsp = []uint64{} - } - } - } - case isPointer: // E.g., *uint64 - mfi.merge = func(dst, src pointer) { - sfpp := src.toUint64Ptr() - if *sfpp != nil { - dfpp := dst.toUint64Ptr() - if *dfpp == nil { - *dfpp = Uint64(**sfpp) - } else { - **dfpp = **sfpp - } - } - } - default: // E.g., uint64 - mfi.merge = func(dst, src pointer) { - if v := *src.toUint64(); v != 0 { - *dst.toUint64() = v - } - } - } - case reflect.Float32: - switch { - case isSlice: // E.g., []float32 - mfi.merge = func(dst, src pointer) { - sfsp := src.toFloat32Slice() - if *sfsp != nil { - dfsp := dst.toFloat32Slice() - *dfsp = append(*dfsp, *sfsp...) - if *dfsp == nil { - *dfsp = []float32{} - } - } - } - case isPointer: // E.g., *float32 - mfi.merge = func(dst, src pointer) { - sfpp := src.toFloat32Ptr() - if *sfpp != nil { - dfpp := dst.toFloat32Ptr() - if *dfpp == nil { - *dfpp = Float32(**sfpp) - } else { - **dfpp = **sfpp - } - } - } - default: // E.g., float32 - mfi.merge = func(dst, src pointer) { - if v := *src.toFloat32(); v != 0 { - *dst.toFloat32() = v - } - } - } - case reflect.Float64: - switch { - case isSlice: // E.g., []float64 - mfi.merge = func(dst, src pointer) { - sfsp := src.toFloat64Slice() - if *sfsp != nil { - dfsp := dst.toFloat64Slice() - *dfsp = append(*dfsp, *sfsp...) 
- if *dfsp == nil { - *dfsp = []float64{} - } - } - } - case isPointer: // E.g., *float64 - mfi.merge = func(dst, src pointer) { - sfpp := src.toFloat64Ptr() - if *sfpp != nil { - dfpp := dst.toFloat64Ptr() - if *dfpp == nil { - *dfpp = Float64(**sfpp) - } else { - **dfpp = **sfpp - } - } - } - default: // E.g., float64 - mfi.merge = func(dst, src pointer) { - if v := *src.toFloat64(); v != 0 { - *dst.toFloat64() = v - } - } - } - case reflect.Bool: - switch { - case isSlice: // E.g., []bool - mfi.merge = func(dst, src pointer) { - sfsp := src.toBoolSlice() - if *sfsp != nil { - dfsp := dst.toBoolSlice() - *dfsp = append(*dfsp, *sfsp...) - if *dfsp == nil { - *dfsp = []bool{} - } - } - } - case isPointer: // E.g., *bool - mfi.merge = func(dst, src pointer) { - sfpp := src.toBoolPtr() - if *sfpp != nil { - dfpp := dst.toBoolPtr() - if *dfpp == nil { - *dfpp = Bool(**sfpp) - } else { - **dfpp = **sfpp - } - } - } - default: // E.g., bool - mfi.merge = func(dst, src pointer) { - if v := *src.toBool(); v { - *dst.toBool() = v - } - } - } - case reflect.String: - switch { - case isSlice: // E.g., []string - mfi.merge = func(dst, src pointer) { - sfsp := src.toStringSlice() - if *sfsp != nil { - dfsp := dst.toStringSlice() - *dfsp = append(*dfsp, *sfsp...) 
- if *dfsp == nil { - *dfsp = []string{} - } - } - } - case isPointer: // E.g., *string - mfi.merge = func(dst, src pointer) { - sfpp := src.toStringPtr() - if *sfpp != nil { - dfpp := dst.toStringPtr() - if *dfpp == nil { - *dfpp = String(**sfpp) - } else { - **dfpp = **sfpp - } - } - } - default: // E.g., string - mfi.merge = func(dst, src pointer) { - if v := *src.toString(); v != "" { - *dst.toString() = v - } - } - } - case reflect.Slice: - isProto3 := props.Prop[i].proto3 - switch { - case isPointer: - panic("bad pointer in byte slice case in " + tf.Name()) - case tf.Elem().Kind() != reflect.Uint8: - panic("bad element kind in byte slice case in " + tf.Name()) - case isSlice: // E.g., [][]byte - mfi.merge = func(dst, src pointer) { - sbsp := src.toBytesSlice() - if *sbsp != nil { - dbsp := dst.toBytesSlice() - for _, sb := range *sbsp { - if sb == nil { - *dbsp = append(*dbsp, nil) - } else { - *dbsp = append(*dbsp, append([]byte{}, sb...)) - } - } - if *dbsp == nil { - *dbsp = [][]byte{} - } - } - } - default: // E.g., []byte - mfi.merge = func(dst, src pointer) { - sbp := src.toBytes() - if *sbp != nil { - dbp := dst.toBytes() - if !isProto3 || len(*sbp) > 0 { - *dbp = append([]byte{}, *sbp...) 
- } - } - } - } - case reflect.Struct: - switch { - case !isPointer: - panic(fmt.Sprintf("message field %s without pointer", tf)) - case isSlice: // E.g., []*pb.T - mi := getMergeInfo(tf) - mfi.merge = func(dst, src pointer) { - sps := src.getPointerSlice() - if sps != nil { - dps := dst.getPointerSlice() - for _, sp := range sps { - var dp pointer - if !sp.isNil() { - dp = valToPointer(reflect.New(tf)) - mi.merge(dp, sp) - } - dps = append(dps, dp) - } - if dps == nil { - dps = []pointer{} - } - dst.setPointerSlice(dps) - } - } - default: // E.g., *pb.T - mi := getMergeInfo(tf) - mfi.merge = func(dst, src pointer) { - sp := src.getPointer() - if !sp.isNil() { - dp := dst.getPointer() - if dp.isNil() { - dp = valToPointer(reflect.New(tf)) - dst.setPointer(dp) - } - mi.merge(dp, sp) - } - } - } - case reflect.Map: - switch { - case isPointer || isSlice: - panic("bad pointer or slice in map case in " + tf.Name()) - default: // E.g., map[K]V - mfi.merge = func(dst, src pointer) { - sm := src.asPointerTo(tf).Elem() - if sm.Len() == 0 { - return - } - dm := dst.asPointerTo(tf).Elem() - if dm.IsNil() { - dm.Set(reflect.MakeMap(tf)) - } - - switch tf.Elem().Kind() { - case reflect.Ptr: // Proto struct (e.g., *T) - for _, key := range sm.MapKeys() { - val := sm.MapIndex(key) - val = reflect.ValueOf(Clone(val.Interface().(Message))) - dm.SetMapIndex(key, val) - } - case reflect.Slice: // E.g. Bytes type (e.g., []byte) - for _, key := range sm.MapKeys() { - val := sm.MapIndex(key) - val = reflect.ValueOf(append([]byte{}, val.Bytes()...)) - dm.SetMapIndex(key, val) - } - default: // Basic type (e.g., string) - for _, key := range sm.MapKeys() { - val := sm.MapIndex(key) - dm.SetMapIndex(key, val) - } - } - } - } - case reflect.Interface: - // Must be oneof field. - switch { - case isPointer || isSlice: - panic("bad pointer or slice in interface case in " + tf.Name()) - default: // E.g., interface{} - // TODO: Make this faster? 
- mfi.merge = func(dst, src pointer) { - su := src.asPointerTo(tf).Elem() - if !su.IsNil() { - du := dst.asPointerTo(tf).Elem() - typ := su.Elem().Type() - if du.IsNil() || du.Elem().Type() != typ { - du.Set(reflect.New(typ.Elem())) // Initialize interface if empty - } - sv := su.Elem().Elem().Field(0) - if sv.Kind() == reflect.Ptr && sv.IsNil() { - return - } - dv := du.Elem().Elem().Field(0) - if dv.Kind() == reflect.Ptr && dv.IsNil() { - dv.Set(reflect.New(sv.Type().Elem())) // Initialize proto message if empty - } - switch sv.Type().Kind() { - case reflect.Ptr: // Proto struct (e.g., *T) - Merge(dv.Interface().(Message), sv.Interface().(Message)) - case reflect.Slice: // E.g. Bytes type (e.g., []byte) - dv.Set(reflect.ValueOf(append([]byte{}, sv.Bytes()...))) - default: // Basic type (e.g., string) - dv.Set(sv) - } - } - } - } - default: - panic(fmt.Sprintf("merger not found for type:%s", tf)) - } - mi.fields = append(mi.fields, mfi) - } - - mi.unrecognized = invalidField - if f, ok := t.FieldByName("XXX_unrecognized"); ok { - if f.Type != reflect.TypeOf([]byte{}) { - panic("expected XXX_unrecognized to be of type []byte") - } - mi.unrecognized = toField(&f) - } - - atomic.StoreInt32(&mi.initialized, 1) -} diff --git a/app/platform/fabric/e2e-test/fabric-samples/chaincode/abac/go/vendor/github.com/golang/protobuf/proto/table_unmarshal.go b/app/platform/fabric/e2e-test/fabric-samples/chaincode/abac/go/vendor/github.com/golang/protobuf/proto/table_unmarshal.go deleted file mode 100644 index fd4afec8d..000000000 --- a/app/platform/fabric/e2e-test/fabric-samples/chaincode/abac/go/vendor/github.com/golang/protobuf/proto/table_unmarshal.go +++ /dev/null @@ -1,2051 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2016 The Go Authors. All rights reserved. 
-// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package proto - -import ( - "errors" - "fmt" - "io" - "math" - "reflect" - "strconv" - "strings" - "sync" - "sync/atomic" - "unicode/utf8" -) - -// Unmarshal is the entry point from the generated .pb.go files. -// This function is not intended to be used by non-generated code. -// This function is not subject to any compatibility guarantee. -// msg contains a pointer to a protocol buffer struct. 
-// b is the data to be unmarshaled into the protocol buffer. -// a is a pointer to a place to store cached unmarshal information. -func (a *InternalMessageInfo) Unmarshal(msg Message, b []byte) error { - // Load the unmarshal information for this message type. - // The atomic load ensures memory consistency. - u := atomicLoadUnmarshalInfo(&a.unmarshal) - if u == nil { - // Slow path: find unmarshal info for msg, update a with it. - u = getUnmarshalInfo(reflect.TypeOf(msg).Elem()) - atomicStoreUnmarshalInfo(&a.unmarshal, u) - } - // Then do the unmarshaling. - err := u.unmarshal(toPointer(&msg), b) - return err -} - -type unmarshalInfo struct { - typ reflect.Type // type of the protobuf struct - - // 0 = only typ field is initialized - // 1 = completely initialized - initialized int32 - lock sync.Mutex // prevents double initialization - dense []unmarshalFieldInfo // fields indexed by tag # - sparse map[uint64]unmarshalFieldInfo // fields indexed by tag # - reqFields []string // names of required fields - reqMask uint64 // 1< 0 { - // Read tag and wire type. - // Special case 1 and 2 byte varints. - var x uint64 - if b[0] < 128 { - x = uint64(b[0]) - b = b[1:] - } else if len(b) >= 2 && b[1] < 128 { - x = uint64(b[0]&0x7f) + uint64(b[1])<<7 - b = b[2:] - } else { - var n int - x, n = decodeVarint(b) - if n == 0 { - return io.ErrUnexpectedEOF - } - b = b[n:] - } - tag := x >> 3 - wire := int(x) & 7 - - // Dispatch on the tag to one of the unmarshal* functions below. - var f unmarshalFieldInfo - if tag < uint64(len(u.dense)) { - f = u.dense[tag] - } else { - f = u.sparse[tag] - } - if fn := f.unmarshal; fn != nil { - var err error - b, err = fn(b, m.offset(f.field), wire) - if err == nil { - reqMask |= f.reqMask - continue - } - if r, ok := err.(*RequiredNotSetError); ok { - // Remember this error, but keep parsing. We need to produce - // a full parse even if a required field is missing. 
- if errLater == nil { - errLater = r - } - reqMask |= f.reqMask - continue - } - if err != errInternalBadWireType { - if err == errInvalidUTF8 { - if errLater == nil { - fullName := revProtoTypes[reflect.PtrTo(u.typ)] + "." + f.name - errLater = &invalidUTF8Error{fullName} - } - continue - } - return err - } - // Fragments with bad wire type are treated as unknown fields. - } - - // Unknown tag. - if !u.unrecognized.IsValid() { - // Don't keep unrecognized data; just skip it. - var err error - b, err = skipField(b, wire) - if err != nil { - return err - } - continue - } - // Keep unrecognized data around. - // maybe in extensions, maybe in the unrecognized field. - z := m.offset(u.unrecognized).toBytes() - var emap map[int32]Extension - var e Extension - for _, r := range u.extensionRanges { - if uint64(r.Start) <= tag && tag <= uint64(r.End) { - if u.extensions.IsValid() { - mp := m.offset(u.extensions).toExtensions() - emap = mp.extensionsWrite() - e = emap[int32(tag)] - z = &e.enc - break - } - if u.oldExtensions.IsValid() { - p := m.offset(u.oldExtensions).toOldExtensions() - emap = *p - if emap == nil { - emap = map[int32]Extension{} - *p = emap - } - e = emap[int32(tag)] - z = &e.enc - break - } - panic("no extensions field available") - } - } - - // Use wire type to skip data. - var err error - b0 := b - b, err = skipField(b, wire) - if err != nil { - return err - } - *z = encodeVarint(*z, tag<<3|uint64(wire)) - *z = append(*z, b0[:len(b0)-len(b)]...) - - if emap != nil { - emap[int32(tag)] = e - } - } - if reqMask != u.reqMask && errLater == nil { - // A required field of this message is missing. - for _, n := range u.reqFields { - if reqMask&1 == 0 { - errLater = &RequiredNotSetError{n} - } - reqMask >>= 1 - } - } - return errLater -} - -// computeUnmarshalInfo fills in u with information for use -// in unmarshaling protocol buffers of type u.typ. 
-func (u *unmarshalInfo) computeUnmarshalInfo() { - u.lock.Lock() - defer u.lock.Unlock() - if u.initialized != 0 { - return - } - t := u.typ - n := t.NumField() - - // Set up the "not found" value for the unrecognized byte buffer. - // This is the default for proto3. - u.unrecognized = invalidField - u.extensions = invalidField - u.oldExtensions = invalidField - - // List of the generated type and offset for each oneof field. - type oneofField struct { - ityp reflect.Type // interface type of oneof field - field field // offset in containing message - } - var oneofFields []oneofField - - for i := 0; i < n; i++ { - f := t.Field(i) - if f.Name == "XXX_unrecognized" { - // The byte slice used to hold unrecognized input is special. - if f.Type != reflect.TypeOf(([]byte)(nil)) { - panic("bad type for XXX_unrecognized field: " + f.Type.Name()) - } - u.unrecognized = toField(&f) - continue - } - if f.Name == "XXX_InternalExtensions" { - // Ditto here. - if f.Type != reflect.TypeOf(XXX_InternalExtensions{}) { - panic("bad type for XXX_InternalExtensions field: " + f.Type.Name()) - } - u.extensions = toField(&f) - if f.Tag.Get("protobuf_messageset") == "1" { - u.isMessageSet = true - } - continue - } - if f.Name == "XXX_extensions" { - // An older form of the extensions field. - if f.Type != reflect.TypeOf((map[int32]Extension)(nil)) { - panic("bad type for XXX_extensions field: " + f.Type.Name()) - } - u.oldExtensions = toField(&f) - continue - } - if f.Name == "XXX_NoUnkeyedLiteral" || f.Name == "XXX_sizecache" { - continue - } - - oneof := f.Tag.Get("protobuf_oneof") - if oneof != "" { - oneofFields = append(oneofFields, oneofField{f.Type, toField(&f)}) - // The rest of oneof processing happens below. - continue - } - - tags := f.Tag.Get("protobuf") - tagArray := strings.Split(tags, ",") - if len(tagArray) < 2 { - panic("protobuf tag not enough fields in " + t.Name() + "." 
+ f.Name + ": " + tags) - } - tag, err := strconv.Atoi(tagArray[1]) - if err != nil { - panic("protobuf tag field not an integer: " + tagArray[1]) - } - - name := "" - for _, tag := range tagArray[3:] { - if strings.HasPrefix(tag, "name=") { - name = tag[5:] - } - } - - // Extract unmarshaling function from the field (its type and tags). - unmarshal := fieldUnmarshaler(&f) - - // Required field? - var reqMask uint64 - if tagArray[2] == "req" { - bit := len(u.reqFields) - u.reqFields = append(u.reqFields, name) - reqMask = uint64(1) << uint(bit) - // TODO: if we have more than 64 required fields, we end up - // not verifying that all required fields are present. - // Fix this, perhaps using a count of required fields? - } - - // Store the info in the correct slot in the message. - u.setTag(tag, toField(&f), unmarshal, reqMask, name) - } - - // Find any types associated with oneof fields. - // TODO: XXX_OneofFuncs returns more info than we need. Get rid of some of it? - fn := reflect.Zero(reflect.PtrTo(t)).MethodByName("XXX_OneofFuncs") - if fn.IsValid() { - res := fn.Call(nil)[3] // last return value from XXX_OneofFuncs: []interface{} - for i := res.Len() - 1; i >= 0; i-- { - v := res.Index(i) // interface{} - tptr := reflect.ValueOf(v.Interface()).Type() // *Msg_X - typ := tptr.Elem() // Msg_X - - f := typ.Field(0) // oneof implementers have one field - baseUnmarshal := fieldUnmarshaler(&f) - tags := strings.Split(f.Tag.Get("protobuf"), ",") - fieldNum, err := strconv.Atoi(tags[1]) - if err != nil { - panic("protobuf tag field not an integer: " + tags[1]) - } - var name string - for _, tag := range tags { - if strings.HasPrefix(tag, "name=") { - name = strings.TrimPrefix(tag, "name=") - break - } - } - - // Find the oneof field that this struct implements. - // Might take O(n^2) to process all of the oneofs, but who cares. - for _, of := range oneofFields { - if tptr.Implements(of.ityp) { - // We have found the corresponding interface for this struct. 
- // That lets us know where this struct should be stored - // when we encounter it during unmarshaling. - unmarshal := makeUnmarshalOneof(typ, of.ityp, baseUnmarshal) - u.setTag(fieldNum, of.field, unmarshal, 0, name) - } - } - } - } - - // Get extension ranges, if any. - fn = reflect.Zero(reflect.PtrTo(t)).MethodByName("ExtensionRangeArray") - if fn.IsValid() { - if !u.extensions.IsValid() && !u.oldExtensions.IsValid() { - panic("a message with extensions, but no extensions field in " + t.Name()) - } - u.extensionRanges = fn.Call(nil)[0].Interface().([]ExtensionRange) - } - - // Explicitly disallow tag 0. This will ensure we flag an error - // when decoding a buffer of all zeros. Without this code, we - // would decode and skip an all-zero buffer of even length. - // [0 0] is [tag=0/wiretype=varint varint-encoded-0]. - u.setTag(0, zeroField, func(b []byte, f pointer, w int) ([]byte, error) { - return nil, fmt.Errorf("proto: %s: illegal tag 0 (wire type %d)", t, w) - }, 0, "") - - // Set mask for required field check. - u.reqMask = uint64(1)<= 0 && (tag < 16 || tag < 2*n) { // TODO: what are the right numbers here? - for len(u.dense) <= tag { - u.dense = append(u.dense, unmarshalFieldInfo{}) - } - u.dense[tag] = i - return - } - if u.sparse == nil { - u.sparse = map[uint64]unmarshalFieldInfo{} - } - u.sparse[uint64(tag)] = i -} - -// fieldUnmarshaler returns an unmarshaler for the given field. -func fieldUnmarshaler(f *reflect.StructField) unmarshaler { - if f.Type.Kind() == reflect.Map { - return makeUnmarshalMap(f) - } - return typeUnmarshaler(f.Type, f.Tag.Get("protobuf")) -} - -// typeUnmarshaler returns an unmarshaler for the given field type / field tag pair. 
-func typeUnmarshaler(t reflect.Type, tags string) unmarshaler { - tagArray := strings.Split(tags, ",") - encoding := tagArray[0] - name := "unknown" - proto3 := false - validateUTF8 := true - for _, tag := range tagArray[3:] { - if strings.HasPrefix(tag, "name=") { - name = tag[5:] - } - if tag == "proto3" { - proto3 = true - } - } - validateUTF8 = validateUTF8 && proto3 - - // Figure out packaging (pointer, slice, or both) - slice := false - pointer := false - if t.Kind() == reflect.Slice && t.Elem().Kind() != reflect.Uint8 { - slice = true - t = t.Elem() - } - if t.Kind() == reflect.Ptr { - pointer = true - t = t.Elem() - } - - // We'll never have both pointer and slice for basic types. - if pointer && slice && t.Kind() != reflect.Struct { - panic("both pointer and slice for basic type in " + t.Name()) - } - - switch t.Kind() { - case reflect.Bool: - if pointer { - return unmarshalBoolPtr - } - if slice { - return unmarshalBoolSlice - } - return unmarshalBoolValue - case reflect.Int32: - switch encoding { - case "fixed32": - if pointer { - return unmarshalFixedS32Ptr - } - if slice { - return unmarshalFixedS32Slice - } - return unmarshalFixedS32Value - case "varint": - // this could be int32 or enum - if pointer { - return unmarshalInt32Ptr - } - if slice { - return unmarshalInt32Slice - } - return unmarshalInt32Value - case "zigzag32": - if pointer { - return unmarshalSint32Ptr - } - if slice { - return unmarshalSint32Slice - } - return unmarshalSint32Value - } - case reflect.Int64: - switch encoding { - case "fixed64": - if pointer { - return unmarshalFixedS64Ptr - } - if slice { - return unmarshalFixedS64Slice - } - return unmarshalFixedS64Value - case "varint": - if pointer { - return unmarshalInt64Ptr - } - if slice { - return unmarshalInt64Slice - } - return unmarshalInt64Value - case "zigzag64": - if pointer { - return unmarshalSint64Ptr - } - if slice { - return unmarshalSint64Slice - } - return unmarshalSint64Value - } - case reflect.Uint32: - switch 
encoding { - case "fixed32": - if pointer { - return unmarshalFixed32Ptr - } - if slice { - return unmarshalFixed32Slice - } - return unmarshalFixed32Value - case "varint": - if pointer { - return unmarshalUint32Ptr - } - if slice { - return unmarshalUint32Slice - } - return unmarshalUint32Value - } - case reflect.Uint64: - switch encoding { - case "fixed64": - if pointer { - return unmarshalFixed64Ptr - } - if slice { - return unmarshalFixed64Slice - } - return unmarshalFixed64Value - case "varint": - if pointer { - return unmarshalUint64Ptr - } - if slice { - return unmarshalUint64Slice - } - return unmarshalUint64Value - } - case reflect.Float32: - if pointer { - return unmarshalFloat32Ptr - } - if slice { - return unmarshalFloat32Slice - } - return unmarshalFloat32Value - case reflect.Float64: - if pointer { - return unmarshalFloat64Ptr - } - if slice { - return unmarshalFloat64Slice - } - return unmarshalFloat64Value - case reflect.Map: - panic("map type in typeUnmarshaler in " + t.Name()) - case reflect.Slice: - if pointer { - panic("bad pointer in slice case in " + t.Name()) - } - if slice { - return unmarshalBytesSlice - } - return unmarshalBytesValue - case reflect.String: - if validateUTF8 { - if pointer { - return unmarshalUTF8StringPtr - } - if slice { - return unmarshalUTF8StringSlice - } - return unmarshalUTF8StringValue - } - if pointer { - return unmarshalStringPtr - } - if slice { - return unmarshalStringSlice - } - return unmarshalStringValue - case reflect.Struct: - // message or group field - if !pointer { - panic(fmt.Sprintf("message/group field %s:%s without pointer", t, encoding)) - } - switch encoding { - case "bytes": - if slice { - return makeUnmarshalMessageSlicePtr(getUnmarshalInfo(t), name) - } - return makeUnmarshalMessagePtr(getUnmarshalInfo(t), name) - case "group": - if slice { - return makeUnmarshalGroupSlicePtr(getUnmarshalInfo(t), name) - } - return makeUnmarshalGroupPtr(getUnmarshalInfo(t), name) - } - } - 
panic(fmt.Sprintf("unmarshaler not found type:%s encoding:%s", t, encoding)) -} - -// Below are all the unmarshalers for individual fields of various types. - -func unmarshalInt64Value(b []byte, f pointer, w int) ([]byte, error) { - if w != WireVarint { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - v := int64(x) - *f.toInt64() = v - return b, nil -} - -func unmarshalInt64Ptr(b []byte, f pointer, w int) ([]byte, error) { - if w != WireVarint { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - v := int64(x) - *f.toInt64Ptr() = &v - return b, nil -} - -func unmarshalInt64Slice(b []byte, f pointer, w int) ([]byte, error) { - if w == WireBytes { // packed - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - res := b[x:] - b = b[:x] - for len(b) > 0 { - x, n = decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - v := int64(x) - s := f.toInt64Slice() - *s = append(*s, v) - } - return res, nil - } - if w != WireVarint { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - v := int64(x) - s := f.toInt64Slice() - *s = append(*s, v) - return b, nil -} - -func unmarshalSint64Value(b []byte, f pointer, w int) ([]byte, error) { - if w != WireVarint { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - v := int64(x>>1) ^ int64(x)<<63>>63 - *f.toInt64() = v - return b, nil -} - -func unmarshalSint64Ptr(b []byte, f pointer, w int) ([]byte, error) { - if w != WireVarint { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - v := int64(x>>1) ^ int64(x)<<63>>63 
- *f.toInt64Ptr() = &v - return b, nil -} - -func unmarshalSint64Slice(b []byte, f pointer, w int) ([]byte, error) { - if w == WireBytes { // packed - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - res := b[x:] - b = b[:x] - for len(b) > 0 { - x, n = decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - v := int64(x>>1) ^ int64(x)<<63>>63 - s := f.toInt64Slice() - *s = append(*s, v) - } - return res, nil - } - if w != WireVarint { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - v := int64(x>>1) ^ int64(x)<<63>>63 - s := f.toInt64Slice() - *s = append(*s, v) - return b, nil -} - -func unmarshalUint64Value(b []byte, f pointer, w int) ([]byte, error) { - if w != WireVarint { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - v := uint64(x) - *f.toUint64() = v - return b, nil -} - -func unmarshalUint64Ptr(b []byte, f pointer, w int) ([]byte, error) { - if w != WireVarint { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - v := uint64(x) - *f.toUint64Ptr() = &v - return b, nil -} - -func unmarshalUint64Slice(b []byte, f pointer, w int) ([]byte, error) { - if w == WireBytes { // packed - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - res := b[x:] - b = b[:x] - for len(b) > 0 { - x, n = decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - v := uint64(x) - s := f.toUint64Slice() - *s = append(*s, v) - } - return res, nil - } - if w != WireVarint { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = 
b[n:] - v := uint64(x) - s := f.toUint64Slice() - *s = append(*s, v) - return b, nil -} - -func unmarshalInt32Value(b []byte, f pointer, w int) ([]byte, error) { - if w != WireVarint { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - v := int32(x) - *f.toInt32() = v - return b, nil -} - -func unmarshalInt32Ptr(b []byte, f pointer, w int) ([]byte, error) { - if w != WireVarint { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - v := int32(x) - f.setInt32Ptr(v) - return b, nil -} - -func unmarshalInt32Slice(b []byte, f pointer, w int) ([]byte, error) { - if w == WireBytes { // packed - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - res := b[x:] - b = b[:x] - for len(b) > 0 { - x, n = decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - v := int32(x) - f.appendInt32Slice(v) - } - return res, nil - } - if w != WireVarint { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - v := int32(x) - f.appendInt32Slice(v) - return b, nil -} - -func unmarshalSint32Value(b []byte, f pointer, w int) ([]byte, error) { - if w != WireVarint { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - v := int32(x>>1) ^ int32(x)<<31>>31 - *f.toInt32() = v - return b, nil -} - -func unmarshalSint32Ptr(b []byte, f pointer, w int) ([]byte, error) { - if w != WireVarint { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - v := int32(x>>1) ^ int32(x)<<31>>31 - f.setInt32Ptr(v) - return b, nil -} - -func unmarshalSint32Slice(b []byte, f pointer, w int) ([]byte, error) { - 
if w == WireBytes { // packed - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - res := b[x:] - b = b[:x] - for len(b) > 0 { - x, n = decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - v := int32(x>>1) ^ int32(x)<<31>>31 - f.appendInt32Slice(v) - } - return res, nil - } - if w != WireVarint { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - v := int32(x>>1) ^ int32(x)<<31>>31 - f.appendInt32Slice(v) - return b, nil -} - -func unmarshalUint32Value(b []byte, f pointer, w int) ([]byte, error) { - if w != WireVarint { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - v := uint32(x) - *f.toUint32() = v - return b, nil -} - -func unmarshalUint32Ptr(b []byte, f pointer, w int) ([]byte, error) { - if w != WireVarint { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - v := uint32(x) - *f.toUint32Ptr() = &v - return b, nil -} - -func unmarshalUint32Slice(b []byte, f pointer, w int) ([]byte, error) { - if w == WireBytes { // packed - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - res := b[x:] - b = b[:x] - for len(b) > 0 { - x, n = decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - v := uint32(x) - s := f.toUint32Slice() - *s = append(*s, v) - } - return res, nil - } - if w != WireVarint { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - v := uint32(x) - s := f.toUint32Slice() - *s = append(*s, v) - return b, nil -} - -func unmarshalFixed64Value(b []byte, f pointer, w int) ([]byte, 
error) { - if w != WireFixed64 { - return b, errInternalBadWireType - } - if len(b) < 8 { - return nil, io.ErrUnexpectedEOF - } - v := uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56 - *f.toUint64() = v - return b[8:], nil -} - -func unmarshalFixed64Ptr(b []byte, f pointer, w int) ([]byte, error) { - if w != WireFixed64 { - return b, errInternalBadWireType - } - if len(b) < 8 { - return nil, io.ErrUnexpectedEOF - } - v := uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56 - *f.toUint64Ptr() = &v - return b[8:], nil -} - -func unmarshalFixed64Slice(b []byte, f pointer, w int) ([]byte, error) { - if w == WireBytes { // packed - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - res := b[x:] - b = b[:x] - for len(b) > 0 { - if len(b) < 8 { - return nil, io.ErrUnexpectedEOF - } - v := uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56 - s := f.toUint64Slice() - *s = append(*s, v) - b = b[8:] - } - return res, nil - } - if w != WireFixed64 { - return b, errInternalBadWireType - } - if len(b) < 8 { - return nil, io.ErrUnexpectedEOF - } - v := uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56 - s := f.toUint64Slice() - *s = append(*s, v) - return b[8:], nil -} - -func unmarshalFixedS64Value(b []byte, f pointer, w int) ([]byte, error) { - if w != WireFixed64 { - return b, errInternalBadWireType - } - if len(b) < 8 { - return nil, io.ErrUnexpectedEOF - } - v := int64(b[0]) | int64(b[1])<<8 | int64(b[2])<<16 | int64(b[3])<<24 | int64(b[4])<<32 | int64(b[5])<<40 | int64(b[6])<<48 | int64(b[7])<<56 - 
*f.toInt64() = v - return b[8:], nil -} - -func unmarshalFixedS64Ptr(b []byte, f pointer, w int) ([]byte, error) { - if w != WireFixed64 { - return b, errInternalBadWireType - } - if len(b) < 8 { - return nil, io.ErrUnexpectedEOF - } - v := int64(b[0]) | int64(b[1])<<8 | int64(b[2])<<16 | int64(b[3])<<24 | int64(b[4])<<32 | int64(b[5])<<40 | int64(b[6])<<48 | int64(b[7])<<56 - *f.toInt64Ptr() = &v - return b[8:], nil -} - -func unmarshalFixedS64Slice(b []byte, f pointer, w int) ([]byte, error) { - if w == WireBytes { // packed - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - res := b[x:] - b = b[:x] - for len(b) > 0 { - if len(b) < 8 { - return nil, io.ErrUnexpectedEOF - } - v := int64(b[0]) | int64(b[1])<<8 | int64(b[2])<<16 | int64(b[3])<<24 | int64(b[4])<<32 | int64(b[5])<<40 | int64(b[6])<<48 | int64(b[7])<<56 - s := f.toInt64Slice() - *s = append(*s, v) - b = b[8:] - } - return res, nil - } - if w != WireFixed64 { - return b, errInternalBadWireType - } - if len(b) < 8 { - return nil, io.ErrUnexpectedEOF - } - v := int64(b[0]) | int64(b[1])<<8 | int64(b[2])<<16 | int64(b[3])<<24 | int64(b[4])<<32 | int64(b[5])<<40 | int64(b[6])<<48 | int64(b[7])<<56 - s := f.toInt64Slice() - *s = append(*s, v) - return b[8:], nil -} - -func unmarshalFixed32Value(b []byte, f pointer, w int) ([]byte, error) { - if w != WireFixed32 { - return b, errInternalBadWireType - } - if len(b) < 4 { - return nil, io.ErrUnexpectedEOF - } - v := uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24 - *f.toUint32() = v - return b[4:], nil -} - -func unmarshalFixed32Ptr(b []byte, f pointer, w int) ([]byte, error) { - if w != WireFixed32 { - return b, errInternalBadWireType - } - if len(b) < 4 { - return nil, io.ErrUnexpectedEOF - } - v := uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24 - *f.toUint32Ptr() = &v - return b[4:], nil -} - -func 
unmarshalFixed32Slice(b []byte, f pointer, w int) ([]byte, error) { - if w == WireBytes { // packed - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - res := b[x:] - b = b[:x] - for len(b) > 0 { - if len(b) < 4 { - return nil, io.ErrUnexpectedEOF - } - v := uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24 - s := f.toUint32Slice() - *s = append(*s, v) - b = b[4:] - } - return res, nil - } - if w != WireFixed32 { - return b, errInternalBadWireType - } - if len(b) < 4 { - return nil, io.ErrUnexpectedEOF - } - v := uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24 - s := f.toUint32Slice() - *s = append(*s, v) - return b[4:], nil -} - -func unmarshalFixedS32Value(b []byte, f pointer, w int) ([]byte, error) { - if w != WireFixed32 { - return b, errInternalBadWireType - } - if len(b) < 4 { - return nil, io.ErrUnexpectedEOF - } - v := int32(b[0]) | int32(b[1])<<8 | int32(b[2])<<16 | int32(b[3])<<24 - *f.toInt32() = v - return b[4:], nil -} - -func unmarshalFixedS32Ptr(b []byte, f pointer, w int) ([]byte, error) { - if w != WireFixed32 { - return b, errInternalBadWireType - } - if len(b) < 4 { - return nil, io.ErrUnexpectedEOF - } - v := int32(b[0]) | int32(b[1])<<8 | int32(b[2])<<16 | int32(b[3])<<24 - f.setInt32Ptr(v) - return b[4:], nil -} - -func unmarshalFixedS32Slice(b []byte, f pointer, w int) ([]byte, error) { - if w == WireBytes { // packed - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - res := b[x:] - b = b[:x] - for len(b) > 0 { - if len(b) < 4 { - return nil, io.ErrUnexpectedEOF - } - v := int32(b[0]) | int32(b[1])<<8 | int32(b[2])<<16 | int32(b[3])<<24 - f.appendInt32Slice(v) - b = b[4:] - } - return res, nil - } - if w != WireFixed32 { - return b, errInternalBadWireType - } - if len(b) < 4 { - return nil, 
io.ErrUnexpectedEOF - } - v := int32(b[0]) | int32(b[1])<<8 | int32(b[2])<<16 | int32(b[3])<<24 - f.appendInt32Slice(v) - return b[4:], nil -} - -func unmarshalBoolValue(b []byte, f pointer, w int) ([]byte, error) { - if w != WireVarint { - return b, errInternalBadWireType - } - // Note: any length varint is allowed, even though any sane - // encoder will use one byte. - // See https://github.com/golang/protobuf/issues/76 - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - // TODO: check if x>1? Tests seem to indicate no. - v := x != 0 - *f.toBool() = v - return b[n:], nil -} - -func unmarshalBoolPtr(b []byte, f pointer, w int) ([]byte, error) { - if w != WireVarint { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - v := x != 0 - *f.toBoolPtr() = &v - return b[n:], nil -} - -func unmarshalBoolSlice(b []byte, f pointer, w int) ([]byte, error) { - if w == WireBytes { // packed - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - res := b[x:] - b = b[:x] - for len(b) > 0 { - x, n = decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - v := x != 0 - s := f.toBoolSlice() - *s = append(*s, v) - b = b[n:] - } - return res, nil - } - if w != WireVarint { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - v := x != 0 - s := f.toBoolSlice() - *s = append(*s, v) - return b[n:], nil -} - -func unmarshalFloat64Value(b []byte, f pointer, w int) ([]byte, error) { - if w != WireFixed64 { - return b, errInternalBadWireType - } - if len(b) < 8 { - return nil, io.ErrUnexpectedEOF - } - v := math.Float64frombits(uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56) - *f.toFloat64() = v - return b[8:], nil -} - 
-func unmarshalFloat64Ptr(b []byte, f pointer, w int) ([]byte, error) { - if w != WireFixed64 { - return b, errInternalBadWireType - } - if len(b) < 8 { - return nil, io.ErrUnexpectedEOF - } - v := math.Float64frombits(uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56) - *f.toFloat64Ptr() = &v - return b[8:], nil -} - -func unmarshalFloat64Slice(b []byte, f pointer, w int) ([]byte, error) { - if w == WireBytes { // packed - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - res := b[x:] - b = b[:x] - for len(b) > 0 { - if len(b) < 8 { - return nil, io.ErrUnexpectedEOF - } - v := math.Float64frombits(uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56) - s := f.toFloat64Slice() - *s = append(*s, v) - b = b[8:] - } - return res, nil - } - if w != WireFixed64 { - return b, errInternalBadWireType - } - if len(b) < 8 { - return nil, io.ErrUnexpectedEOF - } - v := math.Float64frombits(uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56) - s := f.toFloat64Slice() - *s = append(*s, v) - return b[8:], nil -} - -func unmarshalFloat32Value(b []byte, f pointer, w int) ([]byte, error) { - if w != WireFixed32 { - return b, errInternalBadWireType - } - if len(b) < 4 { - return nil, io.ErrUnexpectedEOF - } - v := math.Float32frombits(uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24) - *f.toFloat32() = v - return b[4:], nil -} - -func unmarshalFloat32Ptr(b []byte, f pointer, w int) ([]byte, error) { - if w != WireFixed32 { - return b, errInternalBadWireType - } - if len(b) < 4 { - return nil, io.ErrUnexpectedEOF - } - v := math.Float32frombits(uint32(b[0]) | uint32(b[1])<<8 | 
uint32(b[2])<<16 | uint32(b[3])<<24) - *f.toFloat32Ptr() = &v - return b[4:], nil -} - -func unmarshalFloat32Slice(b []byte, f pointer, w int) ([]byte, error) { - if w == WireBytes { // packed - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - res := b[x:] - b = b[:x] - for len(b) > 0 { - if len(b) < 4 { - return nil, io.ErrUnexpectedEOF - } - v := math.Float32frombits(uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24) - s := f.toFloat32Slice() - *s = append(*s, v) - b = b[4:] - } - return res, nil - } - if w != WireFixed32 { - return b, errInternalBadWireType - } - if len(b) < 4 { - return nil, io.ErrUnexpectedEOF - } - v := math.Float32frombits(uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24) - s := f.toFloat32Slice() - *s = append(*s, v) - return b[4:], nil -} - -func unmarshalStringValue(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - v := string(b[:x]) - *f.toString() = v - return b[x:], nil -} - -func unmarshalStringPtr(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - v := string(b[:x]) - *f.toStringPtr() = &v - return b[x:], nil -} - -func unmarshalStringSlice(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - v := string(b[:x]) - s := f.toStringSlice() - *s = append(*s, v) - return 
b[x:], nil -} - -func unmarshalUTF8StringValue(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - v := string(b[:x]) - *f.toString() = v - if !utf8.ValidString(v) { - return b[x:], errInvalidUTF8 - } - return b[x:], nil -} - -func unmarshalUTF8StringPtr(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - v := string(b[:x]) - *f.toStringPtr() = &v - if !utf8.ValidString(v) { - return b[x:], errInvalidUTF8 - } - return b[x:], nil -} - -func unmarshalUTF8StringSlice(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - v := string(b[:x]) - s := f.toStringSlice() - *s = append(*s, v) - if !utf8.ValidString(v) { - return b[x:], errInvalidUTF8 - } - return b[x:], nil -} - -var emptyBuf [0]byte - -func unmarshalBytesValue(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - // The use of append here is a trick which avoids the zeroing - // that would be required if we used a make/copy pair. - // We append to emptyBuf instead of nil because we want - // a non-nil result even when the length is 0. - v := append(emptyBuf[:], b[:x]...) 
- *f.toBytes() = v - return b[x:], nil -} - -func unmarshalBytesSlice(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - v := append(emptyBuf[:], b[:x]...) - s := f.toBytesSlice() - *s = append(*s, v) - return b[x:], nil -} - -func makeUnmarshalMessagePtr(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - // First read the message field to see if something is there. - // The semantics of multiple submessages are weird. Instead of - // the last one winning (as it is for all other fields), multiple - // submessages are merged. - v := f.getPointer() - if v.isNil() { - v = valToPointer(reflect.New(sub.typ)) - f.setPointer(v) - } - err := sub.unmarshal(v, b[:x]) - if err != nil { - if r, ok := err.(*RequiredNotSetError); ok { - r.field = name + "." + r.field - } else { - return nil, err - } - } - return b[x:], err - } -} - -func makeUnmarshalMessageSlicePtr(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - v := valToPointer(reflect.New(sub.typ)) - err := sub.unmarshal(v, b[:x]) - if err != nil { - if r, ok := err.(*RequiredNotSetError); ok { - r.field = name + "." 
+ r.field - } else { - return nil, err - } - } - f.appendPointer(v) - return b[x:], err - } -} - -func makeUnmarshalGroupPtr(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireStartGroup { - return b, errInternalBadWireType - } - x, y := findEndGroup(b) - if x < 0 { - return nil, io.ErrUnexpectedEOF - } - v := f.getPointer() - if v.isNil() { - v = valToPointer(reflect.New(sub.typ)) - f.setPointer(v) - } - err := sub.unmarshal(v, b[:x]) - if err != nil { - if r, ok := err.(*RequiredNotSetError); ok { - r.field = name + "." + r.field - } else { - return nil, err - } - } - return b[y:], err - } -} - -func makeUnmarshalGroupSlicePtr(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireStartGroup { - return b, errInternalBadWireType - } - x, y := findEndGroup(b) - if x < 0 { - return nil, io.ErrUnexpectedEOF - } - v := valToPointer(reflect.New(sub.typ)) - err := sub.unmarshal(v, b[:x]) - if err != nil { - if r, ok := err.(*RequiredNotSetError); ok { - r.field = name + "." + r.field - } else { - return nil, err - } - } - f.appendPointer(v) - return b[y:], err - } -} - -func makeUnmarshalMap(f *reflect.StructField) unmarshaler { - t := f.Type - kt := t.Key() - vt := t.Elem() - unmarshalKey := typeUnmarshaler(kt, f.Tag.Get("protobuf_key")) - unmarshalVal := typeUnmarshaler(vt, f.Tag.Get("protobuf_val")) - return func(b []byte, f pointer, w int) ([]byte, error) { - // The map entry is a submessage. Figure out how big it is. 
- if w != WireBytes { - return nil, fmt.Errorf("proto: bad wiretype for map field: got %d want %d", w, WireBytes) - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - r := b[x:] // unused data to return - b = b[:x] // data for map entry - - // Note: we could use #keys * #values ~= 200 functions - // to do map decoding without reflection. Probably not worth it. - // Maps will be somewhat slow. Oh well. - - // Read key and value from data. - var nerr nonFatal - k := reflect.New(kt) - v := reflect.New(vt) - for len(b) > 0 { - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - wire := int(x) & 7 - b = b[n:] - - var err error - switch x >> 3 { - case 1: - b, err = unmarshalKey(b, valToPointer(k), wire) - case 2: - b, err = unmarshalVal(b, valToPointer(v), wire) - default: - err = errInternalBadWireType // skip unknown tag - } - - if nerr.Merge(err) { - continue - } - if err != errInternalBadWireType { - return nil, err - } - - // Skip past unknown fields. - b, err = skipField(b, wire) - if err != nil { - return nil, err - } - } - - // Get map, allocate if needed. - m := f.asPointerTo(t).Elem() // an addressable map[K]T - if m.IsNil() { - m.Set(reflect.MakeMap(t)) - } - - // Insert into map. - m.SetMapIndex(k.Elem(), v.Elem()) - - return r, nerr.E - } -} - -// makeUnmarshalOneof makes an unmarshaler for oneof fields. -// for: -// message Msg { -// oneof F { -// int64 X = 1; -// float64 Y = 2; -// } -// } -// typ is the type of the concrete entry for a oneof case (e.g. Msg_X). -// ityp is the interface type of the oneof field (e.g. isMsg_F). -// unmarshal is the unmarshaler for the base type of the oneof case (e.g. int64). -// Note that this function will be called once for each case in the oneof. 
-func makeUnmarshalOneof(typ, ityp reflect.Type, unmarshal unmarshaler) unmarshaler { - sf := typ.Field(0) - field0 := toField(&sf) - return func(b []byte, f pointer, w int) ([]byte, error) { - // Allocate holder for value. - v := reflect.New(typ) - - // Unmarshal data into holder. - // We unmarshal into the first field of the holder object. - var err error - var nerr nonFatal - b, err = unmarshal(b, valToPointer(v).offset(field0), w) - if !nerr.Merge(err) { - return nil, err - } - - // Write pointer to holder into target field. - f.asPointerTo(ityp).Elem().Set(v) - - return b, nerr.E - } -} - -// Error used by decode internally. -var errInternalBadWireType = errors.New("proto: internal error: bad wiretype") - -// skipField skips past a field of type wire and returns the remaining bytes. -func skipField(b []byte, wire int) ([]byte, error) { - switch wire { - case WireVarint: - _, k := decodeVarint(b) - if k == 0 { - return b, io.ErrUnexpectedEOF - } - b = b[k:] - case WireFixed32: - if len(b) < 4 { - return b, io.ErrUnexpectedEOF - } - b = b[4:] - case WireFixed64: - if len(b) < 8 { - return b, io.ErrUnexpectedEOF - } - b = b[8:] - case WireBytes: - m, k := decodeVarint(b) - if k == 0 || uint64(len(b)-k) < m { - return b, io.ErrUnexpectedEOF - } - b = b[uint64(k)+m:] - case WireStartGroup: - _, i := findEndGroup(b) - if i == -1 { - return b, io.ErrUnexpectedEOF - } - b = b[i:] - default: - return b, fmt.Errorf("proto: can't skip unknown wire type %d", wire) - } - return b, nil -} - -// findEndGroup finds the index of the next EndGroup tag. -// Groups may be nested, so the "next" EndGroup tag is the first -// unpaired EndGroup. -// findEndGroup returns the indexes of the start and end of the EndGroup tag. -// Returns (-1,-1) if it can't find one. 
-func findEndGroup(b []byte) (int, int) { - depth := 1 - i := 0 - for { - x, n := decodeVarint(b[i:]) - if n == 0 { - return -1, -1 - } - j := i - i += n - switch x & 7 { - case WireVarint: - _, k := decodeVarint(b[i:]) - if k == 0 { - return -1, -1 - } - i += k - case WireFixed32: - if len(b)-4 < i { - return -1, -1 - } - i += 4 - case WireFixed64: - if len(b)-8 < i { - return -1, -1 - } - i += 8 - case WireBytes: - m, k := decodeVarint(b[i:]) - if k == 0 { - return -1, -1 - } - i += k - if uint64(len(b)-i) < m { - return -1, -1 - } - i += int(m) - case WireStartGroup: - depth++ - case WireEndGroup: - depth-- - if depth == 0 { - return j, i - } - default: - return -1, -1 - } - } -} - -// encodeVarint appends a varint-encoded integer to b and returns the result. -func encodeVarint(b []byte, x uint64) []byte { - for x >= 1<<7 { - b = append(b, byte(x&0x7f|0x80)) - x >>= 7 - } - return append(b, byte(x)) -} - -// decodeVarint reads a varint-encoded integer from b. -// Returns the decoded integer and the number of bytes read. -// If there is an error, it returns 0,0. 
-func decodeVarint(b []byte) (uint64, int) { - var x, y uint64 - if len(b) == 0 { - goto bad - } - x = uint64(b[0]) - if x < 0x80 { - return x, 1 - } - x -= 0x80 - - if len(b) <= 1 { - goto bad - } - y = uint64(b[1]) - x += y << 7 - if y < 0x80 { - return x, 2 - } - x -= 0x80 << 7 - - if len(b) <= 2 { - goto bad - } - y = uint64(b[2]) - x += y << 14 - if y < 0x80 { - return x, 3 - } - x -= 0x80 << 14 - - if len(b) <= 3 { - goto bad - } - y = uint64(b[3]) - x += y << 21 - if y < 0x80 { - return x, 4 - } - x -= 0x80 << 21 - - if len(b) <= 4 { - goto bad - } - y = uint64(b[4]) - x += y << 28 - if y < 0x80 { - return x, 5 - } - x -= 0x80 << 28 - - if len(b) <= 5 { - goto bad - } - y = uint64(b[5]) - x += y << 35 - if y < 0x80 { - return x, 6 - } - x -= 0x80 << 35 - - if len(b) <= 6 { - goto bad - } - y = uint64(b[6]) - x += y << 42 - if y < 0x80 { - return x, 7 - } - x -= 0x80 << 42 - - if len(b) <= 7 { - goto bad - } - y = uint64(b[7]) - x += y << 49 - if y < 0x80 { - return x, 8 - } - x -= 0x80 << 49 - - if len(b) <= 8 { - goto bad - } - y = uint64(b[8]) - x += y << 56 - if y < 0x80 { - return x, 9 - } - x -= 0x80 << 56 - - if len(b) <= 9 { - goto bad - } - y = uint64(b[9]) - x += y << 63 - if y < 2 { - return x, 10 - } - -bad: - return 0, 0 -} diff --git a/app/platform/fabric/e2e-test/fabric-samples/chaincode/abac/go/vendor/github.com/golang/protobuf/proto/text.go b/app/platform/fabric/e2e-test/fabric-samples/chaincode/abac/go/vendor/github.com/golang/protobuf/proto/text.go deleted file mode 100644 index 1aaee725b..000000000 --- a/app/platform/fabric/e2e-test/fabric-samples/chaincode/abac/go/vendor/github.com/golang/protobuf/proto/text.go +++ /dev/null @@ -1,843 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2010 The Go Authors. All rights reserved. 
-// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package proto - -// Functions for writing the text protocol buffer format. 
- -import ( - "bufio" - "bytes" - "encoding" - "errors" - "fmt" - "io" - "log" - "math" - "reflect" - "sort" - "strings" -) - -var ( - newline = []byte("\n") - spaces = []byte(" ") - endBraceNewline = []byte("}\n") - backslashN = []byte{'\\', 'n'} - backslashR = []byte{'\\', 'r'} - backslashT = []byte{'\\', 't'} - backslashDQ = []byte{'\\', '"'} - backslashBS = []byte{'\\', '\\'} - posInf = []byte("inf") - negInf = []byte("-inf") - nan = []byte("nan") -) - -type writer interface { - io.Writer - WriteByte(byte) error -} - -// textWriter is an io.Writer that tracks its indentation level. -type textWriter struct { - ind int - complete bool // if the current position is a complete line - compact bool // whether to write out as a one-liner - w writer -} - -func (w *textWriter) WriteString(s string) (n int, err error) { - if !strings.Contains(s, "\n") { - if !w.compact && w.complete { - w.writeIndent() - } - w.complete = false - return io.WriteString(w.w, s) - } - // WriteString is typically called without newlines, so this - // codepath and its copy are rare. We copy to avoid - // duplicating all of Write's logic here. 
- return w.Write([]byte(s)) -} - -func (w *textWriter) Write(p []byte) (n int, err error) { - newlines := bytes.Count(p, newline) - if newlines == 0 { - if !w.compact && w.complete { - w.writeIndent() - } - n, err = w.w.Write(p) - w.complete = false - return n, err - } - - frags := bytes.SplitN(p, newline, newlines+1) - if w.compact { - for i, frag := range frags { - if i > 0 { - if err := w.w.WriteByte(' '); err != nil { - return n, err - } - n++ - } - nn, err := w.w.Write(frag) - n += nn - if err != nil { - return n, err - } - } - return n, nil - } - - for i, frag := range frags { - if w.complete { - w.writeIndent() - } - nn, err := w.w.Write(frag) - n += nn - if err != nil { - return n, err - } - if i+1 < len(frags) { - if err := w.w.WriteByte('\n'); err != nil { - return n, err - } - n++ - } - } - w.complete = len(frags[len(frags)-1]) == 0 - return n, nil -} - -func (w *textWriter) WriteByte(c byte) error { - if w.compact && c == '\n' { - c = ' ' - } - if !w.compact && w.complete { - w.writeIndent() - } - err := w.w.WriteByte(c) - w.complete = c == '\n' - return err -} - -func (w *textWriter) indent() { w.ind++ } - -func (w *textWriter) unindent() { - if w.ind == 0 { - log.Print("proto: textWriter unindented too far") - return - } - w.ind-- -} - -func writeName(w *textWriter, props *Properties) error { - if _, err := w.WriteString(props.OrigName); err != nil { - return err - } - if props.Wire != "group" { - return w.WriteByte(':') - } - return nil -} - -func requiresQuotes(u string) bool { - // When type URL contains any characters except [0-9A-Za-z./\-]*, it must be quoted. - for _, ch := range u { - switch { - case ch == '.' 
|| ch == '/' || ch == '_': - continue - case '0' <= ch && ch <= '9': - continue - case 'A' <= ch && ch <= 'Z': - continue - case 'a' <= ch && ch <= 'z': - continue - default: - return true - } - } - return false -} - -// isAny reports whether sv is a google.protobuf.Any message -func isAny(sv reflect.Value) bool { - type wkt interface { - XXX_WellKnownType() string - } - t, ok := sv.Addr().Interface().(wkt) - return ok && t.XXX_WellKnownType() == "Any" -} - -// writeProto3Any writes an expanded google.protobuf.Any message. -// -// It returns (false, nil) if sv value can't be unmarshaled (e.g. because -// required messages are not linked in). -// -// It returns (true, error) when sv was written in expanded format or an error -// was encountered. -func (tm *TextMarshaler) writeProto3Any(w *textWriter, sv reflect.Value) (bool, error) { - turl := sv.FieldByName("TypeUrl") - val := sv.FieldByName("Value") - if !turl.IsValid() || !val.IsValid() { - return true, errors.New("proto: invalid google.protobuf.Any message") - } - - b, ok := val.Interface().([]byte) - if !ok { - return true, errors.New("proto: invalid google.protobuf.Any message") - } - - parts := strings.Split(turl.String(), "/") - mt := MessageType(parts[len(parts)-1]) - if mt == nil { - return false, nil - } - m := reflect.New(mt.Elem()) - if err := Unmarshal(b, m.Interface().(Message)); err != nil { - return false, nil - } - w.Write([]byte("[")) - u := turl.String() - if requiresQuotes(u) { - writeString(w, u) - } else { - w.Write([]byte(u)) - } - if w.compact { - w.Write([]byte("]:<")) - } else { - w.Write([]byte("]: <\n")) - w.ind++ - } - if err := tm.writeStruct(w, m.Elem()); err != nil { - return true, err - } - if w.compact { - w.Write([]byte("> ")) - } else { - w.ind-- - w.Write([]byte(">\n")) - } - return true, nil -} - -func (tm *TextMarshaler) writeStruct(w *textWriter, sv reflect.Value) error { - if tm.ExpandAny && isAny(sv) { - if canExpand, err := tm.writeProto3Any(w, sv); canExpand { - return 
err - } - } - st := sv.Type() - sprops := GetProperties(st) - for i := 0; i < sv.NumField(); i++ { - fv := sv.Field(i) - props := sprops.Prop[i] - name := st.Field(i).Name - - if name == "XXX_NoUnkeyedLiteral" { - continue - } - - if strings.HasPrefix(name, "XXX_") { - // There are two XXX_ fields: - // XXX_unrecognized []byte - // XXX_extensions map[int32]proto.Extension - // The first is handled here; - // the second is handled at the bottom of this function. - if name == "XXX_unrecognized" && !fv.IsNil() { - if err := writeUnknownStruct(w, fv.Interface().([]byte)); err != nil { - return err - } - } - continue - } - if fv.Kind() == reflect.Ptr && fv.IsNil() { - // Field not filled in. This could be an optional field or - // a required field that wasn't filled in. Either way, there - // isn't anything we can show for it. - continue - } - if fv.Kind() == reflect.Slice && fv.IsNil() { - // Repeated field that is empty, or a bytes field that is unused. - continue - } - - if props.Repeated && fv.Kind() == reflect.Slice { - // Repeated field. - for j := 0; j < fv.Len(); j++ { - if err := writeName(w, props); err != nil { - return err - } - if !w.compact { - if err := w.WriteByte(' '); err != nil { - return err - } - } - v := fv.Index(j) - if v.Kind() == reflect.Ptr && v.IsNil() { - // A nil message in a repeated field is not valid, - // but we can handle that more gracefully than panicking. - if _, err := w.Write([]byte("\n")); err != nil { - return err - } - continue - } - if err := tm.writeAny(w, v, props); err != nil { - return err - } - if err := w.WriteByte('\n'); err != nil { - return err - } - } - continue - } - if fv.Kind() == reflect.Map { - // Map fields are rendered as a repeated struct with key/value fields. 
- keys := fv.MapKeys() - sort.Sort(mapKeys(keys)) - for _, key := range keys { - val := fv.MapIndex(key) - if err := writeName(w, props); err != nil { - return err - } - if !w.compact { - if err := w.WriteByte(' '); err != nil { - return err - } - } - // open struct - if err := w.WriteByte('<'); err != nil { - return err - } - if !w.compact { - if err := w.WriteByte('\n'); err != nil { - return err - } - } - w.indent() - // key - if _, err := w.WriteString("key:"); err != nil { - return err - } - if !w.compact { - if err := w.WriteByte(' '); err != nil { - return err - } - } - if err := tm.writeAny(w, key, props.MapKeyProp); err != nil { - return err - } - if err := w.WriteByte('\n'); err != nil { - return err - } - // nil values aren't legal, but we can avoid panicking because of them. - if val.Kind() != reflect.Ptr || !val.IsNil() { - // value - if _, err := w.WriteString("value:"); err != nil { - return err - } - if !w.compact { - if err := w.WriteByte(' '); err != nil { - return err - } - } - if err := tm.writeAny(w, val, props.MapValProp); err != nil { - return err - } - if err := w.WriteByte('\n'); err != nil { - return err - } - } - // close struct - w.unindent() - if err := w.WriteByte('>'); err != nil { - return err - } - if err := w.WriteByte('\n'); err != nil { - return err - } - } - continue - } - if props.proto3 && fv.Kind() == reflect.Slice && fv.Len() == 0 { - // empty bytes field - continue - } - if fv.Kind() != reflect.Ptr && fv.Kind() != reflect.Slice { - // proto3 non-repeated scalar field; skip if zero value - if isProto3Zero(fv) { - continue - } - } - - if fv.Kind() == reflect.Interface { - // Check if it is a oneof. - if st.Field(i).Tag.Get("protobuf_oneof") != "" { - // fv is nil, or holds a pointer to generated struct. - // That generated struct has exactly one field, - // which has a protobuf struct tag. 
- if fv.IsNil() { - continue - } - inner := fv.Elem().Elem() // interface -> *T -> T - tag := inner.Type().Field(0).Tag.Get("protobuf") - props = new(Properties) // Overwrite the outer props var, but not its pointee. - props.Parse(tag) - // Write the value in the oneof, not the oneof itself. - fv = inner.Field(0) - - // Special case to cope with malformed messages gracefully: - // If the value in the oneof is a nil pointer, don't panic - // in writeAny. - if fv.Kind() == reflect.Ptr && fv.IsNil() { - // Use errors.New so writeAny won't render quotes. - msg := errors.New("/* nil */") - fv = reflect.ValueOf(&msg).Elem() - } - } - } - - if err := writeName(w, props); err != nil { - return err - } - if !w.compact { - if err := w.WriteByte(' '); err != nil { - return err - } - } - - // Enums have a String method, so writeAny will work fine. - if err := tm.writeAny(w, fv, props); err != nil { - return err - } - - if err := w.WriteByte('\n'); err != nil { - return err - } - } - - // Extensions (the XXX_extensions field). - pv := sv.Addr() - if _, err := extendable(pv.Interface()); err == nil { - if err := tm.writeExtensions(w, pv); err != nil { - return err - } - } - - return nil -} - -// writeAny writes an arbitrary field. -func (tm *TextMarshaler) writeAny(w *textWriter, v reflect.Value, props *Properties) error { - v = reflect.Indirect(v) - - // Floats have special cases. - if v.Kind() == reflect.Float32 || v.Kind() == reflect.Float64 { - x := v.Float() - var b []byte - switch { - case math.IsInf(x, 1): - b = posInf - case math.IsInf(x, -1): - b = negInf - case math.IsNaN(x): - b = nan - } - if b != nil { - _, err := w.Write(b) - return err - } - // Other values are handled below. - } - - // We don't attempt to serialise every possible value type; only those - // that can occur in protocol buffers. - switch v.Kind() { - case reflect.Slice: - // Should only be a []byte; repeated fields are handled in writeStruct. 
- if err := writeString(w, string(v.Bytes())); err != nil { - return err - } - case reflect.String: - if err := writeString(w, v.String()); err != nil { - return err - } - case reflect.Struct: - // Required/optional group/message. - var bra, ket byte = '<', '>' - if props != nil && props.Wire == "group" { - bra, ket = '{', '}' - } - if err := w.WriteByte(bra); err != nil { - return err - } - if !w.compact { - if err := w.WriteByte('\n'); err != nil { - return err - } - } - w.indent() - if v.CanAddr() { - // Calling v.Interface on a struct causes the reflect package to - // copy the entire struct. This is racy with the new Marshaler - // since we atomically update the XXX_sizecache. - // - // Thus, we retrieve a pointer to the struct if possible to avoid - // a race since v.Interface on the pointer doesn't copy the struct. - // - // If v is not addressable, then we are not worried about a race - // since it implies that the binary Marshaler cannot possibly be - // mutating this value. - v = v.Addr() - } - if etm, ok := v.Interface().(encoding.TextMarshaler); ok { - text, err := etm.MarshalText() - if err != nil { - return err - } - if _, err = w.Write(text); err != nil { - return err - } - } else { - if v.Kind() == reflect.Ptr { - v = v.Elem() - } - if err := tm.writeStruct(w, v); err != nil { - return err - } - } - w.unindent() - if err := w.WriteByte(ket); err != nil { - return err - } - default: - _, err := fmt.Fprint(w, v.Interface()) - return err - } - return nil -} - -// equivalent to C's isprint. -func isprint(c byte) bool { - return c >= 0x20 && c < 0x7f -} - -// writeString writes a string in the protocol buffer text format. -// It is similar to strconv.Quote except we don't use Go escape sequences, -// we treat the string as a byte sequence, and we use octal escapes. -// These differences are to maintain interoperability with the other -// languages' implementations of the text format. 
-func writeString(w *textWriter, s string) error { - // use WriteByte here to get any needed indent - if err := w.WriteByte('"'); err != nil { - return err - } - // Loop over the bytes, not the runes. - for i := 0; i < len(s); i++ { - var err error - // Divergence from C++: we don't escape apostrophes. - // There's no need to escape them, and the C++ parser - // copes with a naked apostrophe. - switch c := s[i]; c { - case '\n': - _, err = w.w.Write(backslashN) - case '\r': - _, err = w.w.Write(backslashR) - case '\t': - _, err = w.w.Write(backslashT) - case '"': - _, err = w.w.Write(backslashDQ) - case '\\': - _, err = w.w.Write(backslashBS) - default: - if isprint(c) { - err = w.w.WriteByte(c) - } else { - _, err = fmt.Fprintf(w.w, "\\%03o", c) - } - } - if err != nil { - return err - } - } - return w.WriteByte('"') -} - -func writeUnknownStruct(w *textWriter, data []byte) (err error) { - if !w.compact { - if _, err := fmt.Fprintf(w, "/* %d unknown bytes */\n", len(data)); err != nil { - return err - } - } - b := NewBuffer(data) - for b.index < len(b.buf) { - x, err := b.DecodeVarint() - if err != nil { - _, err := fmt.Fprintf(w, "/* %v */\n", err) - return err - } - wire, tag := x&7, x>>3 - if wire == WireEndGroup { - w.unindent() - if _, err := w.Write(endBraceNewline); err != nil { - return err - } - continue - } - if _, err := fmt.Fprint(w, tag); err != nil { - return err - } - if wire != WireStartGroup { - if err := w.WriteByte(':'); err != nil { - return err - } - } - if !w.compact || wire == WireStartGroup { - if err := w.WriteByte(' '); err != nil { - return err - } - } - switch wire { - case WireBytes: - buf, e := b.DecodeRawBytes(false) - if e == nil { - _, err = fmt.Fprintf(w, "%q", buf) - } else { - _, err = fmt.Fprintf(w, "/* %v */", e) - } - case WireFixed32: - x, err = b.DecodeFixed32() - err = writeUnknownInt(w, x, err) - case WireFixed64: - x, err = b.DecodeFixed64() - err = writeUnknownInt(w, x, err) - case WireStartGroup: - err = 
w.WriteByte('{') - w.indent() - case WireVarint: - x, err = b.DecodeVarint() - err = writeUnknownInt(w, x, err) - default: - _, err = fmt.Fprintf(w, "/* unknown wire type %d */", wire) - } - if err != nil { - return err - } - if err = w.WriteByte('\n'); err != nil { - return err - } - } - return nil -} - -func writeUnknownInt(w *textWriter, x uint64, err error) error { - if err == nil { - _, err = fmt.Fprint(w, x) - } else { - _, err = fmt.Fprintf(w, "/* %v */", err) - } - return err -} - -type int32Slice []int32 - -func (s int32Slice) Len() int { return len(s) } -func (s int32Slice) Less(i, j int) bool { return s[i] < s[j] } -func (s int32Slice) Swap(i, j int) { s[i], s[j] = s[j], s[i] } - -// writeExtensions writes all the extensions in pv. -// pv is assumed to be a pointer to a protocol message struct that is extendable. -func (tm *TextMarshaler) writeExtensions(w *textWriter, pv reflect.Value) error { - emap := extensionMaps[pv.Type().Elem()] - ep, _ := extendable(pv.Interface()) - - // Order the extensions by ID. - // This isn't strictly necessary, but it will give us - // canonical output, which will also make testing easier. - m, mu := ep.extensionsRead() - if m == nil { - return nil - } - mu.Lock() - ids := make([]int32, 0, len(m)) - for id := range m { - ids = append(ids, id) - } - sort.Sort(int32Slice(ids)) - mu.Unlock() - - for _, extNum := range ids { - ext := m[extNum] - var desc *ExtensionDesc - if emap != nil { - desc = emap[extNum] - } - if desc == nil { - // Unknown extension. - if err := writeUnknownStruct(w, ext.enc); err != nil { - return err - } - continue - } - - pb, err := GetExtension(ep, desc) - if err != nil { - return fmt.Errorf("failed getting extension: %v", err) - } - - // Repeated extensions will appear as a slice. 
- if !desc.repeated() { - if err := tm.writeExtension(w, desc.Name, pb); err != nil { - return err - } - } else { - v := reflect.ValueOf(pb) - for i := 0; i < v.Len(); i++ { - if err := tm.writeExtension(w, desc.Name, v.Index(i).Interface()); err != nil { - return err - } - } - } - } - return nil -} - -func (tm *TextMarshaler) writeExtension(w *textWriter, name string, pb interface{}) error { - if _, err := fmt.Fprintf(w, "[%s]:", name); err != nil { - return err - } - if !w.compact { - if err := w.WriteByte(' '); err != nil { - return err - } - } - if err := tm.writeAny(w, reflect.ValueOf(pb), nil); err != nil { - return err - } - if err := w.WriteByte('\n'); err != nil { - return err - } - return nil -} - -func (w *textWriter) writeIndent() { - if !w.complete { - return - } - remain := w.ind * 2 - for remain > 0 { - n := remain - if n > len(spaces) { - n = len(spaces) - } - w.w.Write(spaces[:n]) - remain -= n - } - w.complete = false -} - -// TextMarshaler is a configurable text format marshaler. -type TextMarshaler struct { - Compact bool // use compact text format (one line). - ExpandAny bool // expand google.protobuf.Any messages of known types -} - -// Marshal writes a given protocol buffer in text format. -// The only errors returned are from w. -func (tm *TextMarshaler) Marshal(w io.Writer, pb Message) error { - val := reflect.ValueOf(pb) - if pb == nil || val.IsNil() { - w.Write([]byte("")) - return nil - } - var bw *bufio.Writer - ww, ok := w.(writer) - if !ok { - bw = bufio.NewWriter(w) - ww = bw - } - aw := &textWriter{ - w: ww, - complete: true, - compact: tm.Compact, - } - - if etm, ok := pb.(encoding.TextMarshaler); ok { - text, err := etm.MarshalText() - if err != nil { - return err - } - if _, err = aw.Write(text); err != nil { - return err - } - if bw != nil { - return bw.Flush() - } - return nil - } - // Dereference the received pointer so we don't have outer < and >. 
- v := reflect.Indirect(val) - if err := tm.writeStruct(aw, v); err != nil { - return err - } - if bw != nil { - return bw.Flush() - } - return nil -} - -// Text is the same as Marshal, but returns the string directly. -func (tm *TextMarshaler) Text(pb Message) string { - var buf bytes.Buffer - tm.Marshal(&buf, pb) - return buf.String() -} - -var ( - defaultTextMarshaler = TextMarshaler{} - compactTextMarshaler = TextMarshaler{Compact: true} -) - -// TODO: consider removing some of the Marshal functions below. - -// MarshalText writes a given protocol buffer in text format. -// The only errors returned are from w. -func MarshalText(w io.Writer, pb Message) error { return defaultTextMarshaler.Marshal(w, pb) } - -// MarshalTextString is the same as MarshalText, but returns the string directly. -func MarshalTextString(pb Message) string { return defaultTextMarshaler.Text(pb) } - -// CompactText writes a given protocol buffer in compact text format (one line). -func CompactText(w io.Writer, pb Message) error { return compactTextMarshaler.Marshal(w, pb) } - -// CompactTextString is the same as CompactText, but returns the string directly. -func CompactTextString(pb Message) string { return compactTextMarshaler.Text(pb) } diff --git a/app/platform/fabric/e2e-test/fabric-samples/chaincode/abac/go/vendor/github.com/golang/protobuf/proto/text_parser.go b/app/platform/fabric/e2e-test/fabric-samples/chaincode/abac/go/vendor/github.com/golang/protobuf/proto/text_parser.go deleted file mode 100644 index bb55a3af2..000000000 --- a/app/platform/fabric/e2e-test/fabric-samples/chaincode/abac/go/vendor/github.com/golang/protobuf/proto/text_parser.go +++ /dev/null @@ -1,880 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2010 The Go Authors. All rights reserved. 
-// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package proto - -// Functions for parsing the Text protocol buffer format. -// TODO: message sets. 
- -import ( - "encoding" - "errors" - "fmt" - "reflect" - "strconv" - "strings" - "unicode/utf8" -) - -// Error string emitted when deserializing Any and fields are already set -const anyRepeatedlyUnpacked = "Any message unpacked multiple times, or %q already set" - -type ParseError struct { - Message string - Line int // 1-based line number - Offset int // 0-based byte offset from start of input -} - -func (p *ParseError) Error() string { - if p.Line == 1 { - // show offset only for first line - return fmt.Sprintf("line 1.%d: %v", p.Offset, p.Message) - } - return fmt.Sprintf("line %d: %v", p.Line, p.Message) -} - -type token struct { - value string - err *ParseError - line int // line number - offset int // byte number from start of input, not start of line - unquoted string // the unquoted version of value, if it was a quoted string -} - -func (t *token) String() string { - if t.err == nil { - return fmt.Sprintf("%q (line=%d, offset=%d)", t.value, t.line, t.offset) - } - return fmt.Sprintf("parse error: %v", t.err) -} - -type textParser struct { - s string // remaining input - done bool // whether the parsing is finished (success or error) - backed bool // whether back() was called - offset, line int - cur token -} - -func newTextParser(s string) *textParser { - p := new(textParser) - p.s = s - p.line = 1 - p.cur.line = 1 - return p -} - -func (p *textParser) errorf(format string, a ...interface{}) *ParseError { - pe := &ParseError{fmt.Sprintf(format, a...), p.cur.line, p.cur.offset} - p.cur.err = pe - p.done = true - return pe -} - -// Numbers and identifiers are matched by [-+._A-Za-z0-9] -func isIdentOrNumberChar(c byte) bool { - switch { - case 'A' <= c && c <= 'Z', 'a' <= c && c <= 'z': - return true - case '0' <= c && c <= '9': - return true - } - switch c { - case '-', '+', '.', '_': - return true - } - return false -} - -func isWhitespace(c byte) bool { - switch c { - case ' ', '\t', '\n', '\r': - return true - } - return false -} - -func isQuote(c byte) 
bool { - switch c { - case '"', '\'': - return true - } - return false -} - -func (p *textParser) skipWhitespace() { - i := 0 - for i < len(p.s) && (isWhitespace(p.s[i]) || p.s[i] == '#') { - if p.s[i] == '#' { - // comment; skip to end of line or input - for i < len(p.s) && p.s[i] != '\n' { - i++ - } - if i == len(p.s) { - break - } - } - if p.s[i] == '\n' { - p.line++ - } - i++ - } - p.offset += i - p.s = p.s[i:len(p.s)] - if len(p.s) == 0 { - p.done = true - } -} - -func (p *textParser) advance() { - // Skip whitespace - p.skipWhitespace() - if p.done { - return - } - - // Start of non-whitespace - p.cur.err = nil - p.cur.offset, p.cur.line = p.offset, p.line - p.cur.unquoted = "" - switch p.s[0] { - case '<', '>', '{', '}', ':', '[', ']', ';', ',', '/': - // Single symbol - p.cur.value, p.s = p.s[0:1], p.s[1:len(p.s)] - case '"', '\'': - // Quoted string - i := 1 - for i < len(p.s) && p.s[i] != p.s[0] && p.s[i] != '\n' { - if p.s[i] == '\\' && i+1 < len(p.s) { - // skip escaped char - i++ - } - i++ - } - if i >= len(p.s) || p.s[i] != p.s[0] { - p.errorf("unmatched quote") - return - } - unq, err := unquoteC(p.s[1:i], rune(p.s[0])) - if err != nil { - p.errorf("invalid quoted string %s: %v", p.s[0:i+1], err) - return - } - p.cur.value, p.s = p.s[0:i+1], p.s[i+1:len(p.s)] - p.cur.unquoted = unq - default: - i := 0 - for i < len(p.s) && isIdentOrNumberChar(p.s[i]) { - i++ - } - if i == 0 { - p.errorf("unexpected byte %#x", p.s[0]) - return - } - p.cur.value, p.s = p.s[0:i], p.s[i:len(p.s)] - } - p.offset += len(p.cur.value) -} - -var ( - errBadUTF8 = errors.New("proto: bad UTF-8") -) - -func unquoteC(s string, quote rune) (string, error) { - // This is based on C++'s tokenizer.cc. - // Despite its name, this is *not* parsing C syntax. - // For instance, "\0" is an invalid quoted string. - - // Avoid allocation in trivial cases. 
- simple := true - for _, r := range s { - if r == '\\' || r == quote { - simple = false - break - } - } - if simple { - return s, nil - } - - buf := make([]byte, 0, 3*len(s)/2) - for len(s) > 0 { - r, n := utf8.DecodeRuneInString(s) - if r == utf8.RuneError && n == 1 { - return "", errBadUTF8 - } - s = s[n:] - if r != '\\' { - if r < utf8.RuneSelf { - buf = append(buf, byte(r)) - } else { - buf = append(buf, string(r)...) - } - continue - } - - ch, tail, err := unescape(s) - if err != nil { - return "", err - } - buf = append(buf, ch...) - s = tail - } - return string(buf), nil -} - -func unescape(s string) (ch string, tail string, err error) { - r, n := utf8.DecodeRuneInString(s) - if r == utf8.RuneError && n == 1 { - return "", "", errBadUTF8 - } - s = s[n:] - switch r { - case 'a': - return "\a", s, nil - case 'b': - return "\b", s, nil - case 'f': - return "\f", s, nil - case 'n': - return "\n", s, nil - case 'r': - return "\r", s, nil - case 't': - return "\t", s, nil - case 'v': - return "\v", s, nil - case '?': - return "?", s, nil // trigraph workaround - case '\'', '"', '\\': - return string(r), s, nil - case '0', '1', '2', '3', '4', '5', '6', '7': - if len(s) < 2 { - return "", "", fmt.Errorf(`\%c requires 2 following digits`, r) - } - ss := string(r) + s[:2] - s = s[2:] - i, err := strconv.ParseUint(ss, 8, 8) - if err != nil { - return "", "", fmt.Errorf(`\%s contains non-octal digits`, ss) - } - return string([]byte{byte(i)}), s, nil - case 'x', 'X', 'u', 'U': - var n int - switch r { - case 'x', 'X': - n = 2 - case 'u': - n = 4 - case 'U': - n = 8 - } - if len(s) < n { - return "", "", fmt.Errorf(`\%c requires %d following digits`, r, n) - } - ss := s[:n] - s = s[n:] - i, err := strconv.ParseUint(ss, 16, 64) - if err != nil { - return "", "", fmt.Errorf(`\%c%s contains non-hexadecimal digits`, r, ss) - } - if r == 'x' || r == 'X' { - return string([]byte{byte(i)}), s, nil - } - if i > utf8.MaxRune { - return "", "", fmt.Errorf(`\%c%s is not a valid 
Unicode code point`, r, ss) - } - return string(i), s, nil - } - return "", "", fmt.Errorf(`unknown escape \%c`, r) -} - -// Back off the parser by one token. Can only be done between calls to next(). -// It makes the next advance() a no-op. -func (p *textParser) back() { p.backed = true } - -// Advances the parser and returns the new current token. -func (p *textParser) next() *token { - if p.backed || p.done { - p.backed = false - return &p.cur - } - p.advance() - if p.done { - p.cur.value = "" - } else if len(p.cur.value) > 0 && isQuote(p.cur.value[0]) { - // Look for multiple quoted strings separated by whitespace, - // and concatenate them. - cat := p.cur - for { - p.skipWhitespace() - if p.done || !isQuote(p.s[0]) { - break - } - p.advance() - if p.cur.err != nil { - return &p.cur - } - cat.value += " " + p.cur.value - cat.unquoted += p.cur.unquoted - } - p.done = false // parser may have seen EOF, but we want to return cat - p.cur = cat - } - return &p.cur -} - -func (p *textParser) consumeToken(s string) error { - tok := p.next() - if tok.err != nil { - return tok.err - } - if tok.value != s { - p.back() - return p.errorf("expected %q, found %q", s, tok.value) - } - return nil -} - -// Return a RequiredNotSetError indicating which required field was not set. -func (p *textParser) missingRequiredFieldError(sv reflect.Value) *RequiredNotSetError { - st := sv.Type() - sprops := GetProperties(st) - for i := 0; i < st.NumField(); i++ { - if !isNil(sv.Field(i)) { - continue - } - - props := sprops.Prop[i] - if props.Required { - return &RequiredNotSetError{fmt.Sprintf("%v.%v", st, props.OrigName)} - } - } - return &RequiredNotSetError{fmt.Sprintf("%v.", st)} // should not happen -} - -// Returns the index in the struct for the named field, as well as the parsed tag properties. 
-func structFieldByName(sprops *StructProperties, name string) (int, *Properties, bool) { - i, ok := sprops.decoderOrigNames[name] - if ok { - return i, sprops.Prop[i], true - } - return -1, nil, false -} - -// Consume a ':' from the input stream (if the next token is a colon), -// returning an error if a colon is needed but not present. -func (p *textParser) checkForColon(props *Properties, typ reflect.Type) *ParseError { - tok := p.next() - if tok.err != nil { - return tok.err - } - if tok.value != ":" { - // Colon is optional when the field is a group or message. - needColon := true - switch props.Wire { - case "group": - needColon = false - case "bytes": - // A "bytes" field is either a message, a string, or a repeated field; - // those three become *T, *string and []T respectively, so we can check for - // this field being a pointer to a non-string. - if typ.Kind() == reflect.Ptr { - // *T or *string - if typ.Elem().Kind() == reflect.String { - break - } - } else if typ.Kind() == reflect.Slice { - // []T or []*T - if typ.Elem().Kind() != reflect.Ptr { - break - } - } else if typ.Kind() == reflect.String { - // The proto3 exception is for a string field, - // which requires a colon. - break - } - needColon = false - } - if needColon { - return p.errorf("expected ':', found %q", tok.value) - } - p.back() - } - return nil -} - -func (p *textParser) readStruct(sv reflect.Value, terminator string) error { - st := sv.Type() - sprops := GetProperties(st) - reqCount := sprops.reqCount - var reqFieldErr error - fieldSet := make(map[string]bool) - // A struct is a sequence of "name: value", terminated by one of - // '>' or '}', or the end of the input. A name may also be - // "[extension]" or "[type/url]". - // - // The whole struct can also be an expanded Any message, like: - // [type/url] < ... struct contents ... 
> - for { - tok := p.next() - if tok.err != nil { - return tok.err - } - if tok.value == terminator { - break - } - if tok.value == "[" { - // Looks like an extension or an Any. - // - // TODO: Check whether we need to handle - // namespace rooted names (e.g. ".something.Foo"). - extName, err := p.consumeExtName() - if err != nil { - return err - } - - if s := strings.LastIndex(extName, "/"); s >= 0 { - // If it contains a slash, it's an Any type URL. - messageName := extName[s+1:] - mt := MessageType(messageName) - if mt == nil { - return p.errorf("unrecognized message %q in google.protobuf.Any", messageName) - } - tok = p.next() - if tok.err != nil { - return tok.err - } - // consume an optional colon - if tok.value == ":" { - tok = p.next() - if tok.err != nil { - return tok.err - } - } - var terminator string - switch tok.value { - case "<": - terminator = ">" - case "{": - terminator = "}" - default: - return p.errorf("expected '{' or '<', found %q", tok.value) - } - v := reflect.New(mt.Elem()) - if pe := p.readStruct(v.Elem(), terminator); pe != nil { - return pe - } - b, err := Marshal(v.Interface().(Message)) - if err != nil { - return p.errorf("failed to marshal message of type %q: %v", messageName, err) - } - if fieldSet["type_url"] { - return p.errorf(anyRepeatedlyUnpacked, "type_url") - } - if fieldSet["value"] { - return p.errorf(anyRepeatedlyUnpacked, "value") - } - sv.FieldByName("TypeUrl").SetString(extName) - sv.FieldByName("Value").SetBytes(b) - fieldSet["type_url"] = true - fieldSet["value"] = true - continue - } - - var desc *ExtensionDesc - // This could be faster, but it's functional. - // TODO: Do something smarter than a linear scan. 
- for _, d := range RegisteredExtensions(reflect.New(st).Interface().(Message)) { - if d.Name == extName { - desc = d - break - } - } - if desc == nil { - return p.errorf("unrecognized extension %q", extName) - } - - props := &Properties{} - props.Parse(desc.Tag) - - typ := reflect.TypeOf(desc.ExtensionType) - if err := p.checkForColon(props, typ); err != nil { - return err - } - - rep := desc.repeated() - - // Read the extension structure, and set it in - // the value we're constructing. - var ext reflect.Value - if !rep { - ext = reflect.New(typ).Elem() - } else { - ext = reflect.New(typ.Elem()).Elem() - } - if err := p.readAny(ext, props); err != nil { - if _, ok := err.(*RequiredNotSetError); !ok { - return err - } - reqFieldErr = err - } - ep := sv.Addr().Interface().(Message) - if !rep { - SetExtension(ep, desc, ext.Interface()) - } else { - old, err := GetExtension(ep, desc) - var sl reflect.Value - if err == nil { - sl = reflect.ValueOf(old) // existing slice - } else { - sl = reflect.MakeSlice(typ, 0, 1) - } - sl = reflect.Append(sl, ext) - SetExtension(ep, desc, sl.Interface()) - } - if err := p.consumeOptionalSeparator(); err != nil { - return err - } - continue - } - - // This is a normal, non-extension field. - name := tok.value - var dst reflect.Value - fi, props, ok := structFieldByName(sprops, name) - if ok { - dst = sv.Field(fi) - } else if oop, ok := sprops.OneofTypes[name]; ok { - // It is a oneof. - props = oop.Prop - nv := reflect.New(oop.Type.Elem()) - dst = nv.Elem().Field(0) - field := sv.Field(oop.Field) - if !field.IsNil() { - return p.errorf("field '%s' would overwrite already parsed oneof '%s'", name, sv.Type().Field(oop.Field).Name) - } - field.Set(nv) - } - if !dst.IsValid() { - return p.errorf("unknown field name %q in %v", name, st) - } - - if dst.Kind() == reflect.Map { - // Consume any colon. - if err := p.checkForColon(props, dst.Type()); err != nil { - return err - } - - // Construct the map if it doesn't already exist. 
- if dst.IsNil() { - dst.Set(reflect.MakeMap(dst.Type())) - } - key := reflect.New(dst.Type().Key()).Elem() - val := reflect.New(dst.Type().Elem()).Elem() - - // The map entry should be this sequence of tokens: - // < key : KEY value : VALUE > - // However, implementations may omit key or value, and technically - // we should support them in any order. See b/28924776 for a time - // this went wrong. - - tok := p.next() - var terminator string - switch tok.value { - case "<": - terminator = ">" - case "{": - terminator = "}" - default: - return p.errorf("expected '{' or '<', found %q", tok.value) - } - for { - tok := p.next() - if tok.err != nil { - return tok.err - } - if tok.value == terminator { - break - } - switch tok.value { - case "key": - if err := p.consumeToken(":"); err != nil { - return err - } - if err := p.readAny(key, props.MapKeyProp); err != nil { - return err - } - if err := p.consumeOptionalSeparator(); err != nil { - return err - } - case "value": - if err := p.checkForColon(props.MapValProp, dst.Type().Elem()); err != nil { - return err - } - if err := p.readAny(val, props.MapValProp); err != nil { - return err - } - if err := p.consumeOptionalSeparator(); err != nil { - return err - } - default: - p.back() - return p.errorf(`expected "key", "value", or %q, found %q`, terminator, tok.value) - } - } - - dst.SetMapIndex(key, val) - continue - } - - // Check that it's not already set if it's not a repeated field. - if !props.Repeated && fieldSet[name] { - return p.errorf("non-repeated field %q was repeated", name) - } - - if err := p.checkForColon(props, dst.Type()); err != nil { - return err - } - - // Parse into the field. 
- fieldSet[name] = true - if err := p.readAny(dst, props); err != nil { - if _, ok := err.(*RequiredNotSetError); !ok { - return err - } - reqFieldErr = err - } - if props.Required { - reqCount-- - } - - if err := p.consumeOptionalSeparator(); err != nil { - return err - } - - } - - if reqCount > 0 { - return p.missingRequiredFieldError(sv) - } - return reqFieldErr -} - -// consumeExtName consumes extension name or expanded Any type URL and the -// following ']'. It returns the name or URL consumed. -func (p *textParser) consumeExtName() (string, error) { - tok := p.next() - if tok.err != nil { - return "", tok.err - } - - // If extension name or type url is quoted, it's a single token. - if len(tok.value) > 2 && isQuote(tok.value[0]) && tok.value[len(tok.value)-1] == tok.value[0] { - name, err := unquoteC(tok.value[1:len(tok.value)-1], rune(tok.value[0])) - if err != nil { - return "", err - } - return name, p.consumeToken("]") - } - - // Consume everything up to "]" - var parts []string - for tok.value != "]" { - parts = append(parts, tok.value) - tok = p.next() - if tok.err != nil { - return "", p.errorf("unrecognized type_url or extension name: %s", tok.err) - } - if p.done && tok.value != "]" { - return "", p.errorf("unclosed type_url or extension name") - } - } - return strings.Join(parts, ""), nil -} - -// consumeOptionalSeparator consumes an optional semicolon or comma. -// It is used in readStruct to provide backward compatibility. 
-func (p *textParser) consumeOptionalSeparator() error { - tok := p.next() - if tok.err != nil { - return tok.err - } - if tok.value != ";" && tok.value != "," { - p.back() - } - return nil -} - -func (p *textParser) readAny(v reflect.Value, props *Properties) error { - tok := p.next() - if tok.err != nil { - return tok.err - } - if tok.value == "" { - return p.errorf("unexpected EOF") - } - - switch fv := v; fv.Kind() { - case reflect.Slice: - at := v.Type() - if at.Elem().Kind() == reflect.Uint8 { - // Special case for []byte - if tok.value[0] != '"' && tok.value[0] != '\'' { - // Deliberately written out here, as the error after - // this switch statement would write "invalid []byte: ...", - // which is not as user-friendly. - return p.errorf("invalid string: %v", tok.value) - } - bytes := []byte(tok.unquoted) - fv.Set(reflect.ValueOf(bytes)) - return nil - } - // Repeated field. - if tok.value == "[" { - // Repeated field with list notation, like [1,2,3]. - for { - fv.Set(reflect.Append(fv, reflect.New(at.Elem()).Elem())) - err := p.readAny(fv.Index(fv.Len()-1), props) - if err != nil { - return err - } - tok := p.next() - if tok.err != nil { - return tok.err - } - if tok.value == "]" { - break - } - if tok.value != "," { - return p.errorf("Expected ']' or ',' found %q", tok.value) - } - } - return nil - } - // One value of the repeated field. - p.back() - fv.Set(reflect.Append(fv, reflect.New(at.Elem()).Elem())) - return p.readAny(fv.Index(fv.Len()-1), props) - case reflect.Bool: - // true/1/t/True or false/f/0/False. - switch tok.value { - case "true", "1", "t", "True": - fv.SetBool(true) - return nil - case "false", "0", "f", "False": - fv.SetBool(false) - return nil - } - case reflect.Float32, reflect.Float64: - v := tok.value - // Ignore 'f' for compatibility with output generated by C++, but don't - // remove 'f' when the value is "-inf" or "inf". 
- if strings.HasSuffix(v, "f") && tok.value != "-inf" && tok.value != "inf" { - v = v[:len(v)-1] - } - if f, err := strconv.ParseFloat(v, fv.Type().Bits()); err == nil { - fv.SetFloat(f) - return nil - } - case reflect.Int32: - if x, err := strconv.ParseInt(tok.value, 0, 32); err == nil { - fv.SetInt(x) - return nil - } - - if len(props.Enum) == 0 { - break - } - m, ok := enumValueMaps[props.Enum] - if !ok { - break - } - x, ok := m[tok.value] - if !ok { - break - } - fv.SetInt(int64(x)) - return nil - case reflect.Int64: - if x, err := strconv.ParseInt(tok.value, 0, 64); err == nil { - fv.SetInt(x) - return nil - } - - case reflect.Ptr: - // A basic field (indirected through pointer), or a repeated message/group - p.back() - fv.Set(reflect.New(fv.Type().Elem())) - return p.readAny(fv.Elem(), props) - case reflect.String: - if tok.value[0] == '"' || tok.value[0] == '\'' { - fv.SetString(tok.unquoted) - return nil - } - case reflect.Struct: - var terminator string - switch tok.value { - case "{": - terminator = "}" - case "<": - terminator = ">" - default: - return p.errorf("expected '{' or '<', found %q", tok.value) - } - // TODO: Handle nested messages which implement encoding.TextUnmarshaler. - return p.readStruct(fv, terminator) - case reflect.Uint32: - if x, err := strconv.ParseUint(tok.value, 0, 32); err == nil { - fv.SetUint(uint64(x)) - return nil - } - case reflect.Uint64: - if x, err := strconv.ParseUint(tok.value, 0, 64); err == nil { - fv.SetUint(x) - return nil - } - } - return p.errorf("invalid %v: %v", v.Type(), tok.value) -} - -// UnmarshalText reads a protocol buffer in Text format. UnmarshalText resets pb -// before starting to unmarshal, so any existing data in pb is always removed. -// If a required field is not set and no other error occurs, -// UnmarshalText returns *RequiredNotSetError. 
-func UnmarshalText(s string, pb Message) error { - if um, ok := pb.(encoding.TextUnmarshaler); ok { - return um.UnmarshalText([]byte(s)) - } - pb.Reset() - v := reflect.ValueOf(pb) - return newTextParser(s).readStruct(v.Elem(), "") -} diff --git a/app/platform/fabric/e2e-test/fabric-samples/chaincode/abac/go/vendor/github.com/hyperledger/fabric/LICENSE b/app/platform/fabric/e2e-test/fabric-samples/chaincode/abac/go/vendor/github.com/hyperledger/fabric/LICENSE deleted file mode 100644 index 8f71f43fe..000000000 --- a/app/platform/fabric/e2e-test/fabric-samples/chaincode/abac/go/vendor/github.com/hyperledger/fabric/LICENSE +++ /dev/null @@ -1,202 +0,0 @@ - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. 
- - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." 
- - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. 
You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. 
- - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. 
In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "{}" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. 
- - Copyright {yyyy} {name of copyright owner} - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. - diff --git a/app/platform/fabric/e2e-test/fabric-samples/chaincode/abac/go/vendor/github.com/hyperledger/fabric/core/chaincode/shim/ext/attrmgr/attrmgr.go b/app/platform/fabric/e2e-test/fabric-samples/chaincode/abac/go/vendor/github.com/hyperledger/fabric/core/chaincode/shim/ext/attrmgr/attrmgr.go deleted file mode 100644 index a446f5a2f..000000000 --- a/app/platform/fabric/e2e-test/fabric-samples/chaincode/abac/go/vendor/github.com/hyperledger/fabric/core/chaincode/shim/ext/attrmgr/attrmgr.go +++ /dev/null @@ -1,260 +0,0 @@ -/* -Copyright IBM Corp. 2017 All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -/* - * The attrmgr package contains utilities for managing attributes. - * Attributes are added to an X509 certificate as an extension. 
- */ - -package attrmgr - -import ( - "crypto/x509" - "crypto/x509/pkix" - "encoding/asn1" - "encoding/json" - "fmt" - - "github.com/golang/protobuf/proto" - "github.com/hyperledger/fabric/protos/msp" - "github.com/pkg/errors" -) - -var ( - // AttrOID is the ASN.1 object identifier for an attribute extension in an - // X509 certificate - AttrOID = asn1.ObjectIdentifier{1, 2, 3, 4, 5, 6, 7, 8, 1} - // AttrOIDString is the string version of AttrOID - AttrOIDString = "1.2.3.4.5.6.7.8.1" -) - -// Attribute is a name/value pair -type Attribute interface { - // GetName returns the name of the attribute - GetName() string - // GetValue returns the value of the attribute - GetValue() string -} - -// AttributeRequest is a request for an attribute -type AttributeRequest interface { - // GetName returns the name of an attribute - GetName() string - // IsRequired returns true if the attribute is required - IsRequired() bool -} - -// New constructs an attribute manager -func New() *Mgr { return &Mgr{} } - -// Mgr is the attribute manager and is the main object for this package -type Mgr struct{} - -// ProcessAttributeRequestsForCert add attributes to an X509 certificate, given -// attribute requests and attributes. -func (mgr *Mgr) ProcessAttributeRequestsForCert(requests []AttributeRequest, attributes []Attribute, cert *x509.Certificate) error { - attrs, err := mgr.ProcessAttributeRequests(requests, attributes) - if err != nil { - return err - } - return mgr.AddAttributesToCert(attrs, cert) -} - -// ProcessAttributeRequests takes an array of attribute requests and an identity's attributes -// and returns an Attributes object containing the requested attributes. 
-func (mgr *Mgr) ProcessAttributeRequests(requests []AttributeRequest, attributes []Attribute) (*Attributes, error) { - attrsMap := map[string]string{} - attrs := &Attributes{Attrs: attrsMap} - missingRequiredAttrs := []string{} - // For each of the attribute requests - for _, req := range requests { - // Get the attribute - name := req.GetName() - attr := getAttrByName(name, attributes) - if attr == nil { - if req.IsRequired() { - // Didn't find attribute and it was required; return error below - missingRequiredAttrs = append(missingRequiredAttrs, name) - } - // Skip attribute requests which aren't required - continue - } - attrsMap[name] = attr.GetValue() - } - if len(missingRequiredAttrs) > 0 { - return nil, errors.Errorf("The following required attributes are missing: %+v", - missingRequiredAttrs) - } - return attrs, nil -} - -// AddAttributesToCert adds public attribute info to an X509 certificate. -func (mgr *Mgr) AddAttributesToCert(attrs *Attributes, cert *x509.Certificate) error { - buf, err := json.Marshal(attrs) - if err != nil { - return errors.Wrap(err, "Failed to marshal attributes") - } - ext := pkix.Extension{ - Id: AttrOID, - Critical: false, - Value: buf, - } - cert.Extensions = append(cert.Extensions, ext) - return nil -} - -// GetAttributesFromCert gets the attributes from a certificate. 
-func (mgr *Mgr) GetAttributesFromCert(cert *x509.Certificate) (*Attributes, error) { - // Get certificate attributes from the certificate if it exists - buf, err := getAttributesFromCert(cert) - if err != nil { - return nil, err - } - // Unmarshal into attributes object - attrs := &Attributes{} - if buf != nil { - err := json.Unmarshal(buf, attrs) - if err != nil { - return nil, errors.Wrap(err, "Failed to unmarshal attributes from certificate") - } - } - return attrs, nil -} - -func (mgr *Mgr) GetAttributesFromIdemix(creator []byte) (*Attributes, error) { - if creator == nil { - return nil, errors.New("creator is nil") - } - - sid := &msp.SerializedIdentity{} - err := proto.Unmarshal(creator, sid) - if err != nil { - return nil, errors.Wrap(err, "failed to unmarshal transaction invoker's identity") - } - idemixID := &msp.SerializedIdemixIdentity{} - err = proto.Unmarshal(sid.IdBytes, idemixID) - if err != nil { - return nil, errors.Wrap(err, "failed to unmarshal transaction invoker's idemix identity") - } - // Unmarshal into attributes object - attrs := &Attributes{ - Attrs: make(map[string]string), - } - - ou := &msp.OrganizationUnit{} - err = proto.Unmarshal(idemixID.Ou, ou) - if err != nil { - return nil, errors.Wrap(err, "failed to unmarshal transaction invoker's ou") - } - attrs.Attrs["ou"] = ou.OrganizationalUnitIdentifier - - role := &msp.MSPRole{} - err = proto.Unmarshal(idemixID.Role, role) - if err != nil { - return nil, errors.Wrap(err, "failed to unmarshal transaction invoker's role") - } - var roleStr string - switch role.Role { - case 0: - roleStr = "member" - case 1: - roleStr = "admin" - case 2: - roleStr = "client" - case 3: - roleStr = "peer" - } - attrs.Attrs["role"] = roleStr - - return attrs, nil -} - -// Attributes contains attribute names and values -type Attributes struct { - Attrs map[string]string `json:"attrs"` -} - -// Names returns the names of the attributes -func (a *Attributes) Names() []string { - i := 0 - names := make([]string, 
len(a.Attrs)) - for name := range a.Attrs { - names[i] = name - i++ - } - return names -} - -// Contains returns true if the named attribute is found -func (a *Attributes) Contains(name string) bool { - _, ok := a.Attrs[name] - return ok -} - -// Value returns an attribute's value -func (a *Attributes) Value(name string) (string, bool, error) { - attr, ok := a.Attrs[name] - return attr, ok, nil -} - -// True returns nil if the value of attribute 'name' is true; -// otherwise, an appropriate error is returned. -func (a *Attributes) True(name string) error { - val, ok, err := a.Value(name) - if err != nil { - return err - } - if !ok { - return fmt.Errorf("Attribute '%s' was not found", name) - } - if val != "true" { - return fmt.Errorf("Attribute '%s' is not true", name) - } - return nil -} - -// Get the attribute info from a certificate extension, or return nil if not found -func getAttributesFromCert(cert *x509.Certificate) ([]byte, error) { - for _, ext := range cert.Extensions { - if isAttrOID(ext.Id) { - return ext.Value, nil - } - } - return nil, nil -} - -// Is the object ID equal to the attribute info object ID? 
-func isAttrOID(oid asn1.ObjectIdentifier) bool { - if len(oid) != len(AttrOID) { - return false - } - for idx, val := range oid { - if val != AttrOID[idx] { - return false - } - } - return true -} - -// Get an attribute from 'attrs' by its name, or nil if not found -func getAttrByName(name string, attrs []Attribute) Attribute { - for _, attr := range attrs { - if attr.GetName() == name { - return attr - } - } - return nil -} diff --git a/app/platform/fabric/e2e-test/fabric-samples/chaincode/abac/go/vendor/github.com/hyperledger/fabric/core/chaincode/shim/ext/cid/README.md b/app/platform/fabric/e2e-test/fabric-samples/chaincode/abac/go/vendor/github.com/hyperledger/fabric/core/chaincode/shim/ext/cid/README.md deleted file mode 100644 index b3add7dca..000000000 --- a/app/platform/fabric/e2e-test/fabric-samples/chaincode/abac/go/vendor/github.com/hyperledger/fabric/core/chaincode/shim/ext/cid/README.md +++ /dev/null @@ -1,214 +0,0 @@ -# Client Identity Chaincode Library - -The client identity chaincode library enables you to write chaincode which -makes access control decisions based on the identity of the client -(i.e. the invoker of the chaincode). In particular, you may make access -control decisions based on either or both of the following associated with -the client: - -* the client identity's MSP (Membership Service Provider) ID -* an attribute associated with the client identity - -Attributes are simply name and value pairs associated with an identity. -For example, `email=me@gmail.com` indicates an identity has the `email` -attribute with a value of `me@gmail.com`. - - -## Using the client identity chaincode library - -This section describes how to use the client identity chaincode library. - -All code samples below assume two things: -1. The type of the `stub` variable is `ChaincodeStubInterface` as passed - to your chaincode. -2. You have added the following import statement to your chaincode. 
- ``` - import "github.com/hyperledger/fabric/core/chaincode/lib/cid" - ``` -#### Getting the client's ID - -The following demonstrates how to get an ID for the client which is guaranteed -to be unique within the MSP: - -``` -id, err := cid.GetID(stub) -``` - -#### Getting the MSP ID - -The following demonstrates how to get the MSP ID of the client's identity: - -``` -mspid, err := cid.GetMSPID(stub) -``` - -#### Getting an attribute value - -The following demonstrates how to get the value of the *attr1* attribute: - -``` -val, ok, err := cid.GetAttributeValue(stub, "attr1") -if err != nil { - // There was an error trying to retrieve the attribute -} -if !ok { - // The client identity does not possess the attribute -} -// Do something with the value of 'val' -``` - -#### Asserting an attribute value - -Often all you want to do is to make an access control decision based on the value -of an attribute, i.e. to assert the value of an attribute. For example, the following -will return an error if the client does not have the `myapp.admin` attribute -with a value of `true`: - -``` -err := cid.AssertAttributeValue(stub, "myapp.admin", "true") -if err != nil { - // Return an error -} -``` - -This is effectively using attributes to implement role-based access control, -or RBAC for short. - -#### Getting the client's X509 certificate - -The following demonstrates how to get the X509 certificate of the client, or -nil if the client's identity was not based on an X509 certificate: - -``` -cert, err := cid.GetX509Certificate(stub) -``` - -Note that both `cert` and `err` may be nil as will be the case if the identity -is not using an X509 certificate. - -#### Performing multiple operations more efficiently - -Sometimes you may need to perform multiple operations in order to make an access -decision. For example, the following demonstrates how to grant access to -identities with MSP *org1MSP* and *attr1* OR with MSP *org1MSP* and *attr2*. 
- -``` -// Get the client ID object -id, err := cid.New(stub) -if err != nil { - // Handle error -} -mspid, err := id.GetMSPID() -if err != nil { - // Handle error -} -switch mspid { - case "org1MSP": - err = id.AssertAttributeValue("attr1", "true") - case "org2MSP": - err = id.AssertAttributeValue("attr2", "true") - default: - err = errors.New("Wrong MSP") -} -``` -Although it is not required, it is more efficient to make the `cid.New` call -to get the ClientIdentity object if you need to perform multiple operations, -as demonstrated above. - -## Adding Attributes to Identities - -This section describes how to add custom attributes to certificates when -using Hyperledger Fabric CA as well as when using an external CA. - -#### Managing attributes with Fabric CA - -There are two methods of adding attributes to an enrollment certificate -with fabric-ca: - - 1. When you register an identity, you can specify that an enrollment certificate - issued for the identity should by default contain an attribute. This behavior - can be overridden at enrollment time, but this is useful for establishing - default behavior and, assuming registration occurs outside of your application, - does not require any application change. - - The following shows how to register *user1* with two attributes: - *app1Admin* and *email*. - The ":ecert" suffix causes the *appAdmin* attribute to be inserted into user1's - enrollment certificate by default. The *email* attribute is not added - to the enrollment certificate by default. - - ``` - fabric-ca-client register --id.name user1 --id.secret user1pw --id.type user --id.affiliation org1 --id.attrs 'app1Admin=true:ecert,email=user1@gmail.com' - ``` - - 2. When you enroll an identity, you may request that one or more attributes - be added to the certificate. - For each attribute requested, you may specify whether the attribute is - optional or not. If it is not optional but does not exist for the identity, - enrollment fails. 
- - The following shows how to enroll *user1* with the *email* attribute, - without the *app1Admin* attribute and optionally with the *phone* attribute - (if the user possesses *phone* attribute). - ``` - fabric-ca-client enroll -u http://user1:user1pw@localhost:7054 --enrollment.attrs "email,phone:opt" - ``` -#### Attribute format in a certificate - -Attributes are stored inside an X509 certificate as an extension with an -ASN.1 OID (Abstract Syntax Notation Object IDentifier) -of `1.2.3.4.5.6.7.8.1`. The value of the extension is a JSON string of the -form `{"attrs":{: 0 { - s += "," - } - for j, tv := range rdn { - if j > 0 { - s += "+" - } - typeString := tv.Type.String() - typeName, ok := attributeTypeNames[typeString] - if !ok { - derBytes, err := asn1.Marshal(tv.Value) - if err == nil { - s += typeString + "=#" + hex.EncodeToString(derBytes) - continue // No value escaping necessary. - } - typeName = typeString - } - valueString := fmt.Sprint(tv.Value) - escaped := "" - begin := 0 - for idx, c := range valueString { - if (idx == 0 && (c == ' ' || c == '#')) || - (idx == len(valueString)-1 && c == ' ') { - escaped += valueString[begin:idx] - escaped += "\\" + string(c) - begin = idx + 1 - continue - } - switch c { - case ',', '+', '"', '\\', '<', '>', ';': - escaped += valueString[begin:idx] - escaped += "\\" + string(c) - begin = idx + 1 - } - } - escaped += valueString[begin:] - s += typeName + "=" + escaped - } - } - return s -} - -var attributeTypeNames = map[string]string{ - "2.5.4.6": "C", - "2.5.4.10": "O", - "2.5.4.11": "OU", - "2.5.4.3": "CN", - "2.5.4.5": "SERIALNUMBER", - "2.5.4.7": "L", - "2.5.4.8": "ST", - "2.5.4.9": "STREET", - "2.5.4.17": "POSTALCODE", -} diff --git a/app/platform/fabric/e2e-test/fabric-samples/chaincode/abac/go/vendor/github.com/hyperledger/fabric/core/chaincode/shim/ext/cid/interfaces.go 
b/app/platform/fabric/e2e-test/fabric-samples/chaincode/abac/go/vendor/github.com/hyperledger/fabric/core/chaincode/shim/ext/cid/interfaces.go deleted file mode 100644 index d5ed94a8c..000000000 --- a/app/platform/fabric/e2e-test/fabric-samples/chaincode/abac/go/vendor/github.com/hyperledger/fabric/core/chaincode/shim/ext/cid/interfaces.go +++ /dev/null @@ -1,55 +0,0 @@ -/* -Copyright IBM Corp. 2017 All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package cid - -import "crypto/x509" - -// ChaincodeStubInterface is used by deployable chaincode apps to get identity -// of the agent (or user) submitting the transaction. -type ChaincodeStubInterface interface { - // GetCreator returns `SignatureHeader.Creator` (e.g. an identity) - // of the `SignedProposal`. This is the identity of the agent (or user) - // submitting the transaction. - GetCreator() ([]byte, error) -} - -// ClientIdentity represents information about the identity that submitted the -// transaction -type ClientIdentity interface { - - // GetID returns the ID associated with the invoking identity. This ID - // is guaranteed to be unique within the MSP. - GetID() (string, error) - - // Return the MSP ID of the client - GetMSPID() (string, error) - - // GetAttributeValue returns the value of the client's attribute named `attrName`. - // If the client possesses the attribute, `found` is true and `value` equals the - // value of the attribute. 
- // If the client does not possess the attribute, `found` is false and `value` - // equals "". - GetAttributeValue(attrName string) (value string, found bool, err error) - - // AssertAttributeValue verifies that the client has the attribute named `attrName` - // with a value of `attrValue`; otherwise, an error is returned. - AssertAttributeValue(attrName, attrValue string) error - - // GetX509Certificate returns the X509 certificate associated with the client, - // or nil if it was not identified by an X509 certificate. - GetX509Certificate() (*x509.Certificate, error) -} diff --git a/app/platform/fabric/e2e-test/fabric-samples/chaincode/abac/go/vendor/github.com/hyperledger/fabric/protos/msp/identities.pb.go b/app/platform/fabric/e2e-test/fabric-samples/chaincode/abac/go/vendor/github.com/hyperledger/fabric/protos/msp/identities.pb.go deleted file mode 100644 index ddea9c95a..000000000 --- a/app/platform/fabric/e2e-test/fabric-samples/chaincode/abac/go/vendor/github.com/hyperledger/fabric/protos/msp/identities.pb.go +++ /dev/null @@ -1,179 +0,0 @@ -// Code generated by protoc-gen-go. DO NOT EDIT. -// source: msp/identities.proto - -package msp // import "github.com/hyperledger/fabric/protos/msp" - -import proto "github.com/golang/protobuf/proto" -import fmt "fmt" -import math "math" - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. 
-const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package - -// This struct represents an Identity -// (with its MSP identifier) to be used -// to serialize it and deserialize it -type SerializedIdentity struct { - // The identifier of the associated membership service provider - Mspid string `protobuf:"bytes,1,opt,name=mspid,proto3" json:"mspid,omitempty"` - // the Identity, serialized according to the rules of its MPS - IdBytes []byte `protobuf:"bytes,2,opt,name=id_bytes,json=idBytes,proto3" json:"id_bytes,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *SerializedIdentity) Reset() { *m = SerializedIdentity{} } -func (m *SerializedIdentity) String() string { return proto.CompactTextString(m) } -func (*SerializedIdentity) ProtoMessage() {} -func (*SerializedIdentity) Descriptor() ([]byte, []int) { - return fileDescriptor_identities_8fa8af3e5bf2070a, []int{0} -} -func (m *SerializedIdentity) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_SerializedIdentity.Unmarshal(m, b) -} -func (m *SerializedIdentity) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_SerializedIdentity.Marshal(b, m, deterministic) -} -func (dst *SerializedIdentity) XXX_Merge(src proto.Message) { - xxx_messageInfo_SerializedIdentity.Merge(dst, src) -} -func (m *SerializedIdentity) XXX_Size() int { - return xxx_messageInfo_SerializedIdentity.Size(m) -} -func (m *SerializedIdentity) XXX_DiscardUnknown() { - xxx_messageInfo_SerializedIdentity.DiscardUnknown(m) -} - -var xxx_messageInfo_SerializedIdentity proto.InternalMessageInfo - -func (m *SerializedIdentity) GetMspid() string { - if m != nil { - return m.Mspid - } - return "" -} - -func (m *SerializedIdentity) GetIdBytes() []byte { - if m != nil { - return m.IdBytes - } - return nil -} - -// This struct represents an Idemix Identity -// to be used to serialize it and deserialize it. 
-// The IdemixMSP will first serialize an idemix identity to bytes using -// this proto, and then uses these bytes as id_bytes in SerializedIdentity -type SerializedIdemixIdentity struct { - // nym_x is the X-component of the pseudonym elliptic curve point. - // It is a []byte representation of an amcl.BIG - // The pseudonym can be seen as a public key of the identity, it is used to verify signatures. - NymX []byte `protobuf:"bytes,1,opt,name=nym_x,json=nymX,proto3" json:"nym_x,omitempty"` - // nym_y is the Y-component of the pseudonym elliptic curve point. - // It is a []byte representation of an amcl.BIG - // The pseudonym can be seen as a public key of the identity, it is used to verify signatures. - NymY []byte `protobuf:"bytes,2,opt,name=nym_y,json=nymY,proto3" json:"nym_y,omitempty"` - // ou contains the organizational unit of the idemix identity - Ou []byte `protobuf:"bytes,3,opt,name=ou,proto3" json:"ou,omitempty"` - // role contains the role of this identity (e.g., ADMIN or MEMBER) - Role []byte `protobuf:"bytes,4,opt,name=role,proto3" json:"role,omitempty"` - // proof contains the cryptographic evidence that this identity is valid - Proof []byte `protobuf:"bytes,5,opt,name=proof,proto3" json:"proof,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *SerializedIdemixIdentity) Reset() { *m = SerializedIdemixIdentity{} } -func (m *SerializedIdemixIdentity) String() string { return proto.CompactTextString(m) } -func (*SerializedIdemixIdentity) ProtoMessage() {} -func (*SerializedIdemixIdentity) Descriptor() ([]byte, []int) { - return fileDescriptor_identities_8fa8af3e5bf2070a, []int{1} -} -func (m *SerializedIdemixIdentity) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_SerializedIdemixIdentity.Unmarshal(m, b) -} -func (m *SerializedIdemixIdentity) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return 
xxx_messageInfo_SerializedIdemixIdentity.Marshal(b, m, deterministic) -} -func (dst *SerializedIdemixIdentity) XXX_Merge(src proto.Message) { - xxx_messageInfo_SerializedIdemixIdentity.Merge(dst, src) -} -func (m *SerializedIdemixIdentity) XXX_Size() int { - return xxx_messageInfo_SerializedIdemixIdentity.Size(m) -} -func (m *SerializedIdemixIdentity) XXX_DiscardUnknown() { - xxx_messageInfo_SerializedIdemixIdentity.DiscardUnknown(m) -} - -var xxx_messageInfo_SerializedIdemixIdentity proto.InternalMessageInfo - -func (m *SerializedIdemixIdentity) GetNymX() []byte { - if m != nil { - return m.NymX - } - return nil -} - -func (m *SerializedIdemixIdentity) GetNymY() []byte { - if m != nil { - return m.NymY - } - return nil -} - -func (m *SerializedIdemixIdentity) GetOu() []byte { - if m != nil { - return m.Ou - } - return nil -} - -func (m *SerializedIdemixIdentity) GetRole() []byte { - if m != nil { - return m.Role - } - return nil -} - -func (m *SerializedIdemixIdentity) GetProof() []byte { - if m != nil { - return m.Proof - } - return nil -} - -func init() { - proto.RegisterType((*SerializedIdentity)(nil), "msp.SerializedIdentity") - proto.RegisterType((*SerializedIdemixIdentity)(nil), "msp.SerializedIdemixIdentity") -} - -func init() { proto.RegisterFile("msp/identities.proto", fileDescriptor_identities_8fa8af3e5bf2070a) } - -var fileDescriptor_identities_8fa8af3e5bf2070a = []byte{ - // 238 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x54, 0x8f, 0x3f, 0x4f, 0xc3, 0x30, - 0x10, 0x47, 0x95, 0x34, 0xe1, 0x8f, 0x55, 0x31, 0x98, 0x0e, 0x66, 0x2b, 0x9d, 0x32, 0xc5, 0x03, - 0xdf, 0xa0, 0x12, 0x03, 0x03, 0x4b, 0x58, 0x80, 0xa5, 0x6a, 0xea, 0x6b, 0x7a, 0x52, 0x2e, 0x67, - 0xd9, 0x8e, 0x54, 0x33, 0xf0, 0xd9, 0x51, 0x62, 0x40, 0xb0, 0xdd, 0xef, 0xe9, 0xe9, 0xc9, 0x16, - 0x2b, 0xf2, 0x56, 0xa3, 0x81, 0x21, 0x60, 0x40, 0xf0, 0xb5, 0x75, 0x1c, 0x58, 0x2e, 0xc8, 0xdb, - 0xcd, 0xa3, 0x90, 0x2f, 0xe0, 0x70, 0xdf, 0xe3, 0x07, 
0x98, 0xa7, 0xa4, 0x44, 0xb9, 0x12, 0x25, - 0x79, 0x8b, 0x46, 0x65, 0xeb, 0xac, 0xba, 0x6e, 0xd2, 0x90, 0x77, 0xe2, 0x0a, 0xcd, 0xae, 0x8d, - 0x01, 0xbc, 0xca, 0xd7, 0x59, 0xb5, 0x6c, 0x2e, 0xd1, 0x6c, 0xa7, 0xb9, 0xf9, 0x14, 0xea, 0x5f, - 0x86, 0xf0, 0xfc, 0x1b, 0xbb, 0x15, 0xe5, 0x10, 0x69, 0x77, 0x9e, 0x63, 0xcb, 0xa6, 0x18, 0x22, - 0xbd, 0xfe, 0xc0, 0xf8, 0x1d, 0x9a, 0xe0, 0x9b, 0xbc, 0x11, 0x39, 0x8f, 0x6a, 0x31, 0x93, 0x9c, - 0x47, 0x29, 0x45, 0xe1, 0xb8, 0x07, 0x55, 0x24, 0x67, 0xba, 0xa7, 0xa7, 0x59, 0xc7, 0x7c, 0x54, - 0xe5, 0x0c, 0xd3, 0xd8, 0x3e, 0x8b, 0x7b, 0x76, 0x5d, 0x7d, 0x8a, 0x16, 0x5c, 0x0f, 0xa6, 0x03, - 0x57, 0x1f, 0xf7, 0xad, 0xc3, 0x43, 0xfa, 0xab, 0xaf, 0xc9, 0xdb, 0xf7, 0xaa, 0xc3, 0x70, 0x1a, - 0xdb, 0xfa, 0xc0, 0xa4, 0xff, 0x98, 0x3a, 0x99, 0x3a, 0x99, 0x9a, 0xbc, 0x6d, 0x2f, 0xe6, 0xfb, - 0xe1, 0x2b, 0x00, 0x00, 0xff, 0xff, 0x13, 0xdc, 0xc8, 0x62, 0x39, 0x01, 0x00, 0x00, -} diff --git a/app/platform/fabric/e2e-test/fabric-samples/chaincode/abac/go/vendor/github.com/hyperledger/fabric/protos/msp/identities.proto b/app/platform/fabric/e2e-test/fabric-samples/chaincode/abac/go/vendor/github.com/hyperledger/fabric/protos/msp/identities.proto deleted file mode 100644 index fef457c85..000000000 --- a/app/platform/fabric/e2e-test/fabric-samples/chaincode/abac/go/vendor/github.com/hyperledger/fabric/protos/msp/identities.proto +++ /dev/null @@ -1,49 +0,0 @@ -/* -Copyright IBM Corp. All Rights Reserved. 
- -SPDX-License-Identifier: Apache-2.0 -*/ - - -syntax = "proto3"; - -option go_package = "github.com/hyperledger/fabric/protos/msp"; -option java_package = "org.hyperledger.fabric.protos.msp"; - -package msp; - -// This struct represents an Identity -// (with its MSP identifier) to be used -// to serialize it and deserialize it -message SerializedIdentity { - // The identifier of the associated membership service provider - string mspid = 1; - - // the Identity, serialized according to the rules of its MPS - bytes id_bytes = 2; -} - -// This struct represents an Idemix Identity -// to be used to serialize it and deserialize it. -// The IdemixMSP will first serialize an idemix identity to bytes using -// this proto, and then uses these bytes as id_bytes in SerializedIdentity -message SerializedIdemixIdentity { - // nym_x is the X-component of the pseudonym elliptic curve point. - // It is a []byte representation of an amcl.BIG - // The pseudonym can be seen as a public key of the identity, it is used to verify signatures. - bytes nym_x = 1; - - // nym_y is the Y-component of the pseudonym elliptic curve point. - // It is a []byte representation of an amcl.BIG - // The pseudonym can be seen as a public key of the identity, it is used to verify signatures. 
- bytes nym_y = 2; - - // ou contains the organizational unit of the idemix identity - bytes ou = 3; - - // role contains the role of this identity (e.g., ADMIN or MEMBER) - bytes role = 4; - - // proof contains the cryptographic evidence that this identity is valid - bytes proof = 5; -} \ No newline at end of file diff --git a/app/platform/fabric/e2e-test/fabric-samples/chaincode/abac/go/vendor/github.com/hyperledger/fabric/protos/msp/msp_config.go b/app/platform/fabric/e2e-test/fabric-samples/chaincode/abac/go/vendor/github.com/hyperledger/fabric/protos/msp/msp_config.go deleted file mode 100644 index 9394550c1..000000000 --- a/app/platform/fabric/e2e-test/fabric-samples/chaincode/abac/go/vendor/github.com/hyperledger/fabric/protos/msp/msp_config.go +++ /dev/null @@ -1,41 +0,0 @@ -/* -Copyright IBM Corp. 2017 All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package msp - -import ( - "fmt" - - "github.com/golang/protobuf/proto" -) - -func (mc *MSPConfig) VariablyOpaqueFields() []string { - return []string{"config"} -} - -func (mc *MSPConfig) VariablyOpaqueFieldProto(name string) (proto.Message, error) { - if name != mc.VariablyOpaqueFields()[0] { - return nil, fmt.Errorf("not a marshaled field: %s", name) - } - switch mc.Type { - case 0: - return &FabricMSPConfig{}, nil - case 1: - return &IdemixMSPConfig{}, nil - default: - return nil, fmt.Errorf("unable to decode MSP type: %v", mc.Type) - } -} diff --git a/app/platform/fabric/e2e-test/fabric-samples/chaincode/abac/go/vendor/github.com/hyperledger/fabric/protos/msp/msp_config.pb.go b/app/platform/fabric/e2e-test/fabric-samples/chaincode/abac/go/vendor/github.com/hyperledger/fabric/protos/msp/msp_config.pb.go deleted file mode 100644 index eb9066a65..000000000 --- a/app/platform/fabric/e2e-test/fabric-samples/chaincode/abac/go/vendor/github.com/hyperledger/fabric/protos/msp/msp_config.pb.go +++ /dev/null @@ -1,743 +0,0 @@ -// Code generated by protoc-gen-go. DO NOT EDIT. -// source: msp/msp_config.proto - -package msp // import "github.com/hyperledger/fabric/protos/msp" - -import proto "github.com/golang/protobuf/proto" -import fmt "fmt" -import math "math" - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package - -// MSPConfig collects all the configuration information for -// an MSP. 
The Config field should be unmarshalled in a way -// that depends on the Type -type MSPConfig struct { - // Type holds the type of the MSP; the default one would - // be of type FABRIC implementing an X.509 based provider - Type int32 `protobuf:"varint,1,opt,name=type,proto3" json:"type,omitempty"` - // Config is MSP dependent configuration info - Config []byte `protobuf:"bytes,2,opt,name=config,proto3" json:"config,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *MSPConfig) Reset() { *m = MSPConfig{} } -func (m *MSPConfig) String() string { return proto.CompactTextString(m) } -func (*MSPConfig) ProtoMessage() {} -func (*MSPConfig) Descriptor() ([]byte, []int) { - return fileDescriptor_msp_config_e749e5bd1d6d997b, []int{0} -} -func (m *MSPConfig) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_MSPConfig.Unmarshal(m, b) -} -func (m *MSPConfig) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_MSPConfig.Marshal(b, m, deterministic) -} -func (dst *MSPConfig) XXX_Merge(src proto.Message) { - xxx_messageInfo_MSPConfig.Merge(dst, src) -} -func (m *MSPConfig) XXX_Size() int { - return xxx_messageInfo_MSPConfig.Size(m) -} -func (m *MSPConfig) XXX_DiscardUnknown() { - xxx_messageInfo_MSPConfig.DiscardUnknown(m) -} - -var xxx_messageInfo_MSPConfig proto.InternalMessageInfo - -func (m *MSPConfig) GetType() int32 { - if m != nil { - return m.Type - } - return 0 -} - -func (m *MSPConfig) GetConfig() []byte { - if m != nil { - return m.Config - } - return nil -} - -// FabricMSPConfig collects all the configuration information for -// a Fabric MSP. -// Here we assume a default certificate validation policy, where -// any certificate signed by any of the listed rootCA certs would -// be considered as valid under this MSP. -// This MSP may or may not come with a signing identity. If it does, -// it can also issue signing identities. 
If it does not, it can only -// be used to validate and verify certificates. -type FabricMSPConfig struct { - // Name holds the identifier of the MSP; MSP identifier - // is chosen by the application that governs this MSP. - // For example, and assuming the default implementation of MSP, - // that is X.509-based and considers a single Issuer, - // this can refer to the Subject OU field or the Issuer OU field. - Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` - // List of root certificates trusted by this MSP - // they are used upon certificate validation (see - // comment for IntermediateCerts below) - RootCerts [][]byte `protobuf:"bytes,2,rep,name=root_certs,json=rootCerts,proto3" json:"root_certs,omitempty"` - // List of intermediate certificates trusted by this MSP; - // they are used upon certificate validation as follows: - // validation attempts to build a path from the certificate - // to be validated (which is at one end of the path) and - // one of the certs in the RootCerts field (which is at - // the other end of the path). 
If the path is longer than - // 2, certificates in the middle are searched within the - // IntermediateCerts pool - IntermediateCerts [][]byte `protobuf:"bytes,3,rep,name=intermediate_certs,json=intermediateCerts,proto3" json:"intermediate_certs,omitempty"` - // Identity denoting the administrator of this MSP - Admins [][]byte `protobuf:"bytes,4,rep,name=admins,proto3" json:"admins,omitempty"` - // Identity revocation list - RevocationList [][]byte `protobuf:"bytes,5,rep,name=revocation_list,json=revocationList,proto3" json:"revocation_list,omitempty"` - // SigningIdentity holds information on the signing identity - // this peer is to use, and which is to be imported by the - // MSP defined before - SigningIdentity *SigningIdentityInfo `protobuf:"bytes,6,opt,name=signing_identity,json=signingIdentity,proto3" json:"signing_identity,omitempty"` - // OrganizationalUnitIdentifiers holds one or more - // fabric organizational unit identifiers that belong to - // this MSP configuration - OrganizationalUnitIdentifiers []*FabricOUIdentifier `protobuf:"bytes,7,rep,name=organizational_unit_identifiers,json=organizationalUnitIdentifiers,proto3" json:"organizational_unit_identifiers,omitempty"` - // FabricCryptoConfig contains the configuration parameters - // for the cryptographic algorithms used by this MSP - CryptoConfig *FabricCryptoConfig `protobuf:"bytes,8,opt,name=crypto_config,json=cryptoConfig,proto3" json:"crypto_config,omitempty"` - // List of TLS root certificates trusted by this MSP. - // They are returned by GetTLSRootCerts. - TlsRootCerts [][]byte `protobuf:"bytes,9,rep,name=tls_root_certs,json=tlsRootCerts,proto3" json:"tls_root_certs,omitempty"` - // List of TLS intermediate certificates trusted by this MSP; - // They are returned by GetTLSIntermediateCerts. 
- TlsIntermediateCerts [][]byte `protobuf:"bytes,10,rep,name=tls_intermediate_certs,json=tlsIntermediateCerts,proto3" json:"tls_intermediate_certs,omitempty"` - // fabric_node_ous contains the configuration to distinguish clients from peers from orderers - // based on the OUs. - FabricNodeOus *FabricNodeOUs `protobuf:"bytes,11,opt,name=fabric_node_ous,json=fabricNodeOus,proto3" json:"fabric_node_ous,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *FabricMSPConfig) Reset() { *m = FabricMSPConfig{} } -func (m *FabricMSPConfig) String() string { return proto.CompactTextString(m) } -func (*FabricMSPConfig) ProtoMessage() {} -func (*FabricMSPConfig) Descriptor() ([]byte, []int) { - return fileDescriptor_msp_config_e749e5bd1d6d997b, []int{1} -} -func (m *FabricMSPConfig) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_FabricMSPConfig.Unmarshal(m, b) -} -func (m *FabricMSPConfig) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_FabricMSPConfig.Marshal(b, m, deterministic) -} -func (dst *FabricMSPConfig) XXX_Merge(src proto.Message) { - xxx_messageInfo_FabricMSPConfig.Merge(dst, src) -} -func (m *FabricMSPConfig) XXX_Size() int { - return xxx_messageInfo_FabricMSPConfig.Size(m) -} -func (m *FabricMSPConfig) XXX_DiscardUnknown() { - xxx_messageInfo_FabricMSPConfig.DiscardUnknown(m) -} - -var xxx_messageInfo_FabricMSPConfig proto.InternalMessageInfo - -func (m *FabricMSPConfig) GetName() string { - if m != nil { - return m.Name - } - return "" -} - -func (m *FabricMSPConfig) GetRootCerts() [][]byte { - if m != nil { - return m.RootCerts - } - return nil -} - -func (m *FabricMSPConfig) GetIntermediateCerts() [][]byte { - if m != nil { - return m.IntermediateCerts - } - return nil -} - -func (m *FabricMSPConfig) GetAdmins() [][]byte { - if m != nil { - return m.Admins - } - return nil -} - -func (m *FabricMSPConfig) GetRevocationList() 
[][]byte { - if m != nil { - return m.RevocationList - } - return nil -} - -func (m *FabricMSPConfig) GetSigningIdentity() *SigningIdentityInfo { - if m != nil { - return m.SigningIdentity - } - return nil -} - -func (m *FabricMSPConfig) GetOrganizationalUnitIdentifiers() []*FabricOUIdentifier { - if m != nil { - return m.OrganizationalUnitIdentifiers - } - return nil -} - -func (m *FabricMSPConfig) GetCryptoConfig() *FabricCryptoConfig { - if m != nil { - return m.CryptoConfig - } - return nil -} - -func (m *FabricMSPConfig) GetTlsRootCerts() [][]byte { - if m != nil { - return m.TlsRootCerts - } - return nil -} - -func (m *FabricMSPConfig) GetTlsIntermediateCerts() [][]byte { - if m != nil { - return m.TlsIntermediateCerts - } - return nil -} - -func (m *FabricMSPConfig) GetFabricNodeOus() *FabricNodeOUs { - if m != nil { - return m.FabricNodeOus - } - return nil -} - -// FabricCryptoConfig contains configuration parameters -// for the cryptographic algorithms used by the MSP -// this configuration refers to -type FabricCryptoConfig struct { - // SignatureHashFamily is a string representing the hash family to be used - // during sign and verify operations. - // Allowed values are "SHA2" and "SHA3". - SignatureHashFamily string `protobuf:"bytes,1,opt,name=signature_hash_family,json=signatureHashFamily,proto3" json:"signature_hash_family,omitempty"` - // IdentityIdentifierHashFunction is a string representing the hash function - // to be used during the computation of the identity identifier of an MSP identity. - // Allowed values are "SHA256", "SHA384" and "SHA3_256", "SHA3_384". 
- IdentityIdentifierHashFunction string `protobuf:"bytes,2,opt,name=identity_identifier_hash_function,json=identityIdentifierHashFunction,proto3" json:"identity_identifier_hash_function,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *FabricCryptoConfig) Reset() { *m = FabricCryptoConfig{} } -func (m *FabricCryptoConfig) String() string { return proto.CompactTextString(m) } -func (*FabricCryptoConfig) ProtoMessage() {} -func (*FabricCryptoConfig) Descriptor() ([]byte, []int) { - return fileDescriptor_msp_config_e749e5bd1d6d997b, []int{2} -} -func (m *FabricCryptoConfig) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_FabricCryptoConfig.Unmarshal(m, b) -} -func (m *FabricCryptoConfig) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_FabricCryptoConfig.Marshal(b, m, deterministic) -} -func (dst *FabricCryptoConfig) XXX_Merge(src proto.Message) { - xxx_messageInfo_FabricCryptoConfig.Merge(dst, src) -} -func (m *FabricCryptoConfig) XXX_Size() int { - return xxx_messageInfo_FabricCryptoConfig.Size(m) -} -func (m *FabricCryptoConfig) XXX_DiscardUnknown() { - xxx_messageInfo_FabricCryptoConfig.DiscardUnknown(m) -} - -var xxx_messageInfo_FabricCryptoConfig proto.InternalMessageInfo - -func (m *FabricCryptoConfig) GetSignatureHashFamily() string { - if m != nil { - return m.SignatureHashFamily - } - return "" -} - -func (m *FabricCryptoConfig) GetIdentityIdentifierHashFunction() string { - if m != nil { - return m.IdentityIdentifierHashFunction - } - return "" -} - -// IdemixMSPConfig collects all the configuration information for -// an Idemix MSP. 
-type IdemixMSPConfig struct { - // Name holds the identifier of the MSP - Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` - // ipk represents the (serialized) issuer public key - Ipk []byte `protobuf:"bytes,2,opt,name=ipk,proto3" json:"ipk,omitempty"` - // signer may contain crypto material to configure a default signer - Signer *IdemixMSPSignerConfig `protobuf:"bytes,3,opt,name=signer,proto3" json:"signer,omitempty"` - // revocation_pk is the public key used for revocation of credentials - RevocationPk []byte `protobuf:"bytes,4,opt,name=revocation_pk,json=revocationPk,proto3" json:"revocation_pk,omitempty"` - // epoch represents the current epoch (time interval) used for revocation - Epoch int64 `protobuf:"varint,5,opt,name=epoch,proto3" json:"epoch,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *IdemixMSPConfig) Reset() { *m = IdemixMSPConfig{} } -func (m *IdemixMSPConfig) String() string { return proto.CompactTextString(m) } -func (*IdemixMSPConfig) ProtoMessage() {} -func (*IdemixMSPConfig) Descriptor() ([]byte, []int) { - return fileDescriptor_msp_config_e749e5bd1d6d997b, []int{3} -} -func (m *IdemixMSPConfig) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_IdemixMSPConfig.Unmarshal(m, b) -} -func (m *IdemixMSPConfig) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_IdemixMSPConfig.Marshal(b, m, deterministic) -} -func (dst *IdemixMSPConfig) XXX_Merge(src proto.Message) { - xxx_messageInfo_IdemixMSPConfig.Merge(dst, src) -} -func (m *IdemixMSPConfig) XXX_Size() int { - return xxx_messageInfo_IdemixMSPConfig.Size(m) -} -func (m *IdemixMSPConfig) XXX_DiscardUnknown() { - xxx_messageInfo_IdemixMSPConfig.DiscardUnknown(m) -} - -var xxx_messageInfo_IdemixMSPConfig proto.InternalMessageInfo - -func (m *IdemixMSPConfig) GetName() string { - if m != nil { - return m.Name - } - return "" -} - -func (m 
*IdemixMSPConfig) GetIpk() []byte { - if m != nil { - return m.Ipk - } - return nil -} - -func (m *IdemixMSPConfig) GetSigner() *IdemixMSPSignerConfig { - if m != nil { - return m.Signer - } - return nil -} - -func (m *IdemixMSPConfig) GetRevocationPk() []byte { - if m != nil { - return m.RevocationPk - } - return nil -} - -func (m *IdemixMSPConfig) GetEpoch() int64 { - if m != nil { - return m.Epoch - } - return 0 -} - -// IdemixMSPSIgnerConfig contains the crypto material to set up an idemix signing identity -type IdemixMSPSignerConfig struct { - // cred represents the serialized idemix credential of the default signer - Cred []byte `protobuf:"bytes,1,opt,name=cred,proto3" json:"cred,omitempty"` - // sk is the secret key of the default signer, corresponding to credential Cred - Sk []byte `protobuf:"bytes,2,opt,name=sk,proto3" json:"sk,omitempty"` - // organizational_unit_identifier defines the organizational unit the default signer is in - OrganizationalUnitIdentifier string `protobuf:"bytes,3,opt,name=organizational_unit_identifier,json=organizationalUnitIdentifier,proto3" json:"organizational_unit_identifier,omitempty"` - // role defines whether the default signer is admin, peer, member or client - Role int32 `protobuf:"varint,4,opt,name=role,proto3" json:"role,omitempty"` - // enrollment_id contains the enrollment id of this signer - EnrollmentId string `protobuf:"bytes,5,opt,name=enrollment_id,json=enrollmentId,proto3" json:"enrollment_id,omitempty"` - // credential_revocation_information contains a serialized CredentialRevocationInformation - CredentialRevocationInformation []byte `protobuf:"bytes,6,opt,name=credential_revocation_information,json=credentialRevocationInformation,proto3" json:"credential_revocation_information,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *IdemixMSPSignerConfig) Reset() { *m = IdemixMSPSignerConfig{} } -func (m *IdemixMSPSignerConfig) 
String() string { return proto.CompactTextString(m) } -func (*IdemixMSPSignerConfig) ProtoMessage() {} -func (*IdemixMSPSignerConfig) Descriptor() ([]byte, []int) { - return fileDescriptor_msp_config_e749e5bd1d6d997b, []int{4} -} -func (m *IdemixMSPSignerConfig) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_IdemixMSPSignerConfig.Unmarshal(m, b) -} -func (m *IdemixMSPSignerConfig) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_IdemixMSPSignerConfig.Marshal(b, m, deterministic) -} -func (dst *IdemixMSPSignerConfig) XXX_Merge(src proto.Message) { - xxx_messageInfo_IdemixMSPSignerConfig.Merge(dst, src) -} -func (m *IdemixMSPSignerConfig) XXX_Size() int { - return xxx_messageInfo_IdemixMSPSignerConfig.Size(m) -} -func (m *IdemixMSPSignerConfig) XXX_DiscardUnknown() { - xxx_messageInfo_IdemixMSPSignerConfig.DiscardUnknown(m) -} - -var xxx_messageInfo_IdemixMSPSignerConfig proto.InternalMessageInfo - -func (m *IdemixMSPSignerConfig) GetCred() []byte { - if m != nil { - return m.Cred - } - return nil -} - -func (m *IdemixMSPSignerConfig) GetSk() []byte { - if m != nil { - return m.Sk - } - return nil -} - -func (m *IdemixMSPSignerConfig) GetOrganizationalUnitIdentifier() string { - if m != nil { - return m.OrganizationalUnitIdentifier - } - return "" -} - -func (m *IdemixMSPSignerConfig) GetRole() int32 { - if m != nil { - return m.Role - } - return 0 -} - -func (m *IdemixMSPSignerConfig) GetEnrollmentId() string { - if m != nil { - return m.EnrollmentId - } - return "" -} - -func (m *IdemixMSPSignerConfig) GetCredentialRevocationInformation() []byte { - if m != nil { - return m.CredentialRevocationInformation - } - return nil -} - -// SigningIdentityInfo represents the configuration information -// related to the signing identity the peer is to use for generating -// endorsements -type SigningIdentityInfo struct { - // PublicSigner carries the public information of the signing - // identity. 
For an X.509 provider this would be represented by - // an X.509 certificate - PublicSigner []byte `protobuf:"bytes,1,opt,name=public_signer,json=publicSigner,proto3" json:"public_signer,omitempty"` - // PrivateSigner denotes a reference to the private key of the - // peer's signing identity - PrivateSigner *KeyInfo `protobuf:"bytes,2,opt,name=private_signer,json=privateSigner,proto3" json:"private_signer,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *SigningIdentityInfo) Reset() { *m = SigningIdentityInfo{} } -func (m *SigningIdentityInfo) String() string { return proto.CompactTextString(m) } -func (*SigningIdentityInfo) ProtoMessage() {} -func (*SigningIdentityInfo) Descriptor() ([]byte, []int) { - return fileDescriptor_msp_config_e749e5bd1d6d997b, []int{5} -} -func (m *SigningIdentityInfo) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_SigningIdentityInfo.Unmarshal(m, b) -} -func (m *SigningIdentityInfo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_SigningIdentityInfo.Marshal(b, m, deterministic) -} -func (dst *SigningIdentityInfo) XXX_Merge(src proto.Message) { - xxx_messageInfo_SigningIdentityInfo.Merge(dst, src) -} -func (m *SigningIdentityInfo) XXX_Size() int { - return xxx_messageInfo_SigningIdentityInfo.Size(m) -} -func (m *SigningIdentityInfo) XXX_DiscardUnknown() { - xxx_messageInfo_SigningIdentityInfo.DiscardUnknown(m) -} - -var xxx_messageInfo_SigningIdentityInfo proto.InternalMessageInfo - -func (m *SigningIdentityInfo) GetPublicSigner() []byte { - if m != nil { - return m.PublicSigner - } - return nil -} - -func (m *SigningIdentityInfo) GetPrivateSigner() *KeyInfo { - if m != nil { - return m.PrivateSigner - } - return nil -} - -// KeyInfo represents a (secret) key that is either already stored -// in the bccsp/keystore or key material to be imported to the -// bccsp key-store. 
In later versions it may contain also a -// keystore identifier -type KeyInfo struct { - // Identifier of the key inside the default keystore; this for - // the case of Software BCCSP as well as the HSM BCCSP would be - // the SKI of the key - KeyIdentifier string `protobuf:"bytes,1,opt,name=key_identifier,json=keyIdentifier,proto3" json:"key_identifier,omitempty"` - // KeyMaterial (optional) for the key to be imported; this is - // properly encoded key bytes, prefixed by the type of the key - KeyMaterial []byte `protobuf:"bytes,2,opt,name=key_material,json=keyMaterial,proto3" json:"key_material,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *KeyInfo) Reset() { *m = KeyInfo{} } -func (m *KeyInfo) String() string { return proto.CompactTextString(m) } -func (*KeyInfo) ProtoMessage() {} -func (*KeyInfo) Descriptor() ([]byte, []int) { - return fileDescriptor_msp_config_e749e5bd1d6d997b, []int{6} -} -func (m *KeyInfo) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_KeyInfo.Unmarshal(m, b) -} -func (m *KeyInfo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_KeyInfo.Marshal(b, m, deterministic) -} -func (dst *KeyInfo) XXX_Merge(src proto.Message) { - xxx_messageInfo_KeyInfo.Merge(dst, src) -} -func (m *KeyInfo) XXX_Size() int { - return xxx_messageInfo_KeyInfo.Size(m) -} -func (m *KeyInfo) XXX_DiscardUnknown() { - xxx_messageInfo_KeyInfo.DiscardUnknown(m) -} - -var xxx_messageInfo_KeyInfo proto.InternalMessageInfo - -func (m *KeyInfo) GetKeyIdentifier() string { - if m != nil { - return m.KeyIdentifier - } - return "" -} - -func (m *KeyInfo) GetKeyMaterial() []byte { - if m != nil { - return m.KeyMaterial - } - return nil -} - -// FabricOUIdentifier represents an organizational unit and -// its related chain of trust identifier. 
-type FabricOUIdentifier struct { - // Certificate represents the second certificate in a certification chain. - // (Notice that the first certificate in a certification chain is supposed - // to be the certificate of an identity). - // It must correspond to the certificate of root or intermediate CA - // recognized by the MSP this message belongs to. - // Starting from this certificate, a certification chain is computed - // and bound to the OrganizationUnitIdentifier specified - Certificate []byte `protobuf:"bytes,1,opt,name=certificate,proto3" json:"certificate,omitempty"` - // OrganizationUnitIdentifier defines the organizational unit under the - // MSP identified with MSPIdentifier - OrganizationalUnitIdentifier string `protobuf:"bytes,2,opt,name=organizational_unit_identifier,json=organizationalUnitIdentifier,proto3" json:"organizational_unit_identifier,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *FabricOUIdentifier) Reset() { *m = FabricOUIdentifier{} } -func (m *FabricOUIdentifier) String() string { return proto.CompactTextString(m) } -func (*FabricOUIdentifier) ProtoMessage() {} -func (*FabricOUIdentifier) Descriptor() ([]byte, []int) { - return fileDescriptor_msp_config_e749e5bd1d6d997b, []int{7} -} -func (m *FabricOUIdentifier) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_FabricOUIdentifier.Unmarshal(m, b) -} -func (m *FabricOUIdentifier) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_FabricOUIdentifier.Marshal(b, m, deterministic) -} -func (dst *FabricOUIdentifier) XXX_Merge(src proto.Message) { - xxx_messageInfo_FabricOUIdentifier.Merge(dst, src) -} -func (m *FabricOUIdentifier) XXX_Size() int { - return xxx_messageInfo_FabricOUIdentifier.Size(m) -} -func (m *FabricOUIdentifier) XXX_DiscardUnknown() { - xxx_messageInfo_FabricOUIdentifier.DiscardUnknown(m) -} - -var xxx_messageInfo_FabricOUIdentifier 
proto.InternalMessageInfo - -func (m *FabricOUIdentifier) GetCertificate() []byte { - if m != nil { - return m.Certificate - } - return nil -} - -func (m *FabricOUIdentifier) GetOrganizationalUnitIdentifier() string { - if m != nil { - return m.OrganizationalUnitIdentifier - } - return "" -} - -// FabricNodeOUs contains configuration to tell apart clients from peers from orderers -// based on OUs. If NodeOUs recognition is enabled then an msp identity -// that does not contain any of the specified OU will be considered invalid. -type FabricNodeOUs struct { - // If true then an msp identity that does not contain any of the specified OU will be considered invalid. - Enable bool `protobuf:"varint,1,opt,name=enable,proto3" json:"enable,omitempty"` - // OU Identifier of the clients - ClientOuIdentifier *FabricOUIdentifier `protobuf:"bytes,2,opt,name=client_ou_identifier,json=clientOuIdentifier,proto3" json:"client_ou_identifier,omitempty"` - // OU Identifier of the peers - PeerOuIdentifier *FabricOUIdentifier `protobuf:"bytes,3,opt,name=peer_ou_identifier,json=peerOuIdentifier,proto3" json:"peer_ou_identifier,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *FabricNodeOUs) Reset() { *m = FabricNodeOUs{} } -func (m *FabricNodeOUs) String() string { return proto.CompactTextString(m) } -func (*FabricNodeOUs) ProtoMessage() {} -func (*FabricNodeOUs) Descriptor() ([]byte, []int) { - return fileDescriptor_msp_config_e749e5bd1d6d997b, []int{8} -} -func (m *FabricNodeOUs) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_FabricNodeOUs.Unmarshal(m, b) -} -func (m *FabricNodeOUs) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_FabricNodeOUs.Marshal(b, m, deterministic) -} -func (dst *FabricNodeOUs) XXX_Merge(src proto.Message) { - xxx_messageInfo_FabricNodeOUs.Merge(dst, src) -} -func (m *FabricNodeOUs) XXX_Size() int { - return 
xxx_messageInfo_FabricNodeOUs.Size(m) -} -func (m *FabricNodeOUs) XXX_DiscardUnknown() { - xxx_messageInfo_FabricNodeOUs.DiscardUnknown(m) -} - -var xxx_messageInfo_FabricNodeOUs proto.InternalMessageInfo - -func (m *FabricNodeOUs) GetEnable() bool { - if m != nil { - return m.Enable - } - return false -} - -func (m *FabricNodeOUs) GetClientOuIdentifier() *FabricOUIdentifier { - if m != nil { - return m.ClientOuIdentifier - } - return nil -} - -func (m *FabricNodeOUs) GetPeerOuIdentifier() *FabricOUIdentifier { - if m != nil { - return m.PeerOuIdentifier - } - return nil -} - -func init() { - proto.RegisterType((*MSPConfig)(nil), "msp.MSPConfig") - proto.RegisterType((*FabricMSPConfig)(nil), "msp.FabricMSPConfig") - proto.RegisterType((*FabricCryptoConfig)(nil), "msp.FabricCryptoConfig") - proto.RegisterType((*IdemixMSPConfig)(nil), "msp.IdemixMSPConfig") - proto.RegisterType((*IdemixMSPSignerConfig)(nil), "msp.IdemixMSPSignerConfig") - proto.RegisterType((*SigningIdentityInfo)(nil), "msp.SigningIdentityInfo") - proto.RegisterType((*KeyInfo)(nil), "msp.KeyInfo") - proto.RegisterType((*FabricOUIdentifier)(nil), "msp.FabricOUIdentifier") - proto.RegisterType((*FabricNodeOUs)(nil), "msp.FabricNodeOUs") -} - -func init() { proto.RegisterFile("msp/msp_config.proto", fileDescriptor_msp_config_e749e5bd1d6d997b) } - -var fileDescriptor_msp_config_e749e5bd1d6d997b = []byte{ - // 847 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x55, 0x5f, 0x6f, 0xe3, 0x44, - 0x10, 0x57, 0x92, 0x26, 0x77, 0x99, 0x38, 0x49, 0xd9, 0xeb, 0x15, 0x0b, 0x71, 0x77, 0xa9, 0x01, - 0x91, 0x17, 0x52, 0xa9, 0x87, 0x84, 0x84, 0x78, 0xba, 0xc2, 0x09, 0x03, 0xa5, 0xd5, 0x56, 0x7d, - 0xe1, 0xc5, 0xda, 0xd8, 0x9b, 0x64, 0x65, 0x7b, 0xd7, 0xda, 0x5d, 0x9f, 0x08, 0xe2, 0x99, 0x2f, - 0xc0, 0x77, 0xe0, 0x99, 0x57, 0xbe, 0x1d, 0xda, 0x3f, 0x8d, 0x9d, 0x6b, 0x15, 0x78, 0x9b, 0x9d, - 0xf9, 0xcd, 0xcf, 0xb3, 0xbf, 0x99, 0x59, 0xc3, 0x49, 0xa9, 0xaa, 0xf3, 
0x52, 0x55, 0x49, 0x2a, - 0xf8, 0x8a, 0xad, 0x17, 0x95, 0x14, 0x5a, 0xa0, 0x5e, 0xa9, 0xaa, 0xe8, 0x2b, 0x18, 0x5e, 0xdd, - 0xde, 0x5c, 0x5a, 0x3f, 0x42, 0x70, 0xa4, 0xb7, 0x15, 0x0d, 0x3b, 0xb3, 0xce, 0xbc, 0x8f, 0xad, - 0x8d, 0x4e, 0x61, 0xe0, 0xb2, 0xc2, 0xee, 0xac, 0x33, 0x0f, 0xb0, 0x3f, 0x45, 0x7f, 0x1f, 0xc1, - 0xf4, 0x2d, 0x59, 0x4a, 0x96, 0xee, 0xe5, 0x73, 0x52, 0xba, 0xfc, 0x21, 0xb6, 0x36, 0x7a, 0x01, - 0x20, 0x85, 0xd0, 0x49, 0x4a, 0xa5, 0x56, 0x61, 0x77, 0xd6, 0x9b, 0x07, 0x78, 0x68, 0x3c, 0x97, - 0xc6, 0x81, 0xbe, 0x00, 0xc4, 0xb8, 0xa6, 0xb2, 0xa4, 0x19, 0x23, 0x9a, 0x7a, 0x58, 0xcf, 0xc2, - 0x3e, 0x68, 0x47, 0x1c, 0xfc, 0x14, 0x06, 0x24, 0x2b, 0x19, 0x57, 0xe1, 0x91, 0x85, 0xf8, 0x13, - 0xfa, 0x1c, 0xa6, 0x92, 0xbe, 0x13, 0x29, 0xd1, 0x4c, 0xf0, 0xa4, 0x60, 0x4a, 0x87, 0x7d, 0x0b, - 0x98, 0x34, 0xee, 0x9f, 0x98, 0xd2, 0xe8, 0x12, 0x8e, 0x15, 0x5b, 0x73, 0xc6, 0xd7, 0x09, 0xcb, - 0x28, 0xd7, 0x4c, 0x6f, 0xc3, 0xc1, 0xac, 0x33, 0x1f, 0x5d, 0x84, 0x8b, 0x52, 0x55, 0x8b, 0x5b, - 0x17, 0x8c, 0x7d, 0x2c, 0xe6, 0x2b, 0x81, 0xa7, 0x6a, 0xdf, 0x89, 0x12, 0x78, 0x25, 0xe4, 0x9a, - 0x70, 0xf6, 0x9b, 0x25, 0x26, 0x45, 0x52, 0x73, 0xa6, 0x3d, 0xe1, 0x8a, 0x51, 0xa9, 0xc2, 0x27, - 0xb3, 0xde, 0x7c, 0x74, 0xf1, 0xa1, 0xe5, 0x74, 0x32, 0x5d, 0xdf, 0xc5, 0xbb, 0x38, 0x7e, 0xb1, - 0x9f, 0x7f, 0xc7, 0x99, 0x6e, 0xa2, 0x0a, 0x7d, 0x03, 0xe3, 0x54, 0x6e, 0x2b, 0x2d, 0x7c, 0xc7, - 0xc2, 0xa7, 0xb6, 0xc4, 0x36, 0xdd, 0xa5, 0x8d, 0x3b, 0xe1, 0x71, 0x90, 0xb6, 0x4e, 0xe8, 0x53, - 0x98, 0xe8, 0x42, 0x25, 0x2d, 0xd9, 0x87, 0x56, 0x8b, 0x40, 0x17, 0x0a, 0xef, 0x94, 0xff, 0x12, - 0x4e, 0x0d, 0xea, 0x11, 0xf5, 0xc1, 0xa2, 0x4f, 0x74, 0xa1, 0xe2, 0x07, 0x0d, 0xf8, 0x1a, 0xa6, - 0x2b, 0xfb, 0xfd, 0x84, 0x8b, 0x8c, 0x26, 0xa2, 0x56, 0xe1, 0xc8, 0xd6, 0x86, 0x5a, 0xb5, 0xfd, - 0x2c, 0x32, 0x7a, 0x7d, 0xa7, 0xf0, 0x78, 0xd5, 0x1c, 0x6b, 0x15, 0xfd, 0xd9, 0x01, 0xf4, 0xb0, - 0x78, 0x74, 0x01, 0xcf, 0x8d, 0xc0, 0x44, 0xd7, 0x92, 0x26, 0x1b, 0xa2, 0x36, 0xc9, 0x8a, 0x94, - 0xac, 0xd8, 
0xfa, 0x31, 0x7a, 0xb6, 0x0b, 0x7e, 0x4f, 0xd4, 0xe6, 0xad, 0x0d, 0xa1, 0x18, 0xce, - 0xee, 0xdb, 0xd7, 0x92, 0xdd, 0x67, 0xd7, 0x3c, 0x35, 0xb2, 0xda, 0x81, 0x1d, 0xe2, 0x97, 0xf7, - 0xc0, 0x46, 0x60, 0x4b, 0xe4, 0x51, 0xd1, 0x5f, 0x1d, 0x98, 0xc6, 0x19, 0x2d, 0xd9, 0xaf, 0x87, - 0x07, 0xf9, 0x18, 0x7a, 0xac, 0xca, 0xfd, 0x16, 0x18, 0x13, 0x5d, 0xc0, 0xc0, 0xd4, 0x46, 0x65, - 0xd8, 0xb3, 0x12, 0x7c, 0x64, 0x25, 0xd8, 0x71, 0xdd, 0xda, 0x98, 0xef, 0x90, 0x47, 0xa2, 0x4f, - 0x60, 0xdc, 0x1a, 0xd4, 0x2a, 0x0f, 0x8f, 0x2c, 0x5f, 0xd0, 0x38, 0x6f, 0x72, 0x74, 0x02, 0x7d, - 0x5a, 0x89, 0x74, 0x13, 0xf6, 0x67, 0x9d, 0x79, 0x0f, 0xbb, 0x43, 0xf4, 0x47, 0x17, 0x9e, 0x3f, - 0x4a, 0x6e, 0xca, 0x4d, 0x25, 0xcd, 0x6c, 0xb9, 0x01, 0xb6, 0x36, 0x9a, 0x40, 0x57, 0xdd, 0x57, - 0xdb, 0x55, 0x39, 0xfa, 0x16, 0x5e, 0x1e, 0x9e, 0x59, 0x7b, 0x89, 0x21, 0xfe, 0xf8, 0xd0, 0x64, - 0x9a, 0x2f, 0x49, 0x51, 0x50, 0x5b, 0x75, 0x1f, 0x5b, 0xdb, 0x5c, 0x89, 0x72, 0x29, 0x8a, 0xa2, - 0xa4, 0xdc, 0x10, 0xda, 0xaa, 0x87, 0x38, 0x68, 0x9c, 0x71, 0x86, 0x7e, 0x80, 0x33, 0x53, 0x96, - 0x21, 0x22, 0x45, 0xd2, 0x92, 0x80, 0xf1, 0x95, 0x90, 0xa5, 0xb5, 0xed, 0x22, 0x06, 0xf8, 0x55, - 0x03, 0xc4, 0x3b, 0x5c, 0xdc, 0xc0, 0x22, 0x01, 0xcf, 0x1e, 0x59, 0x53, 0x53, 0x47, 0x55, 0x2f, - 0x0b, 0x96, 0x26, 0xbe, 0x2b, 0x4e, 0x8e, 0xc0, 0x39, 0x9d, 0x60, 0xe8, 0x35, 0x4c, 0x2a, 0xc9, - 0xde, 0x99, 0x61, 0xf7, 0xa8, 0xae, 0xed, 0x5d, 0x60, 0x7b, 0xf7, 0x23, 0x75, 0x1b, 0x3f, 0xf6, - 0x18, 0x97, 0x14, 0xdd, 0xc2, 0x13, 0x1f, 0x41, 0x9f, 0xc1, 0x24, 0xa7, 0xed, 0x99, 0xf3, 0x33, - 0x32, 0xce, 0x69, 0x6b, 0xc0, 0xd0, 0x19, 0x04, 0x06, 0x56, 0x12, 0x4d, 0x25, 0x23, 0x85, 0xef, - 0xc3, 0x28, 0xa7, 0xdb, 0x2b, 0xef, 0x8a, 0x7e, 0xbf, 0x5f, 0x86, 0xf6, 0xc3, 0x80, 0x66, 0x30, - 0x32, 0x4b, 0xc8, 0x56, 0x2c, 0x25, 0x9a, 0xfa, 0x2b, 0xb4, 0x5d, 0xff, 0xa3, 0x91, 0xdd, 0xff, - 0x6e, 0x64, 0xf4, 0x4f, 0x07, 0xc6, 0x7b, 0xcb, 0x6a, 0x9e, 0x56, 0xca, 0xc9, 0xb2, 0x70, 0x1f, - 0x7d, 0x8a, 0xfd, 0x09, 0xc5, 0x70, 0x92, 0x16, 
0xcc, 0xb4, 0x56, 0xd4, 0xef, 0x7f, 0xe5, 0xc0, - 0x0b, 0x87, 0x5c, 0xd2, 0x75, 0xdd, 0xba, 0xdc, 0x77, 0x80, 0x2a, 0x4a, 0xe5, 0x7b, 0x44, 0xbd, - 0xc3, 0x44, 0xc7, 0x26, 0xa5, 0x4d, 0xf3, 0x26, 0x81, 0x33, 0x21, 0xd7, 0x8b, 0xcd, 0xb6, 0xa2, - 0xb2, 0xa0, 0xd9, 0x9a, 0xca, 0x85, 0x7b, 0x68, 0xdc, 0x8f, 0x4d, 0x19, 0xa6, 0x37, 0xc7, 0x57, - 0xaa, 0x72, 0xeb, 0x71, 0x43, 0xd2, 0x9c, 0xac, 0xe9, 0x2f, 0xf3, 0x35, 0xd3, 0x9b, 0x7a, 0xb9, - 0x48, 0x45, 0x79, 0xde, 0xca, 0x3d, 0x77, 0xb9, 0xe7, 0x2e, 0xd7, 0xfc, 0x26, 0x97, 0x03, 0x6b, - 0xbf, 0xfe, 0x37, 0x00, 0x00, 0xff, 0xff, 0x54, 0x67, 0x46, 0xdb, 0x38, 0x07, 0x00, 0x00, -} diff --git a/app/platform/fabric/e2e-test/fabric-samples/chaincode/abac/go/vendor/github.com/hyperledger/fabric/protos/msp/msp_config.proto b/app/platform/fabric/e2e-test/fabric-samples/chaincode/abac/go/vendor/github.com/hyperledger/fabric/protos/msp/msp_config.proto deleted file mode 100644 index 542f06d82..000000000 --- a/app/platform/fabric/e2e-test/fabric-samples/chaincode/abac/go/vendor/github.com/hyperledger/fabric/protos/msp/msp_config.proto +++ /dev/null @@ -1,208 +0,0 @@ -/* -Copyright IBM Corp. All Rights Reserved. - -SPDX-License-Identifier: Apache-2.0 -*/ - -syntax = "proto3"; - -option go_package = "github.com/hyperledger/fabric/protos/msp"; -option java_package = "org.hyperledger.fabric.protos.msp"; -option java_outer_classname = "MspConfigPackage"; - -package msp; - -// MSPConfig collects all the configuration information for -// an MSP. The Config field should be unmarshalled in a way -// that depends on the Type -message MSPConfig { - // Type holds the type of the MSP; the default one would - // be of type FABRIC implementing an X.509 based provider - int32 type = 1; - - // Config is MSP dependent configuration info - bytes config = 2; -} - -// FabricMSPConfig collects all the configuration information for -// a Fabric MSP. 
-// Here we assume a default certificate validation policy, where -// any certificate signed by any of the listed rootCA certs would -// be considered as valid under this MSP. -// This MSP may or may not come with a signing identity. If it does, -// it can also issue signing identities. If it does not, it can only -// be used to validate and verify certificates. -message FabricMSPConfig { - // Name holds the identifier of the MSP; MSP identifier - // is chosen by the application that governs this MSP. - // For example, and assuming the default implementation of MSP, - // that is X.509-based and considers a single Issuer, - // this can refer to the Subject OU field or the Issuer OU field. - string name = 1; - - // List of root certificates trusted by this MSP - // they are used upon certificate validation (see - // comment for IntermediateCerts below) - repeated bytes root_certs = 2; - - // List of intermediate certificates trusted by this MSP; - // they are used upon certificate validation as follows: - // validation attempts to build a path from the certificate - // to be validated (which is at one end of the path) and - // one of the certs in the RootCerts field (which is at - // the other end of the path). 
If the path is longer than - // 2, certificates in the middle are searched within the - // IntermediateCerts pool - repeated bytes intermediate_certs = 3; - - // Identity denoting the administrator of this MSP - repeated bytes admins = 4; - - // Identity revocation list - repeated bytes revocation_list = 5; - - // SigningIdentity holds information on the signing identity - // this peer is to use, and which is to be imported by the - // MSP defined before - SigningIdentityInfo signing_identity = 6; - - // OrganizationalUnitIdentifiers holds one or more - // fabric organizational unit identifiers that belong to - // this MSP configuration - repeated FabricOUIdentifier organizational_unit_identifiers = 7; - - // FabricCryptoConfig contains the configuration parameters - // for the cryptographic algorithms used by this MSP - FabricCryptoConfig crypto_config = 8; - - // List of TLS root certificates trusted by this MSP. - // They are returned by GetTLSRootCerts. - repeated bytes tls_root_certs = 9; - - // List of TLS intermediate certificates trusted by this MSP; - // They are returned by GetTLSIntermediateCerts. - repeated bytes tls_intermediate_certs = 10; - - // fabric_node_ous contains the configuration to distinguish clients from peers from orderers - // based on the OUs. - FabricNodeOUs fabric_node_ous = 11; -} - -// FabricCryptoConfig contains configuration parameters -// for the cryptographic algorithms used by the MSP -// this configuration refers to -message FabricCryptoConfig { - - // SignatureHashFamily is a string representing the hash family to be used - // during sign and verify operations. - // Allowed values are "SHA2" and "SHA3". - string signature_hash_family = 1; - - // IdentityIdentifierHashFunction is a string representing the hash function - // to be used during the computation of the identity identifier of an MSP identity. - // Allowed values are "SHA256", "SHA384" and "SHA3_256", "SHA3_384". 
- string identity_identifier_hash_function = 2; - -} - -// IdemixMSPConfig collects all the configuration information for -// an Idemix MSP. -message IdemixMSPConfig { - // Name holds the identifier of the MSP - string name = 1; - - // ipk represents the (serialized) issuer public key - bytes ipk = 2; - - // signer may contain crypto material to configure a default signer - IdemixMSPSignerConfig signer = 3; - - // revocation_pk is the public key used for revocation of credentials - bytes revocation_pk = 4; - - // epoch represents the current epoch (time interval) used for revocation - int64 epoch = 5; -} - -// IdemixMSPSIgnerConfig contains the crypto material to set up an idemix signing identity -message IdemixMSPSignerConfig { - // cred represents the serialized idemix credential of the default signer - bytes cred = 1; - - // sk is the secret key of the default signer, corresponding to credential Cred - bytes sk = 2; - - // organizational_unit_identifier defines the organizational unit the default signer is in - string organizational_unit_identifier = 3; - - // role defines whether the default signer is admin, peer, member or client - int32 role = 4; - - // enrollment_id contains the enrollment id of this signer - string enrollment_id = 5; - - // credential_revocation_information contains a serialized CredentialRevocationInformation - bytes credential_revocation_information = 6; -} - -// SigningIdentityInfo represents the configuration information -// related to the signing identity the peer is to use for generating -// endorsements -message SigningIdentityInfo { - // PublicSigner carries the public information of the signing - // identity. 
For an X.509 provider this would be represented by - // an X.509 certificate - bytes public_signer = 1; - - // PrivateSigner denotes a reference to the private key of the - // peer's signing identity - KeyInfo private_signer = 2; -} - -// KeyInfo represents a (secret) key that is either already stored -// in the bccsp/keystore or key material to be imported to the -// bccsp key-store. In later versions it may contain also a -// keystore identifier -message KeyInfo { - // Identifier of the key inside the default keystore; this for - // the case of Software BCCSP as well as the HSM BCCSP would be - // the SKI of the key - string key_identifier = 1; - - // KeyMaterial (optional) for the key to be imported; this is - // properly encoded key bytes, prefixed by the type of the key - bytes key_material = 2; -} - -// FabricOUIdentifier represents an organizational unit and -// its related chain of trust identifier. -message FabricOUIdentifier { - - // Certificate represents the second certificate in a certification chain. - // (Notice that the first certificate in a certification chain is supposed - // to be the certificate of an identity). - // It must correspond to the certificate of root or intermediate CA - // recognized by the MSP this message belongs to. - // Starting from this certificate, a certification chain is computed - // and bound to the OrganizationUnitIdentifier specified - bytes certificate = 1; - - // OrganizationUnitIdentifier defines the organizational unit under the - // MSP identified with MSPIdentifier - string organizational_unit_identifier = 2; -} - -// FabricNodeOUs contains configuration to tell apart clients from peers from orderers -// based on OUs. If NodeOUs recognition is enabled then an msp identity -// that does not contain any of the specified OU will be considered invalid. -message FabricNodeOUs { - // If true then an msp identity that does not contain any of the specified OU will be considered invalid. 
- bool enable = 1; - - // OU Identifier of the clients - FabricOUIdentifier client_ou_identifier = 2; - - // OU Identifier of the peers - FabricOUIdentifier peer_ou_identifier = 3; - -} \ No newline at end of file diff --git a/app/platform/fabric/e2e-test/fabric-samples/chaincode/abac/go/vendor/github.com/hyperledger/fabric/protos/msp/msp_principal.go b/app/platform/fabric/e2e-test/fabric-samples/chaincode/abac/go/vendor/github.com/hyperledger/fabric/protos/msp/msp_principal.go deleted file mode 100644 index 339dc6296..000000000 --- a/app/platform/fabric/e2e-test/fabric-samples/chaincode/abac/go/vendor/github.com/hyperledger/fabric/protos/msp/msp_principal.go +++ /dev/null @@ -1,43 +0,0 @@ -/* -Copyright IBM Corp. 2017 All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package msp - -import ( - "fmt" - - "github.com/golang/protobuf/proto" -) - -func (mp *MSPPrincipal) VariablyOpaqueFields() []string { - return []string{"principal"} -} - -func (mp *MSPPrincipal) VariablyOpaqueFieldProto(name string) (proto.Message, error) { - if name != mp.VariablyOpaqueFields()[0] { - return nil, fmt.Errorf("not a marshaled field: %s", name) - } - switch mp.PrincipalClassification { - case MSPPrincipal_ROLE: - return &MSPRole{}, nil - case MSPPrincipal_ORGANIZATION_UNIT: - return &OrganizationUnit{}, nil - case MSPPrincipal_IDENTITY: - return nil, fmt.Errorf("unable to decode MSP type IDENTITY until the protos are fixed to include the IDENTITY proto in protos/msp") - default: - return nil, fmt.Errorf("unable to decode MSP type: %v", mp.PrincipalClassification) - } -} diff --git a/app/platform/fabric/e2e-test/fabric-samples/chaincode/abac/go/vendor/github.com/hyperledger/fabric/protos/msp/msp_principal.pb.go b/app/platform/fabric/e2e-test/fabric-samples/chaincode/abac/go/vendor/github.com/hyperledger/fabric/protos/msp/msp_principal.pb.go deleted file mode 100644 index 9200e97f2..000000000 --- a/app/platform/fabric/e2e-test/fabric-samples/chaincode/abac/go/vendor/github.com/hyperledger/fabric/protos/msp/msp_principal.pb.go +++ /dev/null @@ -1,437 +0,0 @@ -// Code generated by protoc-gen-go. DO NOT EDIT. -// source: msp/msp_principal.proto - -package msp // import "github.com/hyperledger/fabric/protos/msp" - -import proto "github.com/golang/protobuf/proto" -import fmt "fmt" -import math "math" - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. 
-const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package - -type MSPPrincipal_Classification int32 - -const ( - MSPPrincipal_ROLE MSPPrincipal_Classification = 0 - // one of a member of MSP network, and the one of an - // administrator of an MSP network - MSPPrincipal_ORGANIZATION_UNIT MSPPrincipal_Classification = 1 - // groupping of entities, per MSP affiliation - // E.g., this can well be represented by an MSP's - // Organization unit - MSPPrincipal_IDENTITY MSPPrincipal_Classification = 2 - // identity - MSPPrincipal_ANONYMITY MSPPrincipal_Classification = 3 - // an identity to be anonymous or nominal. - MSPPrincipal_COMBINED MSPPrincipal_Classification = 4 -) - -var MSPPrincipal_Classification_name = map[int32]string{ - 0: "ROLE", - 1: "ORGANIZATION_UNIT", - 2: "IDENTITY", - 3: "ANONYMITY", - 4: "COMBINED", -} -var MSPPrincipal_Classification_value = map[string]int32{ - "ROLE": 0, - "ORGANIZATION_UNIT": 1, - "IDENTITY": 2, - "ANONYMITY": 3, - "COMBINED": 4, -} - -func (x MSPPrincipal_Classification) String() string { - return proto.EnumName(MSPPrincipal_Classification_name, int32(x)) -} -func (MSPPrincipal_Classification) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_msp_principal_9016cf1a8a7156cd, []int{0, 0} -} - -type MSPRole_MSPRoleType int32 - -const ( - MSPRole_MEMBER MSPRole_MSPRoleType = 0 - MSPRole_ADMIN MSPRole_MSPRoleType = 1 - MSPRole_CLIENT MSPRole_MSPRoleType = 2 - MSPRole_PEER MSPRole_MSPRoleType = 3 -) - -var MSPRole_MSPRoleType_name = map[int32]string{ - 0: "MEMBER", - 1: "ADMIN", - 2: "CLIENT", - 3: "PEER", -} -var MSPRole_MSPRoleType_value = map[string]int32{ - "MEMBER": 0, - "ADMIN": 1, - "CLIENT": 2, - "PEER": 3, -} - -func (x MSPRole_MSPRoleType) String() string { - return proto.EnumName(MSPRole_MSPRoleType_name, int32(x)) -} -func (MSPRole_MSPRoleType) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_msp_principal_9016cf1a8a7156cd, []int{2, 0} -} - -type 
MSPIdentityAnonymity_MSPIdentityAnonymityType int32 - -const ( - MSPIdentityAnonymity_NOMINAL MSPIdentityAnonymity_MSPIdentityAnonymityType = 0 - MSPIdentityAnonymity_ANONYMOUS MSPIdentityAnonymity_MSPIdentityAnonymityType = 1 -) - -var MSPIdentityAnonymity_MSPIdentityAnonymityType_name = map[int32]string{ - 0: "NOMINAL", - 1: "ANONYMOUS", -} -var MSPIdentityAnonymity_MSPIdentityAnonymityType_value = map[string]int32{ - "NOMINAL": 0, - "ANONYMOUS": 1, -} - -func (x MSPIdentityAnonymity_MSPIdentityAnonymityType) String() string { - return proto.EnumName(MSPIdentityAnonymity_MSPIdentityAnonymityType_name, int32(x)) -} -func (MSPIdentityAnonymity_MSPIdentityAnonymityType) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_msp_principal_9016cf1a8a7156cd, []int{3, 0} -} - -// MSPPrincipal aims to represent an MSP-centric set of identities. -// In particular, this structure allows for definition of -// - a group of identities that are member of the same MSP -// - a group of identities that are member of the same organization unit -// in the same MSP -// - a group of identities that are administering a specific MSP -// - a specific identity -// Expressing these groups is done given two fields of the fields below -// - Classification, that defines the type of classification of identities -// in an MSP this principal would be defined on; Classification can take -// three values: -// (i) ByMSPRole: that represents a classification of identities within -// MSP based on one of the two pre-defined MSP rules, "member" and "admin" -// (ii) ByOrganizationUnit: that represents a classification of identities -// within MSP based on the organization unit an identity belongs to -// (iii)ByIdentity that denotes that MSPPrincipal is mapped to a single -// identity/certificate; this would mean that the Principal bytes -// message -type MSPPrincipal struct { - // Classification describes the way that one should process - // Principal. 
An Classification value of "ByOrganizationUnit" reflects - // that "Principal" contains the name of an organization this MSP - // handles. A Classification value "ByIdentity" means that - // "Principal" contains a specific identity. Default value - // denotes that Principal contains one of the groups by - // default supported by all MSPs ("admin" or "member"). - PrincipalClassification MSPPrincipal_Classification `protobuf:"varint,1,opt,name=principal_classification,json=principalClassification,proto3,enum=common.MSPPrincipal_Classification" json:"principal_classification,omitempty"` - // Principal completes the policy principal definition. For the default - // principal types, Principal can be either "Admin" or "Member". - // For the ByOrganizationUnit/ByIdentity values of Classification, - // PolicyPrincipal acquires its value from an organization unit or - // identity, respectively. - // For the Combined Classification type, the Principal is a marshalled - // CombinedPrincipal. - Principal []byte `protobuf:"bytes,2,opt,name=principal,proto3" json:"principal,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *MSPPrincipal) Reset() { *m = MSPPrincipal{} } -func (m *MSPPrincipal) String() string { return proto.CompactTextString(m) } -func (*MSPPrincipal) ProtoMessage() {} -func (*MSPPrincipal) Descriptor() ([]byte, []int) { - return fileDescriptor_msp_principal_9016cf1a8a7156cd, []int{0} -} -func (m *MSPPrincipal) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_MSPPrincipal.Unmarshal(m, b) -} -func (m *MSPPrincipal) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_MSPPrincipal.Marshal(b, m, deterministic) -} -func (dst *MSPPrincipal) XXX_Merge(src proto.Message) { - xxx_messageInfo_MSPPrincipal.Merge(dst, src) -} -func (m *MSPPrincipal) XXX_Size() int { - return xxx_messageInfo_MSPPrincipal.Size(m) -} -func (m *MSPPrincipal) 
XXX_DiscardUnknown() { - xxx_messageInfo_MSPPrincipal.DiscardUnknown(m) -} - -var xxx_messageInfo_MSPPrincipal proto.InternalMessageInfo - -func (m *MSPPrincipal) GetPrincipalClassification() MSPPrincipal_Classification { - if m != nil { - return m.PrincipalClassification - } - return MSPPrincipal_ROLE -} - -func (m *MSPPrincipal) GetPrincipal() []byte { - if m != nil { - return m.Principal - } - return nil -} - -// OrganizationUnit governs the organization of the Principal -// field of a policy principal when a specific organization unity members -// are to be defined within a policy principal. -type OrganizationUnit struct { - // MSPIdentifier represents the identifier of the MSP this organization unit - // refers to - MspIdentifier string `protobuf:"bytes,1,opt,name=msp_identifier,json=mspIdentifier,proto3" json:"msp_identifier,omitempty"` - // OrganizationUnitIdentifier defines the organizational unit under the - // MSP identified with MSPIdentifier - OrganizationalUnitIdentifier string `protobuf:"bytes,2,opt,name=organizational_unit_identifier,json=organizationalUnitIdentifier,proto3" json:"organizational_unit_identifier,omitempty"` - // CertifiersIdentifier is the hash of certificates chain of trust - // related to this organizational unit - CertifiersIdentifier []byte `protobuf:"bytes,3,opt,name=certifiers_identifier,json=certifiersIdentifier,proto3" json:"certifiers_identifier,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *OrganizationUnit) Reset() { *m = OrganizationUnit{} } -func (m *OrganizationUnit) String() string { return proto.CompactTextString(m) } -func (*OrganizationUnit) ProtoMessage() {} -func (*OrganizationUnit) Descriptor() ([]byte, []int) { - return fileDescriptor_msp_principal_9016cf1a8a7156cd, []int{1} -} -func (m *OrganizationUnit) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_OrganizationUnit.Unmarshal(m, b) -} -func (m 
*OrganizationUnit) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_OrganizationUnit.Marshal(b, m, deterministic) -} -func (dst *OrganizationUnit) XXX_Merge(src proto.Message) { - xxx_messageInfo_OrganizationUnit.Merge(dst, src) -} -func (m *OrganizationUnit) XXX_Size() int { - return xxx_messageInfo_OrganizationUnit.Size(m) -} -func (m *OrganizationUnit) XXX_DiscardUnknown() { - xxx_messageInfo_OrganizationUnit.DiscardUnknown(m) -} - -var xxx_messageInfo_OrganizationUnit proto.InternalMessageInfo - -func (m *OrganizationUnit) GetMspIdentifier() string { - if m != nil { - return m.MspIdentifier - } - return "" -} - -func (m *OrganizationUnit) GetOrganizationalUnitIdentifier() string { - if m != nil { - return m.OrganizationalUnitIdentifier - } - return "" -} - -func (m *OrganizationUnit) GetCertifiersIdentifier() []byte { - if m != nil { - return m.CertifiersIdentifier - } - return nil -} - -// MSPRole governs the organization of the Principal -// field of an MSPPrincipal when it aims to define one of the -// two dedicated roles within an MSP: Admin and Members. 
-type MSPRole struct { - // MSPIdentifier represents the identifier of the MSP this principal - // refers to - MspIdentifier string `protobuf:"bytes,1,opt,name=msp_identifier,json=mspIdentifier,proto3" json:"msp_identifier,omitempty"` - // MSPRoleType defines which of the available, pre-defined MSP-roles - // an identiy should posess inside the MSP with identifier MSPidentifier - Role MSPRole_MSPRoleType `protobuf:"varint,2,opt,name=role,proto3,enum=common.MSPRole_MSPRoleType" json:"role,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *MSPRole) Reset() { *m = MSPRole{} } -func (m *MSPRole) String() string { return proto.CompactTextString(m) } -func (*MSPRole) ProtoMessage() {} -func (*MSPRole) Descriptor() ([]byte, []int) { - return fileDescriptor_msp_principal_9016cf1a8a7156cd, []int{2} -} -func (m *MSPRole) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_MSPRole.Unmarshal(m, b) -} -func (m *MSPRole) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_MSPRole.Marshal(b, m, deterministic) -} -func (dst *MSPRole) XXX_Merge(src proto.Message) { - xxx_messageInfo_MSPRole.Merge(dst, src) -} -func (m *MSPRole) XXX_Size() int { - return xxx_messageInfo_MSPRole.Size(m) -} -func (m *MSPRole) XXX_DiscardUnknown() { - xxx_messageInfo_MSPRole.DiscardUnknown(m) -} - -var xxx_messageInfo_MSPRole proto.InternalMessageInfo - -func (m *MSPRole) GetMspIdentifier() string { - if m != nil { - return m.MspIdentifier - } - return "" -} - -func (m *MSPRole) GetRole() MSPRole_MSPRoleType { - if m != nil { - return m.Role - } - return MSPRole_MEMBER -} - -// MSPIdentityAnonymity can be used to enforce an identity to be anonymous or nominal. 
-type MSPIdentityAnonymity struct { - AnonymityType MSPIdentityAnonymity_MSPIdentityAnonymityType `protobuf:"varint,1,opt,name=anonymity_type,json=anonymityType,proto3,enum=common.MSPIdentityAnonymity_MSPIdentityAnonymityType" json:"anonymity_type,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *MSPIdentityAnonymity) Reset() { *m = MSPIdentityAnonymity{} } -func (m *MSPIdentityAnonymity) String() string { return proto.CompactTextString(m) } -func (*MSPIdentityAnonymity) ProtoMessage() {} -func (*MSPIdentityAnonymity) Descriptor() ([]byte, []int) { - return fileDescriptor_msp_principal_9016cf1a8a7156cd, []int{3} -} -func (m *MSPIdentityAnonymity) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_MSPIdentityAnonymity.Unmarshal(m, b) -} -func (m *MSPIdentityAnonymity) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_MSPIdentityAnonymity.Marshal(b, m, deterministic) -} -func (dst *MSPIdentityAnonymity) XXX_Merge(src proto.Message) { - xxx_messageInfo_MSPIdentityAnonymity.Merge(dst, src) -} -func (m *MSPIdentityAnonymity) XXX_Size() int { - return xxx_messageInfo_MSPIdentityAnonymity.Size(m) -} -func (m *MSPIdentityAnonymity) XXX_DiscardUnknown() { - xxx_messageInfo_MSPIdentityAnonymity.DiscardUnknown(m) -} - -var xxx_messageInfo_MSPIdentityAnonymity proto.InternalMessageInfo - -func (m *MSPIdentityAnonymity) GetAnonymityType() MSPIdentityAnonymity_MSPIdentityAnonymityType { - if m != nil { - return m.AnonymityType - } - return MSPIdentityAnonymity_NOMINAL -} - -// CombinedPrincipal governs the organization of the Principal -// field of a policy principal when principal_classification has -// indicated that a combined form of principals is required -type CombinedPrincipal struct { - // Principals refer to combined principals - Principals []*MSPPrincipal `protobuf:"bytes,1,rep,name=principals,proto3" json:"principals,omitempty"` - 
XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *CombinedPrincipal) Reset() { *m = CombinedPrincipal{} } -func (m *CombinedPrincipal) String() string { return proto.CompactTextString(m) } -func (*CombinedPrincipal) ProtoMessage() {} -func (*CombinedPrincipal) Descriptor() ([]byte, []int) { - return fileDescriptor_msp_principal_9016cf1a8a7156cd, []int{4} -} -func (m *CombinedPrincipal) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_CombinedPrincipal.Unmarshal(m, b) -} -func (m *CombinedPrincipal) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_CombinedPrincipal.Marshal(b, m, deterministic) -} -func (dst *CombinedPrincipal) XXX_Merge(src proto.Message) { - xxx_messageInfo_CombinedPrincipal.Merge(dst, src) -} -func (m *CombinedPrincipal) XXX_Size() int { - return xxx_messageInfo_CombinedPrincipal.Size(m) -} -func (m *CombinedPrincipal) XXX_DiscardUnknown() { - xxx_messageInfo_CombinedPrincipal.DiscardUnknown(m) -} - -var xxx_messageInfo_CombinedPrincipal proto.InternalMessageInfo - -func (m *CombinedPrincipal) GetPrincipals() []*MSPPrincipal { - if m != nil { - return m.Principals - } - return nil -} - -func init() { - proto.RegisterType((*MSPPrincipal)(nil), "common.MSPPrincipal") - proto.RegisterType((*OrganizationUnit)(nil), "common.OrganizationUnit") - proto.RegisterType((*MSPRole)(nil), "common.MSPRole") - proto.RegisterType((*MSPIdentityAnonymity)(nil), "common.MSPIdentityAnonymity") - proto.RegisterType((*CombinedPrincipal)(nil), "common.CombinedPrincipal") - proto.RegisterEnum("common.MSPPrincipal_Classification", MSPPrincipal_Classification_name, MSPPrincipal_Classification_value) - proto.RegisterEnum("common.MSPRole_MSPRoleType", MSPRole_MSPRoleType_name, MSPRole_MSPRoleType_value) - proto.RegisterEnum("common.MSPIdentityAnonymity_MSPIdentityAnonymityType", MSPIdentityAnonymity_MSPIdentityAnonymityType_name, 
MSPIdentityAnonymity_MSPIdentityAnonymityType_value) -} - -func init() { - proto.RegisterFile("msp/msp_principal.proto", fileDescriptor_msp_principal_9016cf1a8a7156cd) -} - -var fileDescriptor_msp_principal_9016cf1a8a7156cd = []byte{ - // 519 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x93, 0xdf, 0x6a, 0xdb, 0x30, - 0x14, 0xc6, 0xeb, 0xa4, 0x6b, 0x9b, 0x93, 0x3f, 0xa8, 0x22, 0xa5, 0x81, 0x95, 0x11, 0xbc, 0x0d, - 0x72, 0xe5, 0x40, 0xba, 0xed, 0x62, 0x77, 0x4e, 0x62, 0x86, 0x20, 0x96, 0x8d, 0xe3, 0x5c, 0xb4, - 0x94, 0x05, 0xc7, 0x51, 0x52, 0x81, 0x6d, 0x19, 0xdb, 0xbd, 0xf0, 0xde, 0x65, 0x6f, 0xb0, 0xcb, - 0x3d, 0xd5, 0x9e, 0x62, 0xd8, 0x6e, 0x12, 0x65, 0xeb, 0x60, 0x57, 0xf6, 0x39, 0xe7, 0xf7, 0x1d, - 0x1d, 0x49, 0x9f, 0xe0, 0x3a, 0x4c, 0xe3, 0x61, 0x98, 0xc6, 0xcb, 0x38, 0xe1, 0x91, 0xcf, 0x63, - 0x2f, 0xd0, 0xe2, 0x44, 0x64, 0x02, 0x9f, 0xf9, 0x22, 0x0c, 0x45, 0xa4, 0xfe, 0x52, 0xa0, 0x65, - 0xce, 0x6d, 0x7b, 0x57, 0xc6, 0x5f, 0xa1, 0xb7, 0x67, 0x97, 0x7e, 0xe0, 0xa5, 0x29, 0xdf, 0x70, - 0xdf, 0xcb, 0xb8, 0x88, 0x7a, 0x4a, 0x5f, 0x19, 0x74, 0x46, 0x6f, 0xb5, 0x4a, 0xab, 0xc9, 0x3a, - 0x6d, 0x72, 0x84, 0x3a, 0xd7, 0xfb, 0x26, 0xc7, 0x05, 0x7c, 0x03, 0x8d, 0x7d, 0xa9, 0x57, 0xeb, - 0x2b, 0x83, 0x96, 0x73, 0x48, 0xa8, 0x0f, 0xd0, 0xf9, 0x83, 0xbf, 0x80, 0x53, 0xc7, 0x9a, 0x19, - 0xe8, 0x04, 0x5f, 0xc1, 0xa5, 0xe5, 0x7c, 0xd1, 0x29, 0xb9, 0xd7, 0x5d, 0x62, 0xd1, 0xe5, 0x82, - 0x12, 0x17, 0x29, 0xb8, 0x05, 0x17, 0x64, 0x6a, 0x50, 0x97, 0xb8, 0x77, 0xa8, 0x86, 0xdb, 0xd0, - 0xd0, 0xa9, 0x45, 0xef, 0xcc, 0x22, 0xac, 0x17, 0xc5, 0x89, 0x65, 0x8e, 0x09, 0x35, 0xa6, 0xe8, - 0x54, 0xfd, 0xa9, 0x00, 0xb2, 0x92, 0xad, 0x17, 0xf1, 0x6f, 0x65, 0xf3, 0x45, 0xc4, 0x33, 0xfc, - 0x1e, 0x3a, 0xc5, 0x01, 0xf1, 0x35, 0x8b, 0x32, 0xbe, 0xe1, 0x2c, 0x29, 0xb7, 0xd9, 0x70, 0xda, - 0x61, 0x1a, 0x93, 0x7d, 0x12, 0x4f, 0xe1, 0x8d, 0x90, 0xa4, 0x5e, 0xb0, 0x7c, 0x8a, 0x78, 0x26, - 0xcb, 0x6a, 0xa5, 0xec, 0xe6, 0x98, 0x2a, 0x96, 
0x90, 0xba, 0xdc, 0xc2, 0x95, 0xcf, 0x92, 0x2a, - 0x48, 0x65, 0x71, 0xbd, 0x3c, 0x89, 0xee, 0xa1, 0x78, 0x10, 0xa9, 0xdf, 0x15, 0x38, 0x37, 0xe7, - 0xb6, 0x23, 0x02, 0xf6, 0xbf, 0xd3, 0x0e, 0xe1, 0x34, 0x11, 0x01, 0x2b, 0x67, 0xea, 0x8c, 0x5e, - 0x4b, 0x37, 0x56, 0x74, 0xd9, 0x7d, 0xdd, 0x3c, 0x66, 0x4e, 0x09, 0xaa, 0x9f, 0xa1, 0x29, 0x25, - 0x31, 0xc0, 0x99, 0x69, 0x98, 0x63, 0xc3, 0x41, 0x27, 0xb8, 0x01, 0xaf, 0xf4, 0xa9, 0x49, 0x28, - 0x52, 0x8a, 0xf4, 0x64, 0x46, 0x0c, 0xea, 0xa2, 0x5a, 0x71, 0x31, 0xb6, 0x61, 0x38, 0xa8, 0xae, - 0xfe, 0x50, 0xa0, 0x6b, 0xce, 0xed, 0x6a, 0xf9, 0x2c, 0xd7, 0x23, 0x11, 0xe5, 0x21, 0xcf, 0x72, - 0xfc, 0x00, 0x1d, 0x6f, 0x17, 0x2c, 0xb3, 0x3c, 0x66, 0xcf, 0x0e, 0xfa, 0x28, 0xcd, 0xf3, 0x97, - 0xea, 0xc5, 0x64, 0x39, 0x69, 0xdb, 0x93, 0x43, 0xf5, 0x13, 0xf4, 0xfe, 0x85, 0xe2, 0x26, 0x9c, - 0x53, 0xcb, 0x24, 0x54, 0x9f, 0xa1, 0x93, 0x83, 0x27, 0xac, 0xc5, 0x1c, 0x29, 0x2a, 0x81, 0xcb, - 0x89, 0x08, 0x57, 0x3c, 0x62, 0xeb, 0x83, 0xed, 0x3f, 0x00, 0xec, 0x5d, 0x98, 0xf6, 0x94, 0x7e, - 0x7d, 0xd0, 0x1c, 0x75, 0x5f, 0x32, 0xba, 0x23, 0x71, 0x63, 0x1b, 0xde, 0x89, 0x64, 0xab, 0x3d, - 0xe6, 0x31, 0x4b, 0x02, 0xb6, 0xde, 0xb2, 0x44, 0xdb, 0x78, 0xab, 0x84, 0xfb, 0xd5, 0x2b, 0x4b, - 0x9f, 0x1b, 0xdc, 0x0f, 0xb6, 0x3c, 0x7b, 0x7c, 0x5a, 0x15, 0xe1, 0x50, 0x82, 0x87, 0x15, 0x3c, - 0xac, 0xe0, 0xe2, 0x9d, 0xae, 0xce, 0xca, 0xff, 0xdb, 0xdf, 0x01, 0x00, 0x00, 0xff, 0xff, 0x40, - 0x36, 0xd2, 0xf9, 0xb9, 0x03, 0x00, 0x00, -} diff --git a/app/platform/fabric/e2e-test/fabric-samples/chaincode/abac/go/vendor/github.com/hyperledger/fabric/protos/msp/msp_principal.proto b/app/platform/fabric/e2e-test/fabric-samples/chaincode/abac/go/vendor/github.com/hyperledger/fabric/protos/msp/msp_principal.proto deleted file mode 100644 index 972f0fe4d..000000000 --- a/app/platform/fabric/e2e-test/fabric-samples/chaincode/abac/go/vendor/github.com/hyperledger/fabric/protos/msp/msp_principal.proto +++ /dev/null @@ -1,153 +0,0 @@ -/* -Copyright IBM Corp. 
2016 All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - - -syntax = "proto3"; - -option go_package = "github.com/hyperledger/fabric/protos/msp"; -option java_package = "org.hyperledger.fabric.protos.common"; - -package common; - - -// msp_principal.proto contains proto messages defining the generalized -// MSP notion of identity called an MSPPrincipal. It is used as part of -// the chain configuration, in particular as the identity parameters to -// the configuration.proto file. This does not represent the MSP -// configuration for a chain, but is understood by MSPs - -// MSPPrincipal aims to represent an MSP-centric set of identities. 
-// In particular, this structure allows for definition of -// - a group of identities that are member of the same MSP -// - a group of identities that are member of the same organization unit -// in the same MSP -// - a group of identities that are administering a specific MSP -// - a specific identity -// Expressing these groups is done given two fields of the fields below -// - Classification, that defines the type of classification of identities -// in an MSP this principal would be defined on; Classification can take -// three values: -// (i) ByMSPRole: that represents a classification of identities within -// MSP based on one of the two pre-defined MSP rules, "member" and "admin" -// (ii) ByOrganizationUnit: that represents a classification of identities -// within MSP based on the organization unit an identity belongs to -// (iii)ByIdentity that denotes that MSPPrincipal is mapped to a single -// identity/certificate; this would mean that the Principal bytes -// message -message MSPPrincipal { - - enum Classification { - ROLE = 0; // Represents the one of the dedicated MSP roles, the - // one of a member of MSP network, and the one of an - // administrator of an MSP network - ORGANIZATION_UNIT = 1; // Denotes a finer grained (affiliation-based) - // groupping of entities, per MSP affiliation - // E.g., this can well be represented by an MSP's - // Organization unit - IDENTITY = 2; // Denotes a principal that consists of a single - // identity - ANONYMITY = 3; // Denotes a principal that can be used to enforce - // an identity to be anonymous or nominal. - COMBINED = 4; // Denotes a combined principal - } - - // Classification describes the way that one should process - // Principal. An Classification value of "ByOrganizationUnit" reflects - // that "Principal" contains the name of an organization this MSP - // handles. A Classification value "ByIdentity" means that - // "Principal" contains a specific identity. 
Default value - // denotes that Principal contains one of the groups by - // default supported by all MSPs ("admin" or "member"). - Classification principal_classification = 1; - - // Principal completes the policy principal definition. For the default - // principal types, Principal can be either "Admin" or "Member". - // For the ByOrganizationUnit/ByIdentity values of Classification, - // PolicyPrincipal acquires its value from an organization unit or - // identity, respectively. - // For the Combined Classification type, the Principal is a marshalled - // CombinedPrincipal. - bytes principal = 2; -} - - -// OrganizationUnit governs the organization of the Principal -// field of a policy principal when a specific organization unity members -// are to be defined within a policy principal. -message OrganizationUnit { - - // MSPIdentifier represents the identifier of the MSP this organization unit - // refers to - string msp_identifier = 1; - - // OrganizationUnitIdentifier defines the organizational unit under the - // MSP identified with MSPIdentifier - string organizational_unit_identifier = 2; - - // CertifiersIdentifier is the hash of certificates chain of trust - // related to this organizational unit - bytes certifiers_identifier = 3; -} - -// MSPRole governs the organization of the Principal -// field of an MSPPrincipal when it aims to define one of the -// two dedicated roles within an MSP: Admin and Members. 
-message MSPRole { - - // MSPIdentifier represents the identifier of the MSP this principal - // refers to - string msp_identifier = 1; - - enum MSPRoleType { - MEMBER = 0; // Represents an MSP Member - ADMIN = 1; // Represents an MSP Admin - CLIENT = 2; // Represents an MSP Client - PEER = 3; // Represents an MSP Peer - } - - // MSPRoleType defines which of the available, pre-defined MSP-roles - // an identiy should posess inside the MSP with identifier MSPidentifier - MSPRoleType role = 2; - -} - -// MSPIdentityAnonymity can be used to enforce an identity to be anonymous or nominal. -message MSPIdentityAnonymity { - - enum MSPIdentityAnonymityType { - NOMINAL = 0; // Represents a nominal MSP Identity - ANONYMOUS = 1; // Represents an anonymous MSP Identity - } - - MSPIdentityAnonymityType anonymity_type = 1; - -} - -// CombinedPrincipal governs the organization of the Principal -// field of a policy principal when principal_classification has -// indicated that a combined form of principals is required -message CombinedPrincipal { - - // Principals refer to combined principals - repeated MSPPrincipal principals = 1; -} - -// TODO: Bring msp.SerializedIdentity from fabric/msp/identities.proto here. Reason below. -// SerializedIdentity represents an serialized version of an identity; -// this consists of an MSP-identifier this identity would correspond to -// and the bytes of the actual identity. A serialized form of -// SerializedIdentity would govern "Principal" field of a PolicyPrincipal -// of classification "ByIdentity". 
diff --git a/app/platform/fabric/e2e-test/fabric-samples/chaincode/abac/go/vendor/github.com/pkg/errors/LICENSE b/app/platform/fabric/e2e-test/fabric-samples/chaincode/abac/go/vendor/github.com/pkg/errors/LICENSE deleted file mode 100644 index 835ba3e75..000000000 --- a/app/platform/fabric/e2e-test/fabric-samples/chaincode/abac/go/vendor/github.com/pkg/errors/LICENSE +++ /dev/null @@ -1,23 +0,0 @@ -Copyright (c) 2015, Dave Cheney -All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are met: - -* Redistributions of source code must retain the above copyright notice, this - list of conditions and the following disclaimer. - -* Redistributions in binary form must reproduce the above copyright notice, - this list of conditions and the following disclaimer in the documentation - and/or other materials provided with the distribution. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" -AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE -FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL -DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR -SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER -CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, -OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
diff --git a/app/platform/fabric/e2e-test/fabric-samples/chaincode/abac/go/vendor/github.com/pkg/errors/README.md b/app/platform/fabric/e2e-test/fabric-samples/chaincode/abac/go/vendor/github.com/pkg/errors/README.md deleted file mode 100644 index 6483ba2af..000000000 --- a/app/platform/fabric/e2e-test/fabric-samples/chaincode/abac/go/vendor/github.com/pkg/errors/README.md +++ /dev/null @@ -1,52 +0,0 @@ -# errors [![Travis-CI](https://travis-ci.org/pkg/errors.svg)](https://travis-ci.org/pkg/errors) [![AppVeyor](https://ci.appveyor.com/api/projects/status/b98mptawhudj53ep/branch/master?svg=true)](https://ci.appveyor.com/project/davecheney/errors/branch/master) [![GoDoc](https://godoc.org/github.com/pkg/errors?status.svg)](http://godoc.org/github.com/pkg/errors) [![Report card](https://goreportcard.com/badge/github.com/pkg/errors)](https://goreportcard.com/report/github.com/pkg/errors) [![Sourcegraph](https://sourcegraph.com/github.com/pkg/errors/-/badge.svg)](https://sourcegraph.com/github.com/pkg/errors?badge) - -Package errors provides simple error handling primitives. - -`go get github.com/pkg/errors` - -The traditional error handling idiom in Go is roughly akin to -```go -if err != nil { - return err -} -``` -which applied recursively up the call stack results in error reports without context or debugging information. The errors package allows programmers to add context to the failure path in their code in a way that does not destroy the original value of the error. - -## Adding context to an error - -The errors.Wrap function returns a new error that adds context to the original error. For example -```go -_, err := ioutil.ReadAll(r) -if err != nil { - return errors.Wrap(err, "read failed") -} -``` -## Retrieving the cause of an error - -Using `errors.Wrap` constructs a stack of errors, adding context to the preceding error. 
Depending on the nature of the error it may be necessary to reverse the operation of errors.Wrap to retrieve the original error for inspection. Any error value which implements this interface can be inspected by `errors.Cause`. -```go -type causer interface { - Cause() error -} -``` -`errors.Cause` will recursively retrieve the topmost error which does not implement `causer`, which is assumed to be the original cause. For example: -```go -switch err := errors.Cause(err).(type) { -case *MyError: - // handle specifically -default: - // unknown error -} -``` - -[Read the package documentation for more information](https://godoc.org/github.com/pkg/errors). - -## Contributing - -We welcome pull requests, bug fixes and issue reports. With that said, the bar for adding new symbols to this package is intentionally set high. - -Before proposing a change, please discuss your change by raising an issue. - -## License - -BSD-2-Clause diff --git a/app/platform/fabric/e2e-test/fabric-samples/chaincode/abac/go/vendor/github.com/pkg/errors/appveyor.yml b/app/platform/fabric/e2e-test/fabric-samples/chaincode/abac/go/vendor/github.com/pkg/errors/appveyor.yml deleted file mode 100644 index a932eade0..000000000 --- a/app/platform/fabric/e2e-test/fabric-samples/chaincode/abac/go/vendor/github.com/pkg/errors/appveyor.yml +++ /dev/null @@ -1,32 +0,0 @@ -version: build-{build}.{branch} - -clone_folder: C:\gopath\src\github.com\pkg\errors -shallow_clone: true # for startup speed - -environment: - GOPATH: C:\gopath - -platform: - - x64 - -# http://www.appveyor.com/docs/installed-software -install: - # some helpful output for debugging builds - - go version - - go env - # pre-installed MinGW at C:\MinGW is 32bit only - # but MSYS2 at C:\msys64 has mingw64 - - set PATH=C:\msys64\mingw64\bin;%PATH% - - gcc --version - - g++ --version - -build_script: - - go install -v ./... - -test_script: - - set PATH=C:\gopath\bin;%PATH% - - go test -v ./... 
- -#artifacts: -# - path: '%GOPATH%\bin\*.exe' -deploy: off diff --git a/app/platform/fabric/e2e-test/fabric-samples/chaincode/abac/go/vendor/github.com/pkg/errors/errors.go b/app/platform/fabric/e2e-test/fabric-samples/chaincode/abac/go/vendor/github.com/pkg/errors/errors.go deleted file mode 100644 index 1963d86bf..000000000 --- a/app/platform/fabric/e2e-test/fabric-samples/chaincode/abac/go/vendor/github.com/pkg/errors/errors.go +++ /dev/null @@ -1,282 +0,0 @@ -// Package errors provides simple error handling primitives. -// -// The traditional error handling idiom in Go is roughly akin to -// -// if err != nil { -// return err -// } -// -// which when applied recursively up the call stack results in error reports -// without context or debugging information. The errors package allows -// programmers to add context to the failure path in their code in a way -// that does not destroy the original value of the error. -// -// Adding context to an error -// -// The errors.Wrap function returns a new error that adds context to the -// original error by recording a stack trace at the point Wrap is called, -// together with the supplied message. For example -// -// _, err := ioutil.ReadAll(r) -// if err != nil { -// return errors.Wrap(err, "read failed") -// } -// -// If additional control is required, the errors.WithStack and -// errors.WithMessage functions destructure errors.Wrap into its component -// operations: annotating an error with a stack trace and with a message, -// respectively. -// -// Retrieving the cause of an error -// -// Using errors.Wrap constructs a stack of errors, adding context to the -// preceding error. Depending on the nature of the error it may be necessary -// to reverse the operation of errors.Wrap to retrieve the original error -// for inspection. Any error value which implements this interface -// -// type causer interface { -// Cause() error -// } -// -// can be inspected by errors.Cause. 
errors.Cause will recursively retrieve -// the topmost error that does not implement causer, which is assumed to be -// the original cause. For example: -// -// switch err := errors.Cause(err).(type) { -// case *MyError: -// // handle specifically -// default: -// // unknown error -// } -// -// Although the causer interface is not exported by this package, it is -// considered a part of its stable public interface. -// -// Formatted printing of errors -// -// All error values returned from this package implement fmt.Formatter and can -// be formatted by the fmt package. The following verbs are supported: -// -// %s print the error. If the error has a Cause it will be -// printed recursively. -// %v see %s -// %+v extended format. Each Frame of the error's StackTrace will -// be printed in detail. -// -// Retrieving the stack trace of an error or wrapper -// -// New, Errorf, Wrap, and Wrapf record a stack trace at the point they are -// invoked. This information can be retrieved with the following interface: -// -// type stackTracer interface { -// StackTrace() errors.StackTrace -// } -// -// The returned errors.StackTrace type is defined as -// -// type StackTrace []Frame -// -// The Frame type represents a call site in the stack trace. Frame supports -// the fmt.Formatter interface that can be used for printing information about -// the stack trace of this error. For example: -// -// if err, ok := err.(stackTracer); ok { -// for _, f := range err.StackTrace() { -// fmt.Printf("%+s:%d", f) -// } -// } -// -// Although the stackTracer interface is not exported by this package, it is -// considered a part of its stable public interface. -// -// See the documentation for Frame.Format for more details. -package errors - -import ( - "fmt" - "io" -) - -// New returns an error with the supplied message. -// New also records the stack trace at the point it was called. 
-func New(message string) error { - return &fundamental{ - msg: message, - stack: callers(), - } -} - -// Errorf formats according to a format specifier and returns the string -// as a value that satisfies error. -// Errorf also records the stack trace at the point it was called. -func Errorf(format string, args ...interface{}) error { - return &fundamental{ - msg: fmt.Sprintf(format, args...), - stack: callers(), - } -} - -// fundamental is an error that has a message and a stack, but no caller. -type fundamental struct { - msg string - *stack -} - -func (f *fundamental) Error() string { return f.msg } - -func (f *fundamental) Format(s fmt.State, verb rune) { - switch verb { - case 'v': - if s.Flag('+') { - io.WriteString(s, f.msg) - f.stack.Format(s, verb) - return - } - fallthrough - case 's': - io.WriteString(s, f.msg) - case 'q': - fmt.Fprintf(s, "%q", f.msg) - } -} - -// WithStack annotates err with a stack trace at the point WithStack was called. -// If err is nil, WithStack returns nil. -func WithStack(err error) error { - if err == nil { - return nil - } - return &withStack{ - err, - callers(), - } -} - -type withStack struct { - error - *stack -} - -func (w *withStack) Cause() error { return w.error } - -func (w *withStack) Format(s fmt.State, verb rune) { - switch verb { - case 'v': - if s.Flag('+') { - fmt.Fprintf(s, "%+v", w.Cause()) - w.stack.Format(s, verb) - return - } - fallthrough - case 's': - io.WriteString(s, w.Error()) - case 'q': - fmt.Fprintf(s, "%q", w.Error()) - } -} - -// Wrap returns an error annotating err with a stack trace -// at the point Wrap is called, and the supplied message. -// If err is nil, Wrap returns nil. -func Wrap(err error, message string) error { - if err == nil { - return nil - } - err = &withMessage{ - cause: err, - msg: message, - } - return &withStack{ - err, - callers(), - } -} - -// Wrapf returns an error annotating err with a stack trace -// at the point Wrapf is called, and the format specifier. 
-// If err is nil, Wrapf returns nil. -func Wrapf(err error, format string, args ...interface{}) error { - if err == nil { - return nil - } - err = &withMessage{ - cause: err, - msg: fmt.Sprintf(format, args...), - } - return &withStack{ - err, - callers(), - } -} - -// WithMessage annotates err with a new message. -// If err is nil, WithMessage returns nil. -func WithMessage(err error, message string) error { - if err == nil { - return nil - } - return &withMessage{ - cause: err, - msg: message, - } -} - -// WithMessagef annotates err with the format specifier. -// If err is nil, WithMessagef returns nil. -func WithMessagef(err error, format string, args ...interface{}) error { - if err == nil { - return nil - } - return &withMessage{ - cause: err, - msg: fmt.Sprintf(format, args...), - } -} - -type withMessage struct { - cause error - msg string -} - -func (w *withMessage) Error() string { return w.msg + ": " + w.cause.Error() } -func (w *withMessage) Cause() error { return w.cause } - -func (w *withMessage) Format(s fmt.State, verb rune) { - switch verb { - case 'v': - if s.Flag('+') { - fmt.Fprintf(s, "%+v\n", w.Cause()) - io.WriteString(s, w.msg) - return - } - fallthrough - case 's', 'q': - io.WriteString(s, w.Error()) - } -} - -// Cause returns the underlying cause of the error, if possible. -// An error value has a cause if it implements the following -// interface: -// -// type causer interface { -// Cause() error -// } -// -// If the error does not implement Cause, the original error will -// be returned. If the error is nil, nil will be returned without further -// investigation. 
-func Cause(err error) error { - type causer interface { - Cause() error - } - - for err != nil { - cause, ok := err.(causer) - if !ok { - break - } - err = cause.Cause() - } - return err -} diff --git a/app/platform/fabric/e2e-test/fabric-samples/chaincode/abac/go/vendor/github.com/pkg/errors/stack.go b/app/platform/fabric/e2e-test/fabric-samples/chaincode/abac/go/vendor/github.com/pkg/errors/stack.go deleted file mode 100644 index 2874a048c..000000000 --- a/app/platform/fabric/e2e-test/fabric-samples/chaincode/abac/go/vendor/github.com/pkg/errors/stack.go +++ /dev/null @@ -1,147 +0,0 @@ -package errors - -import ( - "fmt" - "io" - "path" - "runtime" - "strings" -) - -// Frame represents a program counter inside a stack frame. -type Frame uintptr - -// pc returns the program counter for this frame; -// multiple frames may have the same PC value. -func (f Frame) pc() uintptr { return uintptr(f) - 1 } - -// file returns the full path to the file that contains the -// function for this Frame's pc. -func (f Frame) file() string { - fn := runtime.FuncForPC(f.pc()) - if fn == nil { - return "unknown" - } - file, _ := fn.FileLine(f.pc()) - return file -} - -// line returns the line number of source code of the -// function for this Frame's pc. -func (f Frame) line() int { - fn := runtime.FuncForPC(f.pc()) - if fn == nil { - return 0 - } - _, line := fn.FileLine(f.pc()) - return line -} - -// Format formats the frame according to the fmt.Formatter interface. 
-// -// %s source file -// %d source line -// %n function name -// %v equivalent to %s:%d -// -// Format accepts flags that alter the printing of some verbs, as follows: -// -// %+s function name and path of source file relative to the compile time -// GOPATH separated by \n\t (\n\t) -// %+v equivalent to %+s:%d -func (f Frame) Format(s fmt.State, verb rune) { - switch verb { - case 's': - switch { - case s.Flag('+'): - pc := f.pc() - fn := runtime.FuncForPC(pc) - if fn == nil { - io.WriteString(s, "unknown") - } else { - file, _ := fn.FileLine(pc) - fmt.Fprintf(s, "%s\n\t%s", fn.Name(), file) - } - default: - io.WriteString(s, path.Base(f.file())) - } - case 'd': - fmt.Fprintf(s, "%d", f.line()) - case 'n': - name := runtime.FuncForPC(f.pc()).Name() - io.WriteString(s, funcname(name)) - case 'v': - f.Format(s, 's') - io.WriteString(s, ":") - f.Format(s, 'd') - } -} - -// StackTrace is stack of Frames from innermost (newest) to outermost (oldest). -type StackTrace []Frame - -// Format formats the stack of Frames according to the fmt.Formatter interface. -// -// %s lists source files for each Frame in the stack -// %v lists the source file and line number for each Frame in the stack -// -// Format accepts flags that alter the printing of some verbs, as follows: -// -// %+v Prints filename, function, and line number for each Frame in the stack. -func (st StackTrace) Format(s fmt.State, verb rune) { - switch verb { - case 'v': - switch { - case s.Flag('+'): - for _, f := range st { - fmt.Fprintf(s, "\n%+v", f) - } - case s.Flag('#'): - fmt.Fprintf(s, "%#v", []Frame(st)) - default: - fmt.Fprintf(s, "%v", []Frame(st)) - } - case 's': - fmt.Fprintf(s, "%s", []Frame(st)) - } -} - -// stack represents a stack of program counters. 
-type stack []uintptr - -func (s *stack) Format(st fmt.State, verb rune) { - switch verb { - case 'v': - switch { - case st.Flag('+'): - for _, pc := range *s { - f := Frame(pc) - fmt.Fprintf(st, "\n%+v", f) - } - } - } -} - -func (s *stack) StackTrace() StackTrace { - f := make([]Frame, len(*s)) - for i := 0; i < len(f); i++ { - f[i] = Frame((*s)[i]) - } - return f -} - -func callers() *stack { - const depth = 32 - var pcs [depth]uintptr - n := runtime.Callers(3, pcs[:]) - var st stack = pcs[0:n] - return &st -} - -// funcname removes the path prefix component of a function's name reported by func.Name(). -func funcname(name string) string { - i := strings.LastIndex(name, "/") - name = name[i+1:] - i = strings.Index(name, ".") - return name[i+1:] -} diff --git a/app/platform/fabric/e2e-test/fabric-samples/chaincode/abac/go/vendor/vendor.json b/app/platform/fabric/e2e-test/fabric-samples/chaincode/abac/go/vendor/vendor.json deleted file mode 100644 index 9794848f1..000000000 --- a/app/platform/fabric/e2e-test/fabric-samples/chaincode/abac/go/vendor/vendor.json +++ /dev/null @@ -1,37 +0,0 @@ -{ - "comment": "", - "ignore": "test", - "package": [ - { - "checksumSHA1": "GaJLoEuMGnP5ofXvuweAI4wx06U=", - "path": "github.com/golang/protobuf/proto", - "revision": "1918e1ff6ffd2be7bed0553df8650672c3bfe80d", - "revisionTime": "2018-10-30T15:47:21Z" - }, - { - "checksumSHA1": "XGpUl1X+7ly1ski4Pc+N9ozfVv8=", - "path": "github.com/hyperledger/fabric/core/chaincode/shim/ext/attrmgr", - "revision": "60f968db8e6e2ebcf439391610e22250993d0a85", - "revisionTime": "2018-09-12T02:19:31Z" - }, - { - "checksumSHA1": "vFuT7942CfsCcH9IG3zHmQ4d/oI=", - "path": "github.com/hyperledger/fabric/core/chaincode/shim/ext/cid", - "revision": "60f968db8e6e2ebcf439391610e22250993d0a85", - "revisionTime": "2018-09-12T02:19:31Z" - }, - { - "checksumSHA1": "ZzWCzHsWRI/LAxhZYUMqVcIAsZQ=", - "path": "github.com/hyperledger/fabric/protos/msp", - "revision": "60f968db8e6e2ebcf439391610e22250993d0a85", - 
"revisionTime": "2018-09-12T02:19:31Z" - }, - { - "checksumSHA1": "DTy0iJ2w5C+FDsN9EnzfhNmvS+o=", - "path": "github.com/pkg/errors", - "revision": "059132a15dd08d6704c67711dae0cf35ab991756", - "revisionTime": "2018-10-23T23:59:46Z" - } - ], - "rootPath": "github.com/hyperledger/fabric-samples/chaincode/abac/go" -} diff --git a/app/platform/fabric/e2e-test/fabric-samples/chaincode/chaincode_example02/go/chaincode_example02.go b/app/platform/fabric/e2e-test/fabric-samples/chaincode/chaincode_example02/go/chaincode_example02.go deleted file mode 100644 index 534380664..000000000 --- a/app/platform/fabric/e2e-test/fabric-samples/chaincode/chaincode_example02/go/chaincode_example02.go +++ /dev/null @@ -1,199 +0,0 @@ -/* -Copyright IBM Corp. 2016 All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package main - -//WARNING - this chaincode's ID is hard-coded in chaincode_example04 to illustrate one way of -//calling chaincode from a chaincode. If this example is modified, chaincode_example04.go has -//to be modified as well with the new ID of chaincode_example02. -//chaincode_example05 show's how chaincode ID can be passed in as a parameter instead of -//hard-coding. 
- -import ( - "fmt" - "strconv" - - "github.com/hyperledger/fabric/core/chaincode/shim" - pb "github.com/hyperledger/fabric/protos/peer" -) - -// SimpleChaincode example simple Chaincode implementation -type SimpleChaincode struct { -} - -func (t *SimpleChaincode) Init(stub shim.ChaincodeStubInterface) pb.Response { - fmt.Println("ex02 Init") - _, args := stub.GetFunctionAndParameters() - var A, B string // Entities - var Aval, Bval int // Asset holdings - var err error - - if len(args) != 4 { - return shim.Error("Incorrect number of arguments. Expecting 4") - } - - // Initialize the chaincode - A = args[0] - Aval, err = strconv.Atoi(args[1]) - if err != nil { - return shim.Error("Expecting integer value for asset holding") - } - B = args[2] - Bval, err = strconv.Atoi(args[3]) - if err != nil { - return shim.Error("Expecting integer value for asset holding") - } - fmt.Printf("Aval = %d, Bval = %d\n", Aval, Bval) - - // Write the state to the ledger - err = stub.PutState(A, []byte(strconv.Itoa(Aval))) - if err != nil { - return shim.Error(err.Error()) - } - - err = stub.PutState(B, []byte(strconv.Itoa(Bval))) - if err != nil { - return shim.Error(err.Error()) - } - - return shim.Success(nil) -} - -func (t *SimpleChaincode) Invoke(stub shim.ChaincodeStubInterface) pb.Response { - fmt.Println("ex02 Invoke") - function, args := stub.GetFunctionAndParameters() - if function == "invoke" { - // Make payment of X units from A to B - return t.invoke(stub, args) - } else if function == "delete" { - // Deletes an entity from its state - return t.delete(stub, args) - } else if function == "query" { - // the old "Query" is now implemtned in invoke - return t.query(stub, args) - } - - return shim.Error("Invalid invoke function name. 
Expecting \"invoke\" \"delete\" \"query\"") -} - -// Transaction makes payment of X units from A to B -func (t *SimpleChaincode) invoke(stub shim.ChaincodeStubInterface, args []string) pb.Response { - var A, B string // Entities - var Aval, Bval int // Asset holdings - var X int // Transaction value - var err error - - if len(args) != 3 { - return shim.Error("Incorrect number of arguments. Expecting 3") - } - - A = args[0] - B = args[1] - - // Get the state from the ledger - // TODO: will be nice to have a GetAllState call to ledger - Avalbytes, err := stub.GetState(A) - if err != nil { - return shim.Error("Failed to get state") - } - if Avalbytes == nil { - return shim.Error("Entity not found") - } - Aval, _ = strconv.Atoi(string(Avalbytes)) - - Bvalbytes, err := stub.GetState(B) - if err != nil { - return shim.Error("Failed to get state") - } - if Bvalbytes == nil { - return shim.Error("Entity not found") - } - Bval, _ = strconv.Atoi(string(Bvalbytes)) - - // Perform the execution - X, err = strconv.Atoi(args[2]) - if err != nil { - return shim.Error("Invalid transaction amount, expecting a integer value") - } - Aval = Aval - X - Bval = Bval + X - fmt.Printf("Aval = %d, Bval = %d\n", Aval, Bval) - - // Write the state back to the ledger - err = stub.PutState(A, []byte(strconv.Itoa(Aval))) - if err != nil { - return shim.Error(err.Error()) - } - - err = stub.PutState(B, []byte(strconv.Itoa(Bval))) - if err != nil { - return shim.Error(err.Error()) - } - - return shim.Success(nil) -} - -// Deletes an entity from state -func (t *SimpleChaincode) delete(stub shim.ChaincodeStubInterface, args []string) pb.Response { - if len(args) != 1 { - return shim.Error("Incorrect number of arguments. 
Expecting 1") - } - - A := args[0] - - // Delete the key from the state in ledger - err := stub.DelState(A) - if err != nil { - return shim.Error("Failed to delete state") - } - - return shim.Success(nil) -} - -// query callback representing the query of a chaincode -func (t *SimpleChaincode) query(stub shim.ChaincodeStubInterface, args []string) pb.Response { - var A string // Entities - var err error - - if len(args) != 1 { - return shim.Error("Incorrect number of arguments. Expecting name of the person to query") - } - - A = args[0] - - // Get the state from the ledger - Avalbytes, err := stub.GetState(A) - if err != nil { - jsonResp := "{\"Error\":\"Failed to get state for " + A + "\"}" - return shim.Error(jsonResp) - } - - if Avalbytes == nil { - jsonResp := "{\"Error\":\"Nil amount for " + A + "\"}" - return shim.Error(jsonResp) - } - - jsonResp := "{\"Name\":\"" + A + "\",\"Amount\":\"" + string(Avalbytes) + "\"}" - fmt.Printf("Query Response:%s\n", jsonResp) - return shim.Success(Avalbytes) -} - -func main() { - err := shim.Start(new(SimpleChaincode)) - if err != nil { - fmt.Printf("Error starting Simple chaincode: %s", err) - } -} diff --git a/app/platform/fabric/e2e-test/fabric-samples/chaincode/chaincode_example02/java/.gitignore b/app/platform/fabric/e2e-test/fabric-samples/chaincode/chaincode_example02/java/.gitignore deleted file mode 100644 index 4fd7c46dd..000000000 --- a/app/platform/fabric/e2e-test/fabric-samples/chaincode/chaincode_example02/java/.gitignore +++ /dev/null @@ -1,4 +0,0 @@ -bin/ -.classpath -.settings/ -.gradle \ No newline at end of file diff --git a/app/platform/fabric/e2e-test/fabric-samples/chaincode/chaincode_example02/java/build.gradle b/app/platform/fabric/e2e-test/fabric-samples/chaincode/chaincode_example02/java/build.gradle deleted file mode 100644 index 5221272c1..000000000 --- a/app/platform/fabric/e2e-test/fabric-samples/chaincode/chaincode_example02/java/build.gradle +++ /dev/null @@ -1,34 +0,0 @@ -/* - * Copyright IBM 
Corp. 2018 All Rights Reserved. - * - * SPDX-License-Identifier: Apache-2.0 - */ -plugins { - id 'com.github.johnrengelman.shadow' version '2.0.3' - id 'java' -} - -group 'org.hyperledger.fabric-chaincode-java' -version '1.0-SNAPSHOT' - -sourceCompatibility = 1.8 - -repositories { - mavenLocal() - mavenCentral() -} - -dependencies { - compile group: 'org.hyperledger.fabric-chaincode-java', name: 'fabric-chaincode-shim', version: '1.+' - testCompile group: 'junit', name: 'junit', version: '4.12' -} - -shadowJar { - baseName = 'chaincode' - version = null - classifier = null - - manifest { - attributes 'Main-Class': 'org.hyperledger.fabric.example.SimpleChaincode' - } -} diff --git a/app/platform/fabric/e2e-test/fabric-samples/chaincode/chaincode_example02/java/settings.gradle b/app/platform/fabric/e2e-test/fabric-samples/chaincode/chaincode_example02/java/settings.gradle deleted file mode 100644 index 9ce14a668..000000000 --- a/app/platform/fabric/e2e-test/fabric-samples/chaincode/chaincode_example02/java/settings.gradle +++ /dev/null @@ -1,7 +0,0 @@ -/* - * Copyright IBM Corp. 2017 All Rights Reserved. - * - * SPDX-License-Identifier: Apache-2.0 - */ -rootProject.name = 'fabric-chaincode-example-gradle' - diff --git a/app/platform/fabric/e2e-test/fabric-samples/chaincode/chaincode_example02/java/src/main/java/org/hyperledger/fabric/example/SimpleChaincode.java b/app/platform/fabric/e2e-test/fabric-samples/chaincode/chaincode_example02/java/src/main/java/org/hyperledger/fabric/example/SimpleChaincode.java deleted file mode 100644 index dd93a4e06..000000000 --- a/app/platform/fabric/e2e-test/fabric-samples/chaincode/chaincode_example02/java/src/main/java/org/hyperledger/fabric/example/SimpleChaincode.java +++ /dev/null @@ -1,142 +0,0 @@ -/* -Copyright IBM Corp., DTCC All Rights Reserved. 
- -SPDX-License-Identifier: Apache-2.0 -*/ -package org.hyperledger.fabric.example; - -import java.util.List; - -import com.google.protobuf.ByteString; -import io.netty.handler.ssl.OpenSsl; -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; -import org.hyperledger.fabric.shim.ChaincodeBase; -import org.hyperledger.fabric.shim.ChaincodeStub; - -import static java.nio.charset.StandardCharsets.UTF_8; - -public class SimpleChaincode extends ChaincodeBase { - - private static Log _logger = LogFactory.getLog(SimpleChaincode.class); - - @Override - public Response init(ChaincodeStub stub) { - try { - _logger.info("Init java simple chaincode"); - String func = stub.getFunction(); - if (!func.equals("init")) { - return newErrorResponse("function other than init is not supported"); - } - List args = stub.getParameters(); - if (args.size() != 4) { - newErrorResponse("Incorrect number of arguments. Expecting 4"); - } - // Initialize the chaincode - String account1Key = args.get(0); - int account1Value = Integer.parseInt(args.get(1)); - String account2Key = args.get(2); - int account2Value = Integer.parseInt(args.get(3)); - - _logger.info(String.format("account %s, value = %s; account %s, value %s", account1Key, account1Value, account2Key, account2Value)); - stub.putStringState(account1Key, args.get(1)); - stub.putStringState(account2Key, args.get(3)); - - return newSuccessResponse(); - } catch (Throwable e) { - return newErrorResponse(e); - } - } - - @Override - public Response invoke(ChaincodeStub stub) { - try { - _logger.info("Invoke java simple chaincode"); - String func = stub.getFunction(); - List params = stub.getParameters(); - if (func.equals("invoke")) { - return invoke(stub, params); - } - if (func.equals("delete")) { - return delete(stub, params); - } - if (func.equals("query")) { - return query(stub, params); - } - return newErrorResponse("Invalid invoke function name. 
Expecting one of: [\"invoke\", \"delete\", \"query\"]"); - } catch (Throwable e) { - return newErrorResponse(e); - } - } - - private Response invoke(ChaincodeStub stub, List args) { - if (args.size() != 3) { - return newErrorResponse("Incorrect number of arguments. Expecting 3"); - } - String accountFromKey = args.get(0); - String accountToKey = args.get(1); - - String accountFromValueStr = stub.getStringState(accountFromKey); - if (accountFromValueStr == null) { - return newErrorResponse(String.format("Entity %s not found", accountFromKey)); - } - int accountFromValue = Integer.parseInt(accountFromValueStr); - - String accountToValueStr = stub.getStringState(accountToKey); - if (accountToValueStr == null) { - return newErrorResponse(String.format("Entity %s not found", accountToKey)); - } - int accountToValue = Integer.parseInt(accountToValueStr); - - int amount = Integer.parseInt(args.get(2)); - - if (amount > accountFromValue) { - return newErrorResponse(String.format("not enough money in account %s", accountFromKey)); - } - - accountFromValue -= amount; - accountToValue += amount; - - _logger.info(String.format("new value of A: %s", accountFromValue)); - _logger.info(String.format("new value of B: %s", accountToValue)); - - stub.putStringState(accountFromKey, Integer.toString(accountFromValue)); - stub.putStringState(accountToKey, Integer.toString(accountToValue)); - - _logger.info("Transfer complete"); - - return newSuccessResponse("invoke finished successfully", ByteString.copyFrom(accountFromKey + ": " + accountFromValue + " " + accountToKey + ": " + accountToValue, UTF_8).toByteArray()); - } - - // Deletes an entity from state - private Response delete(ChaincodeStub stub, List args) { - if (args.size() != 1) { - return newErrorResponse("Incorrect number of arguments. 
Expecting 1"); - } - String key = args.get(0); - // Delete the key from the state in ledger - stub.delState(key); - return newSuccessResponse(); - } - - // query callback representing the query of a chaincode - private Response query(ChaincodeStub stub, List args) { - if (args.size() != 1) { - return newErrorResponse("Incorrect number of arguments. Expecting name of the person to query"); - } - String key = args.get(0); - //byte[] stateBytes - String val = stub.getStringState(key); - if (val == null) { - return newErrorResponse(String.format("Error: state for %s is null", key)); - } - _logger.info(String.format("Query Response:\nName: %s, Amount: %s\n", key, val)); - return newSuccessResponse(val, ByteString.copyFrom(val, UTF_8).toByteArray()); - } - - public static void main(String[] args) { - System.out.println("OpenSSL avaliable: " + OpenSsl.isAvailable()); - new SimpleChaincode().start(args); - } - -} diff --git a/app/platform/fabric/e2e-test/fabric-samples/chaincode/chaincode_example02/node/chaincode_example02.js b/app/platform/fabric/e2e-test/fabric-samples/chaincode/chaincode_example02/node/chaincode_example02.js deleted file mode 100644 index 545092aff..000000000 --- a/app/platform/fabric/e2e-test/fabric-samples/chaincode/chaincode_example02/node/chaincode_example02.js +++ /dev/null @@ -1,138 +0,0 @@ -/* -# Copyright IBM Corp. All Rights Reserved. -# -# SPDX-License-Identifier: Apache-2.0 -*/ - -const shim = require('fabric-shim'); -const util = require('util'); - -var Chaincode = class { - - // Initialize the chaincode - async Init(stub) { - console.info('========= example02 Init ========='); - let ret = stub.getFunctionAndParameters(); - console.info(ret); - let args = ret.params; - // initialise only if 4 parameters passed. - if (args.length != 4) { - return shim.error('Incorrect number of arguments. 
Expecting 4'); - } - - let A = args[0]; - let B = args[2]; - let Aval = args[1]; - let Bval = args[3]; - - if (typeof parseInt(Aval) !== 'number' || typeof parseInt(Bval) !== 'number') { - return shim.error('Expecting integer value for asset holding'); - } - - try { - await stub.putState(A, Buffer.from(Aval)); - try { - await stub.putState(B, Buffer.from(Bval)); - return shim.success(); - } catch (err) { - return shim.error(err); - } - } catch (err) { - return shim.error(err); - } - } - - async Invoke(stub) { - let ret = stub.getFunctionAndParameters(); - console.info(ret); - let method = this[ret.fcn]; - if (!method) { - console.log('no method of name:' + ret.fcn + ' found'); - return shim.success(); - } - try { - let payload = await method(stub, ret.params); - return shim.success(payload); - } catch (err) { - console.log(err); - return shim.error(err); - } - } - - async invoke(stub, args) { - if (args.length != 3) { - throw new Error('Incorrect number of arguments. Expecting 3'); - } - - let A = args[0]; - let B = args[1]; - if (!A || !B) { - throw new Error('asset holding must not be empty'); - } - - // Get the state from the ledger - let Avalbytes = await stub.getState(A); - if (!Avalbytes) { - throw new Error('Failed to get state of asset holder A'); - } - let Aval = parseInt(Avalbytes.toString()); - - let Bvalbytes = await stub.getState(B); - if (!Bvalbytes) { - throw new Error('Failed to get state of asset holder B'); - } - - let Bval = parseInt(Bvalbytes.toString()); - // Perform the execution - let amount = parseInt(args[2]); - if (typeof amount !== 'number') { - throw new Error('Expecting integer value for amount to be transaferred'); - } - - Aval = Aval - amount; - Bval = Bval + amount; - console.info(util.format('Aval = %d, Bval = %d\n', Aval, Bval)); - - // Write the states back to the ledger - await stub.putState(A, Buffer.from(Aval.toString())); - await stub.putState(B, Buffer.from(Bval.toString())); - - } - - // Deletes an entity from state - async 
delete(stub, args) { - if (args.length != 1) { - throw new Error('Incorrect number of arguments. Expecting 1'); - } - - let A = args[0]; - - // Delete the key from the state in ledger - await stub.deleteState(A); - } - - // query callback representing the query of a chaincode - async query(stub, args) { - if (args.length != 1) { - throw new Error('Incorrect number of arguments. Expecting name of the person to query') - } - - let jsonResp = {}; - let A = args[0]; - - // Get the state from the ledger - let Avalbytes = await stub.getState(A); - if (!Avalbytes) { - jsonResp.error = 'Failed to get state for ' + A; - throw new Error(JSON.stringify(jsonResp)); - } - - jsonResp.name = A; - jsonResp.amount = Avalbytes.toString(); - console.info('Query Response:'); - console.info(jsonResp); - return Avalbytes; - } -}; - -shim.start(new Chaincode()); diff --git a/app/platform/fabric/e2e-test/fabric-samples/chaincode/chaincode_example02/node/package.json b/app/platform/fabric/e2e-test/fabric-samples/chaincode/chaincode_example02/node/package.json deleted file mode 100644 index 84e201d92..000000000 --- a/app/platform/fabric/e2e-test/fabric-samples/chaincode/chaincode_example02/node/package.json +++ /dev/null @@ -1,15 +0,0 @@ -{ - "name": "chaincode_example02", - "version": "1.0.0", - "description": "chaincode_example02 chaincode implemented in node.js", - "engines": { - "node": ">=8.4.0", - "npm": ">=5.3.0" - }, - "scripts": { "start" : "node chaincode_example02.js" }, - "engine-strict": true, - "license": "Apache-2.0", - "dependencies": { - "fabric-shim": "~1.4.0" - } -} diff --git a/app/platform/fabric/e2e-test/fabric-samples/chaincode/fabcar/go/fabcar.go b/app/platform/fabric/e2e-test/fabric-samples/chaincode/fabcar/go/fabcar.go deleted file mode 100644 index 01792bf47..000000000 --- a/app/platform/fabric/e2e-test/fabric-samples/chaincode/fabcar/go/fabcar.go +++ /dev/null @@ -1,204 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more 
contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -/* - * The sample smart contract for documentation topic: - * Writing Your First Blockchain Application - */ - -package main - -/* Imports - * 4 utility libraries for formatting, handling bytes, reading and writing JSON, and string manipulation - * 2 specific Hyperledger Fabric specific libraries for Smart Contracts - */ -import ( - "bytes" - "encoding/json" - "fmt" - "strconv" - - "github.com/hyperledger/fabric/core/chaincode/shim" - sc "github.com/hyperledger/fabric/protos/peer" -) - -// Define the Smart Contract structure -type SmartContract struct { -} - -// Define the car structure, with 4 properties. 
Structure tags are used by encoding/json library -type Car struct { - Make string `json:"make"` - Model string `json:"model"` - Colour string `json:"colour"` - Owner string `json:"owner"` -} - -/* - * The Init method is called when the Smart Contract "fabcar" is instantiated by the blockchain network - * Best practice is to have any Ledger initialization in separate function -- see initLedger() - */ -func (s *SmartContract) Init(APIstub shim.ChaincodeStubInterface) sc.Response { - return shim.Success(nil) -} - -/* - * The Invoke method is called as a result of an application request to run the Smart Contract "fabcar" - * The calling application program has also specified the particular smart contract function to be called, with arguments - */ -func (s *SmartContract) Invoke(APIstub shim.ChaincodeStubInterface) sc.Response { - - // Retrieve the requested Smart Contract function and arguments - function, args := APIstub.GetFunctionAndParameters() - // Route to the appropriate handler function to interact with the ledger appropriately - if function == "queryCar" { - return s.queryCar(APIstub, args) - } else if function == "initLedger" { - return s.initLedger(APIstub) - } else if function == "createCar" { - return s.createCar(APIstub, args) - } else if function == "queryAllCars" { - return s.queryAllCars(APIstub) - } else if function == "changeCarOwner" { - return s.changeCarOwner(APIstub, args) - } - - return shim.Error("Invalid Smart Contract function name.") -} - -func (s *SmartContract) queryCar(APIstub shim.ChaincodeStubInterface, args []string) sc.Response { - - if len(args) != 1 { - return shim.Error("Incorrect number of arguments. 
Expecting 1") - } - - carAsBytes, _ := APIstub.GetState(args[0]) - return shim.Success(carAsBytes) -} - -func (s *SmartContract) initLedger(APIstub shim.ChaincodeStubInterface) sc.Response { - cars := []Car{ - Car{Make: "Toyota", Model: "Prius", Colour: "blue", Owner: "Tomoko"}, - Car{Make: "Ford", Model: "Mustang", Colour: "red", Owner: "Brad"}, - Car{Make: "Hyundai", Model: "Tucson", Colour: "green", Owner: "Jin Soo"}, - Car{Make: "Volkswagen", Model: "Passat", Colour: "yellow", Owner: "Max"}, - Car{Make: "Tesla", Model: "S", Colour: "black", Owner: "Adriana"}, - Car{Make: "Peugeot", Model: "205", Colour: "purple", Owner: "Michel"}, - Car{Make: "Chery", Model: "S22L", Colour: "white", Owner: "Aarav"}, - Car{Make: "Fiat", Model: "Punto", Colour: "violet", Owner: "Pari"}, - Car{Make: "Tata", Model: "Nano", Colour: "indigo", Owner: "Valeria"}, - Car{Make: "Holden", Model: "Barina", Colour: "brown", Owner: "Shotaro"}, - } - - i := 0 - for i < len(cars) { - fmt.Println("i is ", i) - carAsBytes, _ := json.Marshal(cars[i]) - APIstub.PutState("CAR"+strconv.Itoa(i), carAsBytes) - fmt.Println("Added", cars[i]) - i = i + 1 - } - - return shim.Success(nil) -} - -func (s *SmartContract) createCar(APIstub shim.ChaincodeStubInterface, args []string) sc.Response { - - if len(args) != 5 { - return shim.Error("Incorrect number of arguments. 
Expecting 5") - } - - var car = Car{Make: args[1], Model: args[2], Colour: args[3], Owner: args[4]} - - carAsBytes, _ := json.Marshal(car) - APIstub.PutState(args[0], carAsBytes) - - return shim.Success(nil) -} - -func (s *SmartContract) queryAllCars(APIstub shim.ChaincodeStubInterface) sc.Response { - - startKey := "CAR0" - endKey := "CAR999" - - resultsIterator, err := APIstub.GetStateByRange(startKey, endKey) - if err != nil { - return shim.Error(err.Error()) - } - defer resultsIterator.Close() - - // buffer is a JSON array containing QueryResults - var buffer bytes.Buffer - buffer.WriteString("[") - - bArrayMemberAlreadyWritten := false - for resultsIterator.HasNext() { - queryResponse, err := resultsIterator.Next() - if err != nil { - return shim.Error(err.Error()) - } - // Add a comma before array members, suppress it for the first array member - if bArrayMemberAlreadyWritten == true { - buffer.WriteString(",") - } - buffer.WriteString("{\"Key\":") - buffer.WriteString("\"") - buffer.WriteString(queryResponse.Key) - buffer.WriteString("\"") - - buffer.WriteString(", \"Record\":") - // Record is a JSON object, so we write as-is - buffer.WriteString(string(queryResponse.Value)) - buffer.WriteString("}") - bArrayMemberAlreadyWritten = true - } - buffer.WriteString("]") - - fmt.Printf("- queryAllCars:\n%s\n", buffer.String()) - - return shim.Success(buffer.Bytes()) -} - -func (s *SmartContract) changeCarOwner(APIstub shim.ChaincodeStubInterface, args []string) sc.Response { - - if len(args) != 2 { - return shim.Error("Incorrect number of arguments. Expecting 2") - } - - carAsBytes, _ := APIstub.GetState(args[0]) - car := Car{} - - json.Unmarshal(carAsBytes, &car) - car.Owner = args[1] - - carAsBytes, _ = json.Marshal(car) - APIstub.PutState(args[0], carAsBytes) - - return shim.Success(nil) -} - -// The main function is only relevant in unit test mode. Only included here for completeness. 
-func main() { - - // Create a new Smart Contract - err := shim.Start(new(SmartContract)) - if err != nil { - fmt.Printf("Error creating new Smart Contract: %s", err) - } -} diff --git a/app/platform/fabric/e2e-test/fabric-samples/chaincode/fabcar/java/.gitignore b/app/platform/fabric/e2e-test/fabric-samples/chaincode/fabcar/java/.gitignore deleted file mode 100644 index 7005557f9..000000000 --- a/app/platform/fabric/e2e-test/fabric-samples/chaincode/fabcar/java/.gitignore +++ /dev/null @@ -1,61 +0,0 @@ - -# -# SPDX-License-Identifier: Apache-2.0 -# - -# Compiled class file -*.class - -# Log file -*.log - -# BlueJ files -*.ctxt - -# Mobile Tools for Java (J2ME) -.mtj.tmp/ - -# Package Files -*.jar -*.war -*.nar -*.ear -*.zip -*.tar.gz -*.rar - -# virtual machine crash logs, see http://www.java.com/en/download/help/error_hotspot.xml -hs_err_pid* - -# Gradle -.gradle -/build/ - -# Ignore Gradle GUI config -gradle-app.setting - -# Avoid ignoring Gradle wrapper jar file (.jar files are usually ignored) -!gradle-wrapper.jar - -# Cache of project -.gradletasknamecache - -# # Work around https://youtrack.jetbrains.com/issue/IDEA-116898 -# gradle/wrapper/gradle-wrapper.properties - -# Eclipse files -.project -.classpath -.metadata -bin/ -tmp/ -*.tmp -*.bak -*.swp -*~.nib -local.properties -.settings/ -.loadpath -.recommenders -.externalToolBuilders/ -*.launch diff --git a/app/platform/fabric/e2e-test/fabric-samples/chaincode/fabcar/java/README.md b/app/platform/fabric/e2e-test/fabric-samples/chaincode/fabcar/java/README.md deleted file mode 100644 index 581c0a4ce..000000000 --- a/app/platform/fabric/e2e-test/fabric-samples/chaincode/fabcar/java/README.md +++ /dev/null @@ -1,14 +0,0 @@ -# Java FabCar contract sample - -The directions for using this sample are documented in the Hyperledger Fabric -[Writing Your First Application](https://hyperledger-fabric.readthedocs.io/en/latest/write_first_app.html) tutorial. 
- -The tutorial is based on JavaScript, however the same concepts are applicable when using Java. - -To install and instantiate the Java version of `FabCar`, use the following command instead of the command shown in the [Launch the network](https://hyperledger-fabric.readthedocs.io/en/release-1.4/write_first_app.html#launch-the-network) section of the tutorial: - -``` -./startFabric.sh javascript -``` - -*NOTE:* After navigating to the documentation, choose the documentation version that matches your version of Fabric diff --git a/app/platform/fabric/e2e-test/fabric-samples/chaincode/fabcar/java/build.gradle b/app/platform/fabric/e2e-test/fabric-samples/chaincode/fabcar/java/build.gradle deleted file mode 100644 index 50e8f5243..000000000 --- a/app/platform/fabric/e2e-test/fabric-samples/chaincode/fabcar/java/build.gradle +++ /dev/null @@ -1,81 +0,0 @@ -/* - * SPDX-License-Identifier: Apache-2.0 - */ - -plugins { - id 'checkstyle' - id 'com.github.johnrengelman.shadow' version '2.0.4' - id 'java-library' - id 'jacoco' -} - -group 'org.hyperledger.fabric.samples' -version '1.0-SNAPSHOT' - -dependencies { - implementation 'org.hyperledger.fabric-chaincode-java:fabric-chaincode-shim:1.4.3' - implementation 'com.owlike:genson:1.5' - testImplementation 'org.junit.jupiter:junit-jupiter:5.4.2' - testImplementation 'org.assertj:assertj-core:3.11.1' - testImplementation 'org.mockito:mockito-core:2.+' -} - -repositories { - maven { - url "https://nexus.hyperledger.org/content/repositories/snapshots/" - } - jcenter() - maven { - url 'https://jitpack.io' - } -} - -checkstyle { - toolVersion '8.21' - configFile file("config/checkstyle/checkstyle.xml") -} - -checkstyleMain { - source ='src/main/java' -} - -checkstyleTest { - source ='src/test/java' -} - -shadowJar { - baseName = 'chaincode' - version = null - classifier = null - manifest { - attributes 'Main-Class': 'org.hyperledger.fabric.contract.ContractRouter' - } -} - -jacocoTestCoverageVerification { - afterEvaluate { - 
classDirectories = files(classDirectories.files.collect { - fileTree(dir: it, exclude: [ - 'org/hyperledger/fabric/samples/fabcar/Start.*' - ]) - }) - } - violationRules { - rule { - limit { - minimum = 1.0 - } - } - } - - finalizedBy jacocoTestReport -} - -test { - useJUnitPlatform() - testLogging { - events "passed", "skipped", "failed" - } -} - -check.dependsOn jacocoTestCoverageVerification diff --git a/app/platform/fabric/e2e-test/fabric-samples/chaincode/fabcar/java/config/checkstyle/checkstyle.xml b/app/platform/fabric/e2e-test/fabric-samples/chaincode/fabcar/java/config/checkstyle/checkstyle.xml deleted file mode 100644 index 94317559e..000000000 --- a/app/platform/fabric/e2e-test/fabric-samples/chaincode/fabcar/java/config/checkstyle/checkstyle.xml +++ /dev/null @@ -1,178 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - \ No newline at end of file diff --git a/app/platform/fabric/e2e-test/fabric-samples/chaincode/fabcar/java/config/checkstyle/suppressions.xml b/app/platform/fabric/e2e-test/fabric-samples/chaincode/fabcar/java/config/checkstyle/suppressions.xml deleted file mode 100644 index 8c44b0a03..000000000 --- a/app/platform/fabric/e2e-test/fabric-samples/chaincode/fabcar/java/config/checkstyle/suppressions.xml +++ /dev/null @@ -1,9 +0,0 @@ - - - - - - - diff --git a/app/platform/fabric/e2e-test/fabric-samples/chaincode/fabcar/java/gradle/wrapper/gradle-wrapper.jar b/app/platform/fabric/e2e-test/fabric-samples/chaincode/fabcar/java/gradle/wrapper/gradle-wrapper.jar deleted file mode 100644 index 29953ea14..000000000 Binary files a/app/platform/fabric/e2e-test/fabric-samples/chaincode/fabcar/java/gradle/wrapper/gradle-wrapper.jar and /dev/null differ diff --git 
a/app/platform/fabric/e2e-test/fabric-samples/chaincode/fabcar/java/gradle/wrapper/gradle-wrapper.properties b/app/platform/fabric/e2e-test/fabric-samples/chaincode/fabcar/java/gradle/wrapper/gradle-wrapper.properties deleted file mode 100644 index e0b3fb8d7..000000000 --- a/app/platform/fabric/e2e-test/fabric-samples/chaincode/fabcar/java/gradle/wrapper/gradle-wrapper.properties +++ /dev/null @@ -1,5 +0,0 @@ -distributionBase=GRADLE_USER_HOME -distributionPath=wrapper/dists -distributionUrl=https\://services.gradle.org/distributions/gradle-4.10.2-bin.zip -zipStoreBase=GRADLE_USER_HOME -zipStorePath=wrapper/dists diff --git a/app/platform/fabric/e2e-test/fabric-samples/chaincode/fabcar/java/gradlew b/app/platform/fabric/e2e-test/fabric-samples/chaincode/fabcar/java/gradlew deleted file mode 100755 index cccdd3d51..000000000 --- a/app/platform/fabric/e2e-test/fabric-samples/chaincode/fabcar/java/gradlew +++ /dev/null @@ -1,172 +0,0 @@ -#!/usr/bin/env sh - -############################################################################## -## -## Gradle start up script for UN*X -## -############################################################################## - -# Attempt to set APP_HOME -# Resolve links: $0 may be a link -PRG="$0" -# Need this for relative symlinks. -while [ -h "$PRG" ] ; do - ls=`ls -ld "$PRG"` - link=`expr "$ls" : '.*-> \(.*\)$'` - if expr "$link" : '/.*' > /dev/null; then - PRG="$link" - else - PRG=`dirname "$PRG"`"/$link" - fi -done -SAVED="`pwd`" -cd "`dirname \"$PRG\"`/" >/dev/null -APP_HOME="`pwd -P`" -cd "$SAVED" >/dev/null - -APP_NAME="Gradle" -APP_BASE_NAME=`basename "$0"` - -# Add default JVM options here. You can also use JAVA_OPTS and GRADLE_OPTS to pass JVM options to this script. -DEFAULT_JVM_OPTS="" - -# Use the maximum available, or set MAX_FD != -1 to use that value. -MAX_FD="maximum" - -warn () { - echo "$*" -} - -die () { - echo - echo "$*" - echo - exit 1 -} - -# OS specific support (must be 'true' or 'false'). 
-cygwin=false -msys=false -darwin=false -nonstop=false -case "`uname`" in - CYGWIN* ) - cygwin=true - ;; - Darwin* ) - darwin=true - ;; - MINGW* ) - msys=true - ;; - NONSTOP* ) - nonstop=true - ;; -esac - -CLASSPATH=$APP_HOME/gradle/wrapper/gradle-wrapper.jar - -# Determine the Java command to use to start the JVM. -if [ -n "$JAVA_HOME" ] ; then - if [ -x "$JAVA_HOME/jre/sh/java" ] ; then - # IBM's JDK on AIX uses strange locations for the executables - JAVACMD="$JAVA_HOME/jre/sh/java" - else - JAVACMD="$JAVA_HOME/bin/java" - fi - if [ ! -x "$JAVACMD" ] ; then - die "ERROR: JAVA_HOME is set to an invalid directory: $JAVA_HOME - -Please set the JAVA_HOME variable in your environment to match the -location of your Java installation." - fi -else - JAVACMD="java" - which java >/dev/null 2>&1 || die "ERROR: JAVA_HOME is not set and no 'java' command could be found in your PATH. - -Please set the JAVA_HOME variable in your environment to match the -location of your Java installation." -fi - -# Increase the maximum file descriptors if we can. -if [ "$cygwin" = "false" -a "$darwin" = "false" -a "$nonstop" = "false" ] ; then - MAX_FD_LIMIT=`ulimit -H -n` - if [ $? -eq 0 ] ; then - if [ "$MAX_FD" = "maximum" -o "$MAX_FD" = "max" ] ; then - MAX_FD="$MAX_FD_LIMIT" - fi - ulimit -n $MAX_FD - if [ $? 
-ne 0 ] ; then - warn "Could not set maximum file descriptor limit: $MAX_FD" - fi - else - warn "Could not query maximum file descriptor limit: $MAX_FD_LIMIT" - fi -fi - -# For Darwin, add options to specify how the application appears in the dock -if $darwin; then - GRADLE_OPTS="$GRADLE_OPTS \"-Xdock:name=$APP_NAME\" \"-Xdock:icon=$APP_HOME/media/gradle.icns\"" -fi - -# For Cygwin, switch paths to Windows format before running java -if $cygwin ; then - APP_HOME=`cygpath --path --mixed "$APP_HOME"` - CLASSPATH=`cygpath --path --mixed "$CLASSPATH"` - JAVACMD=`cygpath --unix "$JAVACMD"` - - # We build the pattern for arguments to be converted via cygpath - ROOTDIRSRAW=`find -L / -maxdepth 1 -mindepth 1 -type d 2>/dev/null` - SEP="" - for dir in $ROOTDIRSRAW ; do - ROOTDIRS="$ROOTDIRS$SEP$dir" - SEP="|" - done - OURCYGPATTERN="(^($ROOTDIRS))" - # Add a user-defined pattern to the cygpath arguments - if [ "$GRADLE_CYGPATTERN" != "" ] ; then - OURCYGPATTERN="$OURCYGPATTERN|($GRADLE_CYGPATTERN)" - fi - # Now convert the arguments - kludge to limit ourselves to /bin/sh - i=0 - for arg in "$@" ; do - CHECK=`echo "$arg"|egrep -c "$OURCYGPATTERN" -` - CHECK2=`echo "$arg"|egrep -c "^-"` ### Determine if an option - - if [ $CHECK -ne 0 ] && [ $CHECK2 -eq 0 ] ; then ### Added a condition - eval `echo args$i`=`cygpath --path --ignore --mixed "$arg"` - else - eval `echo args$i`="\"$arg\"" - fi - i=$((i+1)) - done - case $i in - (0) set -- ;; - (1) set -- "$args0" ;; - (2) set -- "$args0" "$args1" ;; - (3) set -- "$args0" "$args1" "$args2" ;; - (4) set -- "$args0" "$args1" "$args2" "$args3" ;; - (5) set -- "$args0" "$args1" "$args2" "$args3" "$args4" ;; - (6) set -- "$args0" "$args1" "$args2" "$args3" "$args4" "$args5" ;; - (7) set -- "$args0" "$args1" "$args2" "$args3" "$args4" "$args5" "$args6" ;; - (8) set -- "$args0" "$args1" "$args2" "$args3" "$args4" "$args5" "$args6" "$args7" ;; - (9) set -- "$args0" "$args1" "$args2" "$args3" "$args4" "$args5" "$args6" "$args7" "$args8" ;; 
- esac -fi - -# Escape application args -save () { - for i do printf %s\\n "$i" | sed "s/'/'\\\\''/g;1s/^/'/;\$s/\$/' \\\\/" ; done - echo " " -} -APP_ARGS=$(save "$@") - -# Collect all arguments for the java command, following the shell quoting and substitution rules -eval set -- $DEFAULT_JVM_OPTS $JAVA_OPTS $GRADLE_OPTS "\"-Dorg.gradle.appname=$APP_BASE_NAME\"" -classpath "\"$CLASSPATH\"" org.gradle.wrapper.GradleWrapperMain "$APP_ARGS" - -# by default we should be in the correct project dir, but when run from Finder on Mac, the cwd is wrong -if [ "$(uname)" = "Darwin" ] && [ "$HOME" = "$PWD" ]; then - cd "$(dirname "$0")" -fi - -exec "$JAVACMD" "$@" diff --git a/app/platform/fabric/e2e-test/fabric-samples/chaincode/fabcar/java/gradlew.bat b/app/platform/fabric/e2e-test/fabric-samples/chaincode/fabcar/java/gradlew.bat deleted file mode 100644 index e95643d6a..000000000 --- a/app/platform/fabric/e2e-test/fabric-samples/chaincode/fabcar/java/gradlew.bat +++ /dev/null @@ -1,84 +0,0 @@ -@if "%DEBUG%" == "" @echo off -@rem ########################################################################## -@rem -@rem Gradle startup script for Windows -@rem -@rem ########################################################################## - -@rem Set local scope for the variables with windows NT shell -if "%OS%"=="Windows_NT" setlocal - -set DIRNAME=%~dp0 -if "%DIRNAME%" == "" set DIRNAME=. -set APP_BASE_NAME=%~n0 -set APP_HOME=%DIRNAME% - -@rem Add default JVM options here. You can also use JAVA_OPTS and GRADLE_OPTS to pass JVM options to this script. -set DEFAULT_JVM_OPTS= - -@rem Find java.exe -if defined JAVA_HOME goto findJavaFromJavaHome - -set JAVA_EXE=java.exe -%JAVA_EXE% -version >NUL 2>&1 -if "%ERRORLEVEL%" == "0" goto init - -echo. -echo ERROR: JAVA_HOME is not set and no 'java' command could be found in your PATH. -echo. -echo Please set the JAVA_HOME variable in your environment to match the -echo location of your Java installation. 
- -goto fail - -:findJavaFromJavaHome -set JAVA_HOME=%JAVA_HOME:"=% -set JAVA_EXE=%JAVA_HOME%/bin/java.exe - -if exist "%JAVA_EXE%" goto init - -echo. -echo ERROR: JAVA_HOME is set to an invalid directory: %JAVA_HOME% -echo. -echo Please set the JAVA_HOME variable in your environment to match the -echo location of your Java installation. - -goto fail - -:init -@rem Get command-line arguments, handling Windows variants - -if not "%OS%" == "Windows_NT" goto win9xME_args - -:win9xME_args -@rem Slurp the command line arguments. -set CMD_LINE_ARGS= -set _SKIP=2 - -:win9xME_args_slurp -if "x%~1" == "x" goto execute - -set CMD_LINE_ARGS=%* - -:execute -@rem Setup the command line - -set CLASSPATH=%APP_HOME%\gradle\wrapper\gradle-wrapper.jar - -@rem Execute Gradle -"%JAVA_EXE%" %DEFAULT_JVM_OPTS% %JAVA_OPTS% %GRADLE_OPTS% "-Dorg.gradle.appname=%APP_BASE_NAME%" -classpath "%CLASSPATH%" org.gradle.wrapper.GradleWrapperMain %CMD_LINE_ARGS% - -:end -@rem End local scope for the variables with windows NT shell -if "%ERRORLEVEL%"=="0" goto mainEnd - -:fail -rem Set variable GRADLE_EXIT_CONSOLE if you need the _script_ return code instead of -rem the _cmd.exe /c_ return code! 
-if not "" == "%GRADLE_EXIT_CONSOLE%" exit 1 -exit /b 1 - -:mainEnd -if "%OS%"=="Windows_NT" endlocal - -:omega diff --git a/app/platform/fabric/e2e-test/fabric-samples/chaincode/fabcar/java/settings.gradle b/app/platform/fabric/e2e-test/fabric-samples/chaincode/fabcar/java/settings.gradle deleted file mode 100644 index 4d04f71e0..000000000 --- a/app/platform/fabric/e2e-test/fabric-samples/chaincode/fabcar/java/settings.gradle +++ /dev/null @@ -1,5 +0,0 @@ -/* - * SPDX-License-Identifier: Apache-2.0 - */ - -rootProject.name = 'java-chaincode-bootstrap' diff --git a/app/platform/fabric/e2e-test/fabric-samples/chaincode/fabcar/java/src/main/java/org/hyperledger/fabric/samples/fabcar/Car.java b/app/platform/fabric/e2e-test/fabric-samples/chaincode/fabcar/java/src/main/java/org/hyperledger/fabric/samples/fabcar/Car.java deleted file mode 100644 index a67204a74..000000000 --- a/app/platform/fabric/e2e-test/fabric-samples/chaincode/fabcar/java/src/main/java/org/hyperledger/fabric/samples/fabcar/Car.java +++ /dev/null @@ -1,79 +0,0 @@ -/* - * SPDX-License-Identifier: Apache-2.0 - */ - -package org.hyperledger.fabric.samples.fabcar; - -import java.util.Objects; - -import org.hyperledger.fabric.contract.annotation.DataType; -import org.hyperledger.fabric.contract.annotation.Property; - -import com.owlike.genson.annotation.JsonProperty; - -@DataType() -public final class Car { - - @Property() - private final String make; - - @Property() - private final String model; - - @Property() - private final String color; - - @Property() - private final String owner; - - public String getMake() { - return make; - } - - public String getModel() { - return model; - } - - public String getColor() { - return color; - } - - public String getOwner() { - return owner; - } - - public Car(@JsonProperty("make") final String make, @JsonProperty("model") final String model, - @JsonProperty("color") final String color, @JsonProperty("owner") final String owner) { - this.make = make; - this.model = 
model; - this.color = color; - this.owner = owner; - } - - @Override - public boolean equals(final Object obj) { - if (this == obj) { - return true; - } - - if ((obj == null) || (getClass() != obj.getClass())) { - return false; - } - - Car other = (Car) obj; - - return Objects.deepEquals(new String[] {getMake(), getModel(), getColor(), getOwner()}, - new String[] {other.getMake(), other.getModel(), other.getColor(), other.getOwner()}); - } - - @Override - public int hashCode() { - return Objects.hash(getMake(), getModel(), getColor(), getOwner()); - } - - @Override - public String toString() { - return this.getClass().getSimpleName() + "@" + Integer.toHexString(hashCode()) + " [make=" + make + ", model=" - + model + ", color=" + color + ", owner=" + owner + "]"; - } -} diff --git a/app/platform/fabric/e2e-test/fabric-samples/chaincode/fabcar/java/src/main/java/org/hyperledger/fabric/samples/fabcar/FabCar.java b/app/platform/fabric/e2e-test/fabric-samples/chaincode/fabcar/java/src/main/java/org/hyperledger/fabric/samples/fabcar/FabCar.java deleted file mode 100644 index a4e8b3539..000000000 --- a/app/platform/fabric/e2e-test/fabric-samples/chaincode/fabcar/java/src/main/java/org/hyperledger/fabric/samples/fabcar/FabCar.java +++ /dev/null @@ -1,190 +0,0 @@ -/* - * SPDX-License-Identifier: Apache-2.0 - */ - -package org.hyperledger.fabric.samples.fabcar; - -import java.util.ArrayList; -import java.util.List; - -import org.hyperledger.fabric.contract.Context; -import org.hyperledger.fabric.contract.ContractInterface; -import org.hyperledger.fabric.contract.annotation.Contact; -import org.hyperledger.fabric.contract.annotation.Contract; -import org.hyperledger.fabric.contract.annotation.Default; -import org.hyperledger.fabric.contract.annotation.Info; -import org.hyperledger.fabric.contract.annotation.License; -import org.hyperledger.fabric.contract.annotation.Transaction; -import org.hyperledger.fabric.shim.ChaincodeException; -import 
org.hyperledger.fabric.shim.ChaincodeStub; -import org.hyperledger.fabric.shim.ledger.KeyValue; -import org.hyperledger.fabric.shim.ledger.QueryResultsIterator; - -import com.owlike.genson.Genson; - -/** - * Java implementation of the Fabric Car Contract described in the Writing Your - * First Application tutorial - */ -@Contract( - name = "FabCar", - info = @Info( - title = "FabCar contract", - description = "The hyperlegendary car contract", - version = "0.0.1-SNAPSHOT", - license = @License( - name = "Apache 2.0 License", - url = "http://www.apache.org/licenses/LICENSE-2.0.html"), - contact = @Contact( - email = "f.carr@example.com", - name = "F Carr", - url = "https://hyperledger.example.com"))) -@Default -public final class FabCar implements ContractInterface { - - private final Genson genson = new Genson(); - - private enum FabCarErrors { - CAR_NOT_FOUND, - CAR_ALREADY_EXISTS - } - - /** - * Retrieves a car with the specified key from the ledger. - * - * @param ctx the transaction context - * @param key the key - * @return the Car found on the ledger if there was one - */ - @Transaction() - public Car queryCar(final Context ctx, final String key) { - ChaincodeStub stub = ctx.getStub(); - String carState = stub.getStringState(key); - - if (carState.isEmpty()) { - String errorMessage = String.format("Car %s does not exist", key); - System.out.println(errorMessage); - throw new ChaincodeException(errorMessage, FabCarErrors.CAR_NOT_FOUND.toString()); - } - - Car car = genson.deserialize(carState, Car.class); - - return car; - } - - /** - * Creates some initial Cars on the ledger. 
- * - * @param ctx the transaction context - */ - @Transaction() - public void initLedger(final Context ctx) { - ChaincodeStub stub = ctx.getStub(); - - String[] carData = { - "{ \"make\": \"Toyota\", \"model\": \"Prius\", \"color\": \"blue\", \"owner\": \"Tomoko\" }", - "{ \"make\": \"Ford\", \"model\": \"Mustang\", \"color\": \"red\", \"owner\": \"Brad\" }", - "{ \"make\": \"Hyundai\", \"model\": \"Tucson\", \"color\": \"green\", \"owner\": \"Jin Soo\" }", - "{ \"make\": \"Volkswagen\", \"model\": \"Passat\", \"color\": \"yellow\", \"owner\": \"Max\" }", - "{ \"make\": \"Tesla\", \"model\": \"S\", \"color\": \"black\", \"owner\": \"Adrian\" }", - "{ \"make\": \"Peugeot\", \"model\": \"205\", \"color\": \"purple\", \"owner\": \"Michel\" }", - "{ \"make\": \"Chery\", \"model\": \"S22L\", \"color\": \"white\", \"owner\": \"Aarav\" }", - "{ \"make\": \"Fiat\", \"model\": \"Punto\", \"color\": \"violet\", \"owner\": \"Pari\" }", - "{ \"make\": \"Tata\", \"model\": \"nano\", \"color\": \"indigo\", \"owner\": \"Valeria\" }", - "{ \"make\": \"Holden\", \"model\": \"Barina\", \"color\": \"brown\", \"owner\": \"Shotaro\" }" - }; - - for (int i = 0; i < carData.length; i++) { - String key = String.format("CAR%03d", i); - - Car car = genson.deserialize(carData[i], Car.class); - String carState = genson.serialize(car); - stub.putStringState(key, carState); - } - } - - /** - * Creates a new car on the ledger. 
- * - * @param ctx the transaction context - * @param key the key for the new car - * @param make the make of the new car - * @param model the model of the new car - * @param color the color of the new car - * @param owner the owner of the new car - * @return the created Car - */ - @Transaction() - public Car createCar(final Context ctx, final String key, final String make, final String model, - final String color, final String owner) { - ChaincodeStub stub = ctx.getStub(); - - String carState = stub.getStringState(key); - if (!carState.isEmpty()) { - String errorMessage = String.format("Car %s already exists", key); - System.out.println(errorMessage); - throw new ChaincodeException(errorMessage, FabCarErrors.CAR_ALREADY_EXISTS.toString()); - } - - Car car = new Car(make, model, color, owner); - carState = genson.serialize(car); - stub.putStringState(key, carState); - - return car; - } - - /** - * Retrieves every car between CAR0 and CAR999 from the ledger. - * - * @param ctx the transaction context - * @return array of Cars found on the ledger - */ - @Transaction() - public Car[] queryAllCars(final Context ctx) { - ChaincodeStub stub = ctx.getStub(); - - final String startKey = "CAR0"; - final String endKey = "CAR999"; - List cars = new ArrayList(); - - QueryResultsIterator results = stub.getStateByRange(startKey, endKey); - - for (KeyValue result: results) { - Car car = genson.deserialize(result.getStringValue(), Car.class); - cars.add(car); - } - - Car[] response = cars.toArray(new Car[cars.size()]); - - return response; - } - - /** - * Changes the owner of a car on the ledger. 
- * - * @param ctx the transaction context - * @param key the key - * @param newOwner the new owner - * @return the updated Car - */ - @Transaction() - public Car changeCarOwner(final Context ctx, final String key, final String newOwner) { - ChaincodeStub stub = ctx.getStub(); - - String carState = stub.getStringState(key); - - if (carState.isEmpty()) { - String errorMessage = String.format("Car %s does not exist", key); - System.out.println(errorMessage); - throw new ChaincodeException(errorMessage, FabCarErrors.CAR_NOT_FOUND.toString()); - } - - Car car = genson.deserialize(carState, Car.class); - - Car newCar = new Car(car.getMake(), car.getModel(), car.getColor(), newOwner); - String newCarState = genson.serialize(newCar); - stub.putStringState(key, newCarState); - - return newCar; - } -} diff --git a/app/platform/fabric/e2e-test/fabric-samples/chaincode/fabcar/java/src/test/java/org/hyperledger/fabric/samples/fabcar/CarTest.java b/app/platform/fabric/e2e-test/fabric-samples/chaincode/fabcar/java/src/test/java/org/hyperledger/fabric/samples/fabcar/CarTest.java deleted file mode 100644 index 5c7b4fcff..000000000 --- a/app/platform/fabric/e2e-test/fabric-samples/chaincode/fabcar/java/src/test/java/org/hyperledger/fabric/samples/fabcar/CarTest.java +++ /dev/null @@ -1,74 +0,0 @@ -/* - * SPDX-License-Identifier: Apache-2.0 - */ - -package org.hyperledger.fabric.samples.fabcar; - -import static org.assertj.core.api.Assertions.assertThat; - -import org.junit.jupiter.api.Nested; -import org.junit.jupiter.api.Test; - -public final class CarTest { - - @Nested - class Equality { - - @Test - public void isReflexive() { - Car car = new Car("Toyota", "Prius", "blue", "Tomoko"); - - assertThat(car).isEqualTo(car); - } - - @Test - public void isSymmetric() { - Car carA = new Car("Toyota", "Prius", "blue", "Tomoko"); - Car carB = new Car("Toyota", "Prius", "blue", "Tomoko"); - - assertThat(carA).isEqualTo(carB); - assertThat(carB).isEqualTo(carA); - } - - @Test - public void 
isTransitive() { - Car carA = new Car("Toyota", "Prius", "blue", "Tomoko"); - Car carB = new Car("Toyota", "Prius", "blue", "Tomoko"); - Car carC = new Car("Toyota", "Prius", "blue", "Tomoko"); - - assertThat(carA).isEqualTo(carB); - assertThat(carB).isEqualTo(carC); - assertThat(carA).isEqualTo(carC); - } - - @Test - public void handlesInequality() { - Car carA = new Car("Toyota", "Prius", "blue", "Tomoko"); - Car carB = new Car("Ford", "Mustang", "red", "Brad"); - - assertThat(carA).isNotEqualTo(carB); - } - - @Test - public void handlesOtherObjects() { - Car carA = new Car("Toyota", "Prius", "blue", "Tomoko"); - String carB = "not a car"; - - assertThat(carA).isNotEqualTo(carB); - } - - @Test - public void handlesNull() { - Car car = new Car("Toyota", "Prius", "blue", "Tomoko"); - - assertThat(car).isNotEqualTo(null); - } - } - - @Test - public void toStringIdentifiesCar() { - Car car = new Car("Toyota", "Prius", "blue", "Tomoko"); - - assertThat(car.toString()).isEqualTo("Car@61a77e4f [make=Toyota, model=Prius, color=blue, owner=Tomoko]"); - } -} diff --git a/app/platform/fabric/e2e-test/fabric-samples/chaincode/fabcar/java/src/test/java/org/hyperledger/fabric/samples/fabcar/FabCarTest.java b/app/platform/fabric/e2e-test/fabric-samples/chaincode/fabcar/java/src/test/java/org/hyperledger/fabric/samples/fabcar/FabCarTest.java deleted file mode 100644 index 0579a5380..000000000 --- a/app/platform/fabric/e2e-test/fabric-samples/chaincode/fabcar/java/src/test/java/org/hyperledger/fabric/samples/fabcar/FabCarTest.java +++ /dev/null @@ -1,262 +0,0 @@ -/* - * SPDX-License-Identifier: Apache-2.0 - */ - -package org.hyperledger.fabric.samples.fabcar; - -import static org.assertj.core.api.Assertions.assertThat; -import static org.assertj.core.api.ThrowableAssert.catchThrowable; -import static org.mockito.Mockito.inOrder; -import static org.mockito.Mockito.mock; -import static org.mockito.Mockito.verifyZeroInteractions; -import static org.mockito.Mockito.when; - -import 
java.util.ArrayList; -import java.util.Iterator; -import java.util.List; - -import org.hyperledger.fabric.contract.Context; -import org.hyperledger.fabric.shim.ChaincodeException; -import org.hyperledger.fabric.shim.ChaincodeStub; -import org.hyperledger.fabric.shim.ledger.KeyValue; -import org.hyperledger.fabric.shim.ledger.QueryResultsIterator; -import org.junit.jupiter.api.Nested; -import org.junit.jupiter.api.Test; -import org.mockito.InOrder; - -public final class FabCarTest { - - private final class MockKeyValue implements KeyValue { - - private final String key; - private final String value; - - MockKeyValue(final String key, final String value) { - super(); - this.key = key; - this.value = value; - } - - @Override - public String getKey() { - return this.key; - } - - @Override - public String getStringValue() { - return this.value; - } - - @Override - public byte[] getValue() { - return this.value.getBytes(); - } - - } - - private final class MockCarResultsIterator implements QueryResultsIterator { - - private final List carList; - - MockCarResultsIterator() { - super(); - - carList = new ArrayList(); - - carList.add(new MockKeyValue("CAR000", - "{\"color\":\"blue\",\"make\":\"Toyota\",\"model\":\"Prius\",\"owner\":\"Tomoko\"}")); - carList.add(new MockKeyValue("CAR001", - "{\"color\":\"red\",\"make\":\"Ford\",\"model\":\"Mustang\",\"owner\":\"Brad\"}")); - carList.add(new MockKeyValue("CAR002", - "{\"color\":\"green\",\"make\":\"Hyundai\",\"model\":\"Tucson\",\"owner\":\"Jin Soo\"}")); - carList.add(new MockKeyValue("CAR007", - "{\"color\":\"violet\",\"make\":\"Fiat\",\"model\":\"Punto\",\"owner\":\"Pari\"}")); - carList.add(new MockKeyValue("CAR009", - "{\"color\":\"brown\",\"make\":\"Holden\",\"model\":\"Barina\",\"owner\":\"Shotaro\"}")); - } - - @Override - public Iterator iterator() { - return carList.iterator(); - } - - @Override - public void close() throws Exception { - // do nothing - } - - } - - @Test - public void invokeUnknownTransaction() { - 
FabCar contract = new FabCar(); - Context ctx = mock(Context.class); - - Throwable thrown = catchThrowable(() -> { - contract.unknownTransaction(ctx); - }); - - assertThat(thrown).isInstanceOf(ChaincodeException.class).hasNoCause() - .hasMessage("Undefined contract method called"); - assertThat(((ChaincodeException) thrown).getPayload()).isEqualTo(null); - - verifyZeroInteractions(ctx); - } - - @Nested - class InvokeQueryCarTransaction { - - @Test - public void whenCarExists() { - FabCar contract = new FabCar(); - Context ctx = mock(Context.class); - ChaincodeStub stub = mock(ChaincodeStub.class); - when(ctx.getStub()).thenReturn(stub); - when(stub.getStringState("CAR000")) - .thenReturn("{\"color\":\"blue\",\"make\":\"Toyota\",\"model\":\"Prius\",\"owner\":\"Tomoko\"}"); - - Car car = contract.queryCar(ctx, "CAR000"); - - assertThat(car).isEqualTo(new Car("Toyota", "Prius", "blue", "Tomoko")); - } - - @Test - public void whenCarDoesNotExist() { - FabCar contract = new FabCar(); - Context ctx = mock(Context.class); - ChaincodeStub stub = mock(ChaincodeStub.class); - when(ctx.getStub()).thenReturn(stub); - when(stub.getStringState("CAR000")).thenReturn(""); - - Throwable thrown = catchThrowable(() -> { - contract.queryCar(ctx, "CAR000"); - }); - - assertThat(thrown).isInstanceOf(ChaincodeException.class).hasNoCause() - .hasMessage("Car CAR000 does not exist"); - assertThat(((ChaincodeException) thrown).getPayload()).isEqualTo("CAR_NOT_FOUND".getBytes()); - } - } - - @Test - void invokeInitLedgerTransaction() { - FabCar contract = new FabCar(); - Context ctx = mock(Context.class); - ChaincodeStub stub = mock(ChaincodeStub.class); - when(ctx.getStub()).thenReturn(stub); - - contract.initLedger(ctx); - - InOrder inOrder = inOrder(stub); - inOrder.verify(stub).putStringState("CAR000", - "{\"color\":\"blue\",\"make\":\"Toyota\",\"model\":\"Prius\",\"owner\":\"Tomoko\"}"); - inOrder.verify(stub).putStringState("CAR001", - 
"{\"color\":\"red\",\"make\":\"Ford\",\"model\":\"Mustang\",\"owner\":\"Brad\"}"); - inOrder.verify(stub).putStringState("CAR002", - "{\"color\":\"green\",\"make\":\"Hyundai\",\"model\":\"Tucson\",\"owner\":\"Jin Soo\"}"); - inOrder.verify(stub).putStringState("CAR003", - "{\"color\":\"yellow\",\"make\":\"Volkswagen\",\"model\":\"Passat\",\"owner\":\"Max\"}"); - inOrder.verify(stub).putStringState("CAR004", - "{\"color\":\"black\",\"make\":\"Tesla\",\"model\":\"S\",\"owner\":\"Adrian\"}"); - inOrder.verify(stub).putStringState("CAR005", - "{\"color\":\"purple\",\"make\":\"Peugeot\",\"model\":\"205\",\"owner\":\"Michel\"}"); - inOrder.verify(stub).putStringState("CAR006", - "{\"color\":\"white\",\"make\":\"Chery\",\"model\":\"S22L\",\"owner\":\"Aarav\"}"); - inOrder.verify(stub).putStringState("CAR007", - "{\"color\":\"violet\",\"make\":\"Fiat\",\"model\":\"Punto\",\"owner\":\"Pari\"}"); - inOrder.verify(stub).putStringState("CAR008", - "{\"color\":\"indigo\",\"make\":\"Tata\",\"model\":\"nano\",\"owner\":\"Valeria\"}"); - inOrder.verify(stub).putStringState("CAR009", - "{\"color\":\"brown\",\"make\":\"Holden\",\"model\":\"Barina\",\"owner\":\"Shotaro\"}"); - } - - @Nested - class InvokeCreateCarTransaction { - - @Test - public void whenCarExists() { - FabCar contract = new FabCar(); - Context ctx = mock(Context.class); - ChaincodeStub stub = mock(ChaincodeStub.class); - when(ctx.getStub()).thenReturn(stub); - when(stub.getStringState("CAR000")) - .thenReturn("{\"color\":\"blue\",\"make\":\"Toyota\",\"model\":\"Prius\",\"owner\":\"Tomoko\"}"); - - Throwable thrown = catchThrowable(() -> { - contract.createCar(ctx, "CAR000", "Nissan", "Leaf", "green", "Siobhán"); - }); - - assertThat(thrown).isInstanceOf(ChaincodeException.class).hasNoCause() - .hasMessage("Car CAR000 already exists"); - assertThat(((ChaincodeException) thrown).getPayload()).isEqualTo("CAR_ALREADY_EXISTS".getBytes()); - } - - @Test - public void whenCarDoesNotExist() { - FabCar contract = new 
FabCar(); - Context ctx = mock(Context.class); - ChaincodeStub stub = mock(ChaincodeStub.class); - when(ctx.getStub()).thenReturn(stub); - when(stub.getStringState("CAR000")).thenReturn(""); - - Car car = contract.createCar(ctx, "CAR000", "Nissan", "Leaf", "green", "Siobhán"); - - assertThat(car).isEqualTo(new Car("Nissan", "Leaf", "green", "Siobhán")); - } - } - - @Test - void invokeQueryAllCarsTransaction() { - FabCar contract = new FabCar(); - Context ctx = mock(Context.class); - ChaincodeStub stub = mock(ChaincodeStub.class); - when(ctx.getStub()).thenReturn(stub); - when(stub.getStateByRange("CAR0", "CAR999")).thenReturn(new MockCarResultsIterator()); - - Car[] cars = contract.queryAllCars(ctx); - - final List expectedCars = new ArrayList(); - expectedCars.add(new Car("Toyota", "Prius", "blue", "Tomoko")); - expectedCars.add(new Car("Ford", "Mustang", "red", "Brad")); - expectedCars.add(new Car("Hyundai", "Tucson", "green", "Jin Soo")); - expectedCars.add(new Car("Fiat", "Punto", "violet", "Pari")); - expectedCars.add(new Car("Holden", "Barina", "brown", "Shotaro")); - - assertThat(cars).containsExactlyElementsOf(expectedCars); - } - - @Nested - class ChangeCarOwnerTransaction { - - @Test - public void whenCarExists() { - FabCar contract = new FabCar(); - Context ctx = mock(Context.class); - ChaincodeStub stub = mock(ChaincodeStub.class); - when(ctx.getStub()).thenReturn(stub); - when(stub.getStringState("CAR000")) - .thenReturn("{\"color\":\"blue\",\"make\":\"Toyota\",\"model\":\"Prius\",\"owner\":\"Tomoko\"}"); - - Car car = contract.changeCarOwner(ctx, "CAR000", "Dr Evil"); - - assertThat(car).isEqualTo(new Car("Toyota", "Prius", "blue", "Dr Evil")); - } - - @Test - public void whenCarDoesNotExist() { - FabCar contract = new FabCar(); - Context ctx = mock(Context.class); - ChaincodeStub stub = mock(ChaincodeStub.class); - when(ctx.getStub()).thenReturn(stub); - when(stub.getStringState("CAR000")).thenReturn(""); - - Throwable thrown = catchThrowable(() -> { 
- contract.changeCarOwner(ctx, "CAR000", "Dr Evil"); - }); - - assertThat(thrown).isInstanceOf(ChaincodeException.class).hasNoCause() - .hasMessage("Car CAR000 does not exist"); - assertThat(((ChaincodeException) thrown).getPayload()).isEqualTo("CAR_NOT_FOUND".getBytes()); - } - } -} diff --git a/app/platform/fabric/e2e-test/fabric-samples/chaincode/fabcar/javascript-low-level/fabcar.js b/app/platform/fabric/e2e-test/fabric-samples/chaincode/fabcar/javascript-low-level/fabcar.js deleted file mode 100644 index c8473c371..000000000 --- a/app/platform/fabric/e2e-test/fabric-samples/chaincode/fabcar/javascript-low-level/fabcar.js +++ /dev/null @@ -1,193 +0,0 @@ -/* -# Copyright IBM Corp. All Rights Reserved. -# -# SPDX-License-Identifier: Apache-2.0 -*/ - -'use strict'; -const shim = require('fabric-shim'); -const util = require('util'); - -let Chaincode = class { - - // The Init method is called when the Smart Contract 'fabcar' is instantiated by the blockchain network - // Best practice is to have any Ledger initialization in separate function -- see initLedger() - async Init(stub) { - console.info('=========== Instantiated fabcar chaincode ==========='); - return shim.success(); - } - - // The Invoke method is called as a result of an application request to run the Smart Contract - // 'fabcar'. The calling application program has also specified the particular smart contract - // function to be called, with arguments - async Invoke(stub) { - let ret = stub.getFunctionAndParameters(); - console.info(ret); - - let method = this[ret.fcn]; - if (!method) { - console.error('no function of name:' + ret.fcn + ' found'); - throw new Error('Received unknown function ' + ret.fcn + ' invocation'); - } - try { - let payload = await method(stub, ret.params); - return shim.success(payload); - } catch (err) { - console.log(err); - return shim.error(err); - } - } - - async queryCar(stub, args) { - if (args.length != 1) { - throw new Error('Incorrect number of arguments. 
Expecting CarNumber ex: CAR01'); - } - let carNumber = args[0]; - - let carAsBytes = await stub.getState(carNumber); //get the car from chaincode state - if (!carAsBytes || carAsBytes.toString().length <= 0) { - throw new Error(carNumber + ' does not exist: '); - } - console.log(carAsBytes.toString()); - return carAsBytes; - } - - async initLedger(stub, args) { - console.info('============= START : Initialize Ledger ==========='); - let cars = []; - cars.push({ - make: 'Toyota', - model: 'Prius', - color: 'blue', - owner: 'Tomoko' - }); - cars.push({ - make: 'Ford', - model: 'Mustang', - color: 'red', - owner: 'Brad' - }); - cars.push({ - make: 'Hyundai', - model: 'Tucson', - color: 'green', - owner: 'Jin Soo' - }); - cars.push({ - make: 'Volkswagen', - model: 'Passat', - color: 'yellow', - owner: 'Max' - }); - cars.push({ - make: 'Tesla', - model: 'S', - color: 'black', - owner: 'Adriana' - }); - cars.push({ - make: 'Peugeot', - model: '205', - color: 'purple', - owner: 'Michel' - }); - cars.push({ - make: 'Chery', - model: 'S22L', - color: 'white', - owner: 'Aarav' - }); - cars.push({ - make: 'Fiat', - model: 'Punto', - color: 'violet', - owner: 'Pari' - }); - cars.push({ - make: 'Tata', - model: 'Nano', - color: 'indigo', - owner: 'Valeria' - }); - cars.push({ - make: 'Holden', - model: 'Barina', - color: 'brown', - owner: 'Shotaro' - }); - - for (let i = 0; i < cars.length; i++) { - cars[i].docType = 'car'; - await stub.putState('CAR' + i, Buffer.from(JSON.stringify(cars[i]))); - console.info('Added <--> ', cars[i]); - } - console.info('============= END : Initialize Ledger ==========='); - } - - async createCar(stub, args) { - console.info('============= START : Create Car ==========='); - if (args.length != 5) { - throw new Error('Incorrect number of arguments. 
Expecting 5'); - } - - var car = { - docType: 'car', - make: args[1], - model: args[2], - color: args[3], - owner: args[4] - }; - - await stub.putState(args[0], Buffer.from(JSON.stringify(car))); - console.info('============= END : Create Car ==========='); - } - - async queryAllCars(stub, args) { - - let startKey = 'CAR0'; - let endKey = 'CAR999'; - - let iterator = await stub.getStateByRange(startKey, endKey); - - let allResults = []; - while (true) { - let res = await iterator.next(); - - if (res.value && res.value.value.toString()) { - let jsonRes = {}; - console.log(res.value.value.toString('utf8')); - - jsonRes.Key = res.value.key; - try { - jsonRes.Record = JSON.parse(res.value.value.toString('utf8')); - } catch (err) { - console.log(err); - jsonRes.Record = res.value.value.toString('utf8'); - } - allResults.push(jsonRes); - } - if (res.done) { - console.log('end of data'); - await iterator.close(); - console.info(allResults); - return Buffer.from(JSON.stringify(allResults)); - } - } - } - - async changeCarOwner(stub, args) { - console.info('============= START : changeCarOwner ==========='); - if (args.length != 2) { - throw new Error('Incorrect number of arguments. 
Expecting 2'); - } - - let carAsBytes = await stub.getState(args[0]); - let car = JSON.parse(carAsBytes); - car.owner = args[1]; - - await stub.putState(args[0], Buffer.from(JSON.stringify(car))); - console.info('============= END : changeCarOwner ==========='); - } -}; - -shim.start(new Chaincode()); diff --git a/app/platform/fabric/e2e-test/fabric-samples/chaincode/fabcar/javascript-low-level/package.json b/app/platform/fabric/e2e-test/fabric-samples/chaincode/fabcar/javascript-low-level/package.json deleted file mode 100644 index 9ab05de60..000000000 --- a/app/platform/fabric/e2e-test/fabric-samples/chaincode/fabcar/javascript-low-level/package.json +++ /dev/null @@ -1,17 +0,0 @@ -{ - "name": "fabcar", - "version": "1.0.0", - "description": "fabcar chaincode implemented in node.js", - "engines": { - "node": ">=8.4.0", - "npm": ">=5.3.0" - }, - "scripts": { - "start": "node fabcar.js" - }, - "engine-strict": true, - "license": "Apache-2.0", - "dependencies": { - "fabric-shim": "~1.4.0" - } -} diff --git a/app/platform/fabric/e2e-test/fabric-samples/chaincode/fabcar/javascript/.editorconfig b/app/platform/fabric/e2e-test/fabric-samples/chaincode/fabcar/javascript/.editorconfig deleted file mode 100755 index 75a13be20..000000000 --- a/app/platform/fabric/e2e-test/fabric-samples/chaincode/fabcar/javascript/.editorconfig +++ /dev/null @@ -1,16 +0,0 @@ -# -# SPDX-License-Identifier: Apache-2.0 -# - -root = true - -[*] -indent_style = space -indent_size = 4 -end_of_line = lf -charset = utf-8 -trim_trailing_whitespace = true -insert_final_newline = true - -[*.md] -trim_trailing_whitespace = false diff --git a/app/platform/fabric/e2e-test/fabric-samples/chaincode/fabcar/javascript/.eslintignore b/app/platform/fabric/e2e-test/fabric-samples/chaincode/fabcar/javascript/.eslintignore deleted file mode 100644 index 159584701..000000000 --- a/app/platform/fabric/e2e-test/fabric-samples/chaincode/fabcar/javascript/.eslintignore +++ /dev/null @@ -1,5 +0,0 @@ -# -# 
SPDX-License-Identifier: Apache-2.0 -# - -coverage diff --git a/app/platform/fabric/e2e-test/fabric-samples/chaincode/fabcar/javascript/.eslintrc.js b/app/platform/fabric/e2e-test/fabric-samples/chaincode/fabcar/javascript/.eslintrc.js deleted file mode 100644 index 6d5751a5d..000000000 --- a/app/platform/fabric/e2e-test/fabric-samples/chaincode/fabcar/javascript/.eslintrc.js +++ /dev/null @@ -1,38 +0,0 @@ -/* - * SPDX-License-Identifier: Apache-2.0 - */ - -module.exports = { - env: { - node: true, - mocha: true - }, - parserOptions: { - ecmaVersion: 8, - sourceType: 'script' - }, - extends: "eslint:recommended", - rules: { - indent: ['error', 4], - 'linebreak-style': ['error', 'unix'], - quotes: ['error', 'single'], - semi: ['error', 'always'], - 'no-unused-vars': ['error', { args: 'none' }], - 'no-console': 'off', - curly: 'error', - eqeqeq: 'error', - 'no-throw-literal': 'error', - strict: 'error', - 'no-var': 'error', - 'dot-notation': 'error', - 'no-tabs': 'error', - 'no-trailing-spaces': 'error', - 'no-use-before-define': 'error', - 'no-useless-call': 'error', - 'no-with': 'error', - 'operator-linebreak': 'error', - yoda: 'error', - 'quote-props': ['error', 'as-needed'], - 'no-constant-condition': ["error", { "checkLoops": false }] - } -}; diff --git a/app/platform/fabric/e2e-test/fabric-samples/chaincode/fabcar/javascript/.gitignore b/app/platform/fabric/e2e-test/fabric-samples/chaincode/fabcar/javascript/.gitignore deleted file mode 100644 index a00ca9415..000000000 --- a/app/platform/fabric/e2e-test/fabric-samples/chaincode/fabcar/javascript/.gitignore +++ /dev/null @@ -1,77 +0,0 @@ -# -# SPDX-License-Identifier: Apache-2.0 -# - -# Logs -logs -*.log -npm-debug.log* -yarn-debug.log* -yarn-error.log* - -# Runtime data -pids -*.pid -*.seed -*.pid.lock - -# Directory for instrumented libs generated by jscoverage/JSCover -lib-cov - -# Coverage directory used by tools like istanbul -coverage - -# nyc test coverage -.nyc_output - -# Grunt intermediate storage 
(http://gruntjs.com/creating-plugins#storing-task-files) -.grunt - -# Bower dependency directory (https://bower.io/) -bower_components - -# node-waf configuration -.lock-wscript - -# Compiled binary addons (https://nodejs.org/api/addons.html) -build/Release - -# Dependency directories -node_modules/ -jspm_packages/ - -# TypeScript v1 declaration files -typings/ - -# Optional npm cache directory -.npm - -# Optional eslint cache -.eslintcache - -# Optional REPL history -.node_repl_history - -# Output of 'npm pack' -*.tgz - -# Yarn Integrity file -.yarn-integrity - -# dotenv environment variables file -.env - -# parcel-bundler cache (https://parceljs.org/) -.cache - -# next.js build output -.next - -# nuxt.js build output -.nuxt - -# vuepress build output -.vuepress/dist - -# Serverless directories -.serverless diff --git a/app/platform/fabric/e2e-test/fabric-samples/chaincode/fabcar/javascript/index.js b/app/platform/fabric/e2e-test/fabric-samples/chaincode/fabcar/javascript/index.js deleted file mode 100644 index f5911c8c0..000000000 --- a/app/platform/fabric/e2e-test/fabric-samples/chaincode/fabcar/javascript/index.js +++ /dev/null @@ -1,10 +0,0 @@ -/* - * SPDX-License-Identifier: Apache-2.0 - */ - -'use strict'; - -const FabCar = require('./lib/fabcar'); - -module.exports.FabCar = FabCar; -module.exports.contracts = [ FabCar ]; diff --git a/app/platform/fabric/e2e-test/fabric-samples/chaincode/fabcar/javascript/lib/fabcar.js b/app/platform/fabric/e2e-test/fabric-samples/chaincode/fabcar/javascript/lib/fabcar.js deleted file mode 100644 index 53f2faefa..000000000 --- a/app/platform/fabric/e2e-test/fabric-samples/chaincode/fabcar/javascript/lib/fabcar.js +++ /dev/null @@ -1,156 +0,0 @@ -/* - * SPDX-License-Identifier: Apache-2.0 - */ - -'use strict'; - -const { Contract } = require('fabric-contract-api'); - -class FabCar extends Contract { - - async initLedger(ctx) { - console.info('============= START : Initialize Ledger ==========='); - const cars = [ - { - color: 
'blue', - make: 'Toyota', - model: 'Prius', - owner: 'Tomoko', - }, - { - color: 'red', - make: 'Ford', - model: 'Mustang', - owner: 'Brad', - }, - { - color: 'green', - make: 'Hyundai', - model: 'Tucson', - owner: 'Jin Soo', - }, - { - color: 'yellow', - make: 'Volkswagen', - model: 'Passat', - owner: 'Max', - }, - { - color: 'black', - make: 'Tesla', - model: 'S', - owner: 'Adriana', - }, - { - color: 'purple', - make: 'Peugeot', - model: '205', - owner: 'Michel', - }, - { - color: 'white', - make: 'Chery', - model: 'S22L', - owner: 'Aarav', - }, - { - color: 'violet', - make: 'Fiat', - model: 'Punto', - owner: 'Pari', - }, - { - color: 'indigo', - make: 'Tata', - model: 'Nano', - owner: 'Valeria', - }, - { - color: 'brown', - make: 'Holden', - model: 'Barina', - owner: 'Shotaro', - }, - ]; - - for (let i = 0; i < cars.length; i++) { - cars[i].docType = 'car'; - await ctx.stub.putState('CAR' + i, Buffer.from(JSON.stringify(cars[i]))); - console.info('Added <--> ', cars[i]); - } - console.info('============= END : Initialize Ledger ==========='); - } - - async queryCar(ctx, carNumber) { - const carAsBytes = await ctx.stub.getState(carNumber); // get the car from chaincode state - if (!carAsBytes || carAsBytes.length === 0) { - throw new Error(`${carNumber} does not exist`); - } - console.log(carAsBytes.toString()); - return carAsBytes.toString(); - } - - async createCar(ctx, carNumber, make, model, color, owner) { - console.info('============= START : Create Car ==========='); - - const car = { - color, - docType: 'car', - make, - model, - owner, - }; - - await ctx.stub.putState(carNumber, Buffer.from(JSON.stringify(car))); - console.info('============= END : Create Car ==========='); - } - - async queryAllCars(ctx) { - const startKey = 'CAR0'; - const endKey = 'CAR999'; - - const iterator = await ctx.stub.getStateByRange(startKey, endKey); - - const allResults = []; - while (true) { - const res = await iterator.next(); - - if (res.value && 
res.value.value.toString()) { - console.log(res.value.value.toString('utf8')); - - const Key = res.value.key; - let Record; - try { - Record = JSON.parse(res.value.value.toString('utf8')); - } catch (err) { - console.log(err); - Record = res.value.value.toString('utf8'); - } - allResults.push({ Key, Record }); - } - if (res.done) { - console.log('end of data'); - await iterator.close(); - console.info(allResults); - return JSON.stringify(allResults); - } - } - } - - async changeCarOwner(ctx, carNumber, newOwner) { - console.info('============= START : changeCarOwner ==========='); - - const carAsBytes = await ctx.stub.getState(carNumber); // get the car from chaincode state - if (!carAsBytes || carAsBytes.length === 0) { - throw new Error(`${carNumber} does not exist`); - } - const car = JSON.parse(carAsBytes.toString()); - car.owner = newOwner; - - await ctx.stub.putState(carNumber, Buffer.from(JSON.stringify(car))); - console.info('============= END : changeCarOwner ==========='); - } - -} - -module.exports = FabCar; diff --git a/app/platform/fabric/e2e-test/fabric-samples/chaincode/fabcar/javascript/package.json b/app/platform/fabric/e2e-test/fabric-samples/chaincode/fabcar/javascript/package.json deleted file mode 100644 index abe745587..000000000 --- a/app/platform/fabric/e2e-test/fabric-samples/chaincode/fabcar/javascript/package.json +++ /dev/null @@ -1,47 +0,0 @@ -{ - "name": "fabcar", - "version": "1.0.0", - "description": "FabCar contract implemented in JavaScript", - "main": "index.js", - "engines": { - "node": ">=8", - "npm": ">=5" - }, - "scripts": { - "lint": "eslint .", - "pretest": "npm run lint", - "test": "nyc mocha --recursive", - "start": "fabric-chaincode-node start" - }, - "engineStrict": true, - "author": "Hyperledger", - "license": "Apache-2.0", - "dependencies": { - "fabric-contract-api": "~1.4.0", - "fabric-shim": "~1.4.0" - }, - "devDependencies": { - "chai": "^4.1.2", - "eslint": "^4.19.1", - "mocha": "^5.2.0", - "nyc": "^12.0.2", - 
"sinon": "^6.0.0", - "sinon-chai": "^3.2.0" - }, - "nyc": { - "exclude": [ - "coverage/**", - "test/**" - ], - "reporter": [ - "text-summary", - "html" - ], - "all": true, - "check-coverage": true, - "statements": 100, - "branches": 100, - "functions": 100, - "lines": 100 - } -} diff --git a/app/platform/fabric/e2e-test/fabric-samples/chaincode/fabcar/typescript/.editorconfig b/app/platform/fabric/e2e-test/fabric-samples/chaincode/fabcar/typescript/.editorconfig deleted file mode 100755 index 75a13be20..000000000 --- a/app/platform/fabric/e2e-test/fabric-samples/chaincode/fabcar/typescript/.editorconfig +++ /dev/null @@ -1,16 +0,0 @@ -# -# SPDX-License-Identifier: Apache-2.0 -# - -root = true - -[*] -indent_style = space -indent_size = 4 -end_of_line = lf -charset = utf-8 -trim_trailing_whitespace = true -insert_final_newline = true - -[*.md] -trim_trailing_whitespace = false diff --git a/app/platform/fabric/e2e-test/fabric-samples/chaincode/fabcar/typescript/.gitignore b/app/platform/fabric/e2e-test/fabric-samples/chaincode/fabcar/typescript/.gitignore deleted file mode 100644 index 69d6a33bb..000000000 --- a/app/platform/fabric/e2e-test/fabric-samples/chaincode/fabcar/typescript/.gitignore +++ /dev/null @@ -1,81 +0,0 @@ -# -# SPDX-License-Identifier: Apache-2.0 -# - -# Logs -logs -*.log -npm-debug.log* -yarn-debug.log* -yarn-error.log* - -# Runtime data -pids -*.pid -*.seed -*.pid.lock - -# Directory for instrumented libs generated by jscoverage/JSCover -lib-cov - -# Coverage directory used by tools like istanbul -coverage - -# nyc test coverage -.nyc_output - -# Grunt intermediate storage (http://gruntjs.com/creating-plugins#storing-task-files) -.grunt - -# Bower dependency directory (https://bower.io/) -bower_components - -# node-waf configuration -.lock-wscript - -# Compiled binary addons (https://nodejs.org/api/addons.html) -build/Release - -# Dependency directories -node_modules/ -jspm_packages/ - -# TypeScript v1 declaration files -typings/ - -# Optional 
npm cache directory -.npm - -# Optional eslint cache -.eslintcache - -# Optional REPL history -.node_repl_history - -# Output of 'npm pack' -*.tgz - -# Yarn Integrity file -.yarn-integrity - -# dotenv environment variables file -.env - -# parcel-bundler cache (https://parceljs.org/) -.cache - -# next.js build output -.next - -# nuxt.js build output -.nuxt - -# vuepress build output -.vuepress/dist - -# Serverless directories -.serverless - -# Compiled TypeScript files -dist - diff --git a/app/platform/fabric/e2e-test/fabric-samples/chaincode/fabcar/typescript/package.json b/app/platform/fabric/e2e-test/fabric-samples/chaincode/fabcar/typescript/package.json deleted file mode 100644 index ba2d5753d..000000000 --- a/app/platform/fabric/e2e-test/fabric-samples/chaincode/fabcar/typescript/package.json +++ /dev/null @@ -1,62 +0,0 @@ -{ - "name": "fabcar", - "version": "1.0.0", - "description": "FabCar contract implemented in TypeScript", - "main": "dist/index.js", - "typings": "dist/index.d.ts", - "engines": { - "node": ">=8", - "npm": ">=5" - }, - "scripts": { - "lint": "tslint -c tslint.json 'src/**/*.ts'", - "pretest": "npm run lint", - "test": "nyc mocha -r ts-node/register src/**/*.spec.ts", - "start": "fabric-chaincode-node start", - "build": "tsc", - "build:watch": "tsc -w", - "prepublishOnly": "npm run build" - }, - "engineStrict": true, - "author": "Hyperledger", - "license": "Apache-2.0", - "dependencies": { - "fabric-contract-api": "~1.4.0", - "fabric-shim": "~1.4.0" - }, - "devDependencies": { - "@types/chai": "^4.1.7", - "@types/mocha": "^5.2.5", - "@types/node": "^10.12.10", - "@types/sinon": "^5.0.7", - "@types/sinon-chai": "^3.2.1", - "chai": "^4.2.0", - "mocha": "^5.2.0", - "nyc": "^13.1.0", - "sinon": "^7.1.1", - "sinon-chai": "^3.3.0", - "ts-node": "^7.0.1", - "tslint": "^5.11.0", - "typescript": "^3.1.6" - }, - "nyc": { - "extension": [ - ".ts", - ".tsx" - ], - "exclude": [ - "coverage/**", - "dist/**" - ], - "reporter": [ - "text-summary", - "html" 
- ], - "all": true, - "check-coverage": true, - "statements": 100, - "branches": 100, - "functions": 100, - "lines": 100 - } -} diff --git a/app/platform/fabric/e2e-test/fabric-samples/chaincode/fabcar/typescript/src/car.ts b/app/platform/fabric/e2e-test/fabric-samples/chaincode/fabcar/typescript/src/car.ts deleted file mode 100644 index ba1016253..000000000 --- a/app/platform/fabric/e2e-test/fabric-samples/chaincode/fabcar/typescript/src/car.ts +++ /dev/null @@ -1,11 +0,0 @@ -/* - * SPDX-License-Identifier: Apache-2.0 - */ - -export class Car { - public docType?: string; - public color: string; - public make: string; - public model: string; - public owner: string; -} diff --git a/app/platform/fabric/e2e-test/fabric-samples/chaincode/fabcar/typescript/src/fabcar.ts b/app/platform/fabric/e2e-test/fabric-samples/chaincode/fabcar/typescript/src/fabcar.ts deleted file mode 100644 index de72b0394..000000000 --- a/app/platform/fabric/e2e-test/fabric-samples/chaincode/fabcar/typescript/src/fabcar.ts +++ /dev/null @@ -1,153 +0,0 @@ -/* - * SPDX-License-Identifier: Apache-2.0 - */ - -import { Context, Contract } from 'fabric-contract-api'; -import { Car } from './car'; - -export class FabCar extends Contract { - - public async initLedger(ctx: Context) { - console.info('============= START : Initialize Ledger ==========='); - const cars: Car[] = [ - { - color: 'blue', - make: 'Toyota', - model: 'Prius', - owner: 'Tomoko', - }, - { - color: 'red', - make: 'Ford', - model: 'Mustang', - owner: 'Brad', - }, - { - color: 'green', - make: 'Hyundai', - model: 'Tucson', - owner: 'Jin Soo', - }, - { - color: 'yellow', - make: 'Volkswagen', - model: 'Passat', - owner: 'Max', - }, - { - color: 'black', - make: 'Tesla', - model: 'S', - owner: 'Adriana', - }, - { - color: 'purple', - make: 'Peugeot', - model: '205', - owner: 'Michel', - }, - { - color: 'white', - make: 'Chery', - model: 'S22L', - owner: 'Aarav', - }, - { - color: 'violet', - make: 'Fiat', - model: 'Punto', - owner: 
'Pari', - }, - { - color: 'indigo', - make: 'Tata', - model: 'Nano', - owner: 'Valeria', - }, - { - color: 'brown', - make: 'Holden', - model: 'Barina', - owner: 'Shotaro', - }, - ]; - - for (let i = 0; i < cars.length; i++) { - cars[i].docType = 'car'; - await ctx.stub.putState('CAR' + i, Buffer.from(JSON.stringify(cars[i]))); - console.info('Added <--> ', cars[i]); - } - console.info('============= END : Initialize Ledger ==========='); - } - - public async queryCar(ctx: Context, carNumber: string): Promise { - const carAsBytes = await ctx.stub.getState(carNumber); // get the car from chaincode state - if (!carAsBytes || carAsBytes.length === 0) { - throw new Error(`${carNumber} does not exist`); - } - console.log(carAsBytes.toString()); - return carAsBytes.toString(); - } - - public async createCar(ctx: Context, carNumber: string, make: string, model: string, color: string, owner: string) { - console.info('============= START : Create Car ==========='); - - const car: Car = { - color, - docType: 'car', - make, - model, - owner, - }; - - await ctx.stub.putState(carNumber, Buffer.from(JSON.stringify(car))); - console.info('============= END : Create Car ==========='); - } - - public async queryAllCars(ctx: Context): Promise { - const startKey = 'CAR0'; - const endKey = 'CAR999'; - - const iterator = await ctx.stub.getStateByRange(startKey, endKey); - - const allResults = []; - while (true) { - const res = await iterator.next(); - - if (res.value && res.value.value.toString()) { - console.log(res.value.value.toString('utf8')); - - const Key = res.value.key; - let Record; - try { - Record = JSON.parse(res.value.value.toString('utf8')); - } catch (err) { - console.log(err); - Record = res.value.value.toString('utf8'); - } - allResults.push({ Key, Record }); - } - if (res.done) { - console.log('end of data'); - await iterator.close(); - console.info(allResults); - return JSON.stringify(allResults); - } - } - } - - public async changeCarOwner(ctx: Context, carNumber: 
string, newOwner: string) { - console.info('============= START : changeCarOwner ==========='); - - const carAsBytes = await ctx.stub.getState(carNumber); // get the car from chaincode state - if (!carAsBytes || carAsBytes.length === 0) { - throw new Error(`${carNumber} does not exist`); - } - const car: Car = JSON.parse(carAsBytes.toString()); - car.owner = newOwner; - - await ctx.stub.putState(carNumber, Buffer.from(JSON.stringify(car))); - console.info('============= END : changeCarOwner ==========='); - } - -} diff --git a/app/platform/fabric/e2e-test/fabric-samples/chaincode/fabcar/typescript/src/index.ts b/app/platform/fabric/e2e-test/fabric-samples/chaincode/fabcar/typescript/src/index.ts deleted file mode 100644 index c0a2fcf67..000000000 --- a/app/platform/fabric/e2e-test/fabric-samples/chaincode/fabcar/typescript/src/index.ts +++ /dev/null @@ -1,8 +0,0 @@ -/* - * SPDX-License-Identifier: Apache-2.0 - */ - -import { FabCar } from './fabcar'; -export { FabCar } from './fabcar'; - -export const contracts: any[] = [ FabCar ]; diff --git a/app/platform/fabric/e2e-test/fabric-samples/chaincode/fabcar/typescript/tsconfig.json b/app/platform/fabric/e2e-test/fabric-samples/chaincode/fabcar/typescript/tsconfig.json deleted file mode 100644 index 8c96ea071..000000000 --- a/app/platform/fabric/e2e-test/fabric-samples/chaincode/fabcar/typescript/tsconfig.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "compilerOptions": { - "outDir": "dist", - "target": "es2017", - "moduleResolution": "node", - "module": "commonjs", - "declaration": true, - "sourceMap": true - }, - "include": [ - "./src/**/*" - ], - "exclude": [ - "./src/**/*.spec.ts" - ] -} diff --git a/app/platform/fabric/e2e-test/fabric-samples/chaincode/fabcar/typescript/tslint.json b/app/platform/fabric/e2e-test/fabric-samples/chaincode/fabcar/typescript/tslint.json deleted file mode 100644 index 33ccbf3c6..000000000 --- a/app/platform/fabric/e2e-test/fabric-samples/chaincode/fabcar/typescript/tslint.json +++ /dev/null 
@@ -1,21 +0,0 @@ -{ - "defaultSeverity": "error", - "extends": [ - "tslint:recommended" - ], - "jsRules": {}, - "rules": { - "indent": [true, "spaces", 4], - "linebreak-style": [true, "LF"], - "quotemark": [true, "single"], - "semicolon": [true, "always"], - "no-console": false, - "curly": true, - "triple-equals": true, - "no-string-throw": true, - "no-var-keyword": true, - "no-trailing-whitespace": true, - "object-literal-key-quotes": [true, "as-needed"] - }, - "rulesDirectory": [] -} diff --git a/app/platform/fabric/e2e-test/fabric-samples/chaincode/marbles02/go/META-INF/statedb/couchdb/indexes/indexOwner.json b/app/platform/fabric/e2e-test/fabric-samples/chaincode/marbles02/go/META-INF/statedb/couchdb/indexes/indexOwner.json deleted file mode 100644 index 305f09044..000000000 --- a/app/platform/fabric/e2e-test/fabric-samples/chaincode/marbles02/go/META-INF/statedb/couchdb/indexes/indexOwner.json +++ /dev/null @@ -1 +0,0 @@ -{"index":{"fields":["docType","owner"]},"ddoc":"indexOwnerDoc", "name":"indexOwner","type":"json"} diff --git a/app/platform/fabric/e2e-test/fabric-samples/chaincode/marbles02/go/marbles_chaincode.go b/app/platform/fabric/e2e-test/fabric-samples/chaincode/marbles02/go/marbles_chaincode.go deleted file mode 100644 index 2ed3efd67..000000000 --- a/app/platform/fabric/e2e-test/fabric-samples/chaincode/marbles02/go/marbles_chaincode.go +++ /dev/null @@ -1,755 +0,0 @@ -/* - SPDX-License-Identifier: Apache-2.0 -*/ - -// ====CHAINCODE EXECUTION SAMPLES (CLI) ================== - -// ==== Invoke marbles ==== -// peer chaincode invoke -C myc1 -n marbles -c '{"Args":["initMarble","marble1","blue","35","tom"]}' -// peer chaincode invoke -C myc1 -n marbles -c '{"Args":["initMarble","marble2","red","50","tom"]}' -// peer chaincode invoke -C myc1 -n marbles -c '{"Args":["initMarble","marble3","blue","70","tom"]}' -// peer chaincode invoke -C myc1 -n marbles -c '{"Args":["transferMarble","marble2","jerry"]}' -// peer chaincode invoke -C myc1 -n marbles -c 
'{"Args":["transferMarblesBasedOnColor","blue","jerry"]}' -// peer chaincode invoke -C myc1 -n marbles -c '{"Args":["delete","marble1"]}' - -// ==== Query marbles ==== -// peer chaincode query -C myc1 -n marbles -c '{"Args":["readMarble","marble1"]}' -// peer chaincode query -C myc1 -n marbles -c '{"Args":["getMarblesByRange","marble1","marble3"]}' -// peer chaincode query -C myc1 -n marbles -c '{"Args":["getHistoryForMarble","marble1"]}' - -// Rich Query (Only supported if CouchDB is used as state database): -// peer chaincode query -C myc1 -n marbles -c '{"Args":["queryMarblesByOwner","tom"]}' -// peer chaincode query -C myc1 -n marbles -c '{"Args":["queryMarbles","{\"selector\":{\"owner\":\"tom\"}}"]}' - -// Rich Query with Pagination (Only supported if CouchDB is used as state database): -// peer chaincode query -C myc1 -n marbles -c '{"Args":["queryMarblesWithPagination","{\"selector\":{\"owner\":\"tom\"}}","3",""]}' - -// INDEXES TO SUPPORT COUCHDB RICH QUERIES -// -// Indexes in CouchDB are required in order to make JSON queries efficient and are required for -// any JSON query with a sort. As of Hyperledger Fabric 1.1, indexes may be packaged alongside -// chaincode in a META-INF/statedb/couchdb/indexes directory. Each index must be defined in its own -// text file with extension *.json with the index definition formatted in JSON following the -// CouchDB index JSON syntax as documented at: -// http://docs.couchdb.org/en/2.1.1/api/database/find.html#db-index -// -// This marbles02 example chaincode demonstrates a packaged -// index which you can find in META-INF/statedb/couchdb/indexes/indexOwner.json. -// For deployment of chaincode to production environments, it is recommended -// to define any indexes alongside chaincode so that the chaincode and supporting indexes -// are deployed automatically as a unit, once the chaincode has been installed on a peer and -// instantiated on a channel. See Hyperledger Fabric documentation for more details. 
-// -// If you have access to the your peer's CouchDB state database in a development environment, -// you may want to iteratively test various indexes in support of your chaincode queries. You -// can use the CouchDB Fauxton interface or a command line curl utility to create and update -// indexes. Then once you finalize an index, include the index definition alongside your -// chaincode in the META-INF/statedb/couchdb/indexes directory, for packaging and deployment -// to managed environments. -// -// In the examples below you can find index definitions that support marbles02 -// chaincode queries, along with the syntax that you can use in development environments -// to create the indexes in the CouchDB Fauxton interface or a curl command line utility. -// - -//Example hostname:port configurations to access CouchDB. -// -//To access CouchDB docker container from within another docker container or from vagrant environments: -// http://couchdb:5984/ -// -//Inside couchdb docker container -// http://127.0.0.1:5984/ - -// Index for docType, owner. -// -// Example curl command line to define index in the CouchDB channel_chaincode database -// curl -i -X POST -H "Content-Type: application/json" -d "{\"index\":{\"fields\":[\"docType\",\"owner\"]},\"name\":\"indexOwner\",\"ddoc\":\"indexOwnerDoc\",\"type\":\"json\"}" http://hostname:port/myc1_marbles/_index -// - -// Index for docType, owner, size (descending order). 
-// -// Example curl command line to define index in the CouchDB channel_chaincode database -// curl -i -X POST -H "Content-Type: application/json" -d "{\"index\":{\"fields\":[{\"size\":\"desc\"},{\"docType\":\"desc\"},{\"owner\":\"desc\"}]},\"ddoc\":\"indexSizeSortDoc\", \"name\":\"indexSizeSortDesc\",\"type\":\"json\"}" http://hostname:port/myc1_marbles/_index - -// Rich Query with index design doc and index name specified (Only supported if CouchDB is used as state database): -// peer chaincode query -C myc1 -n marbles -c '{"Args":["queryMarbles","{\"selector\":{\"docType\":\"marble\",\"owner\":\"tom\"}, \"use_index\":[\"_design/indexOwnerDoc\", \"indexOwner\"]}"]}' - -// Rich Query with index design doc specified only (Only supported if CouchDB is used as state database): -// peer chaincode query -C myc1 -n marbles -c '{"Args":["queryMarbles","{\"selector\":{\"docType\":{\"$eq\":\"marble\"},\"owner\":{\"$eq\":\"tom\"},\"size\":{\"$gt\":0}},\"fields\":[\"docType\",\"owner\",\"size\"],\"sort\":[{\"size\":\"desc\"}],\"use_index\":\"_design/indexSizeSortDoc\"}"]}' - -package main - -import ( - "bytes" - "encoding/json" - "fmt" - "strconv" - "strings" - "time" - - "github.com/hyperledger/fabric/core/chaincode/shim" - pb "github.com/hyperledger/fabric/protos/peer" -) - -// SimpleChaincode example simple Chaincode implementation -type SimpleChaincode struct { -} - -type marble struct { - ObjectType string `json:"docType"` //docType is used to distinguish the various types of objects in state database - Name string `json:"name"` //the fieldtags are needed to keep case from bouncing around - Color string `json:"color"` - Size int `json:"size"` - Owner string `json:"owner"` -} - -// =================================================================================== -// Main -// =================================================================================== -func main() { - err := shim.Start(new(SimpleChaincode)) - if err != nil { - fmt.Printf("Error starting Simple 
chaincode: %s", err) - } -} - -// Init initializes chaincode -// =========================== -func (t *SimpleChaincode) Init(stub shim.ChaincodeStubInterface) pb.Response { - return shim.Success(nil) -} - -// Invoke - Our entry point for Invocations -// ======================================== -func (t *SimpleChaincode) Invoke(stub shim.ChaincodeStubInterface) pb.Response { - function, args := stub.GetFunctionAndParameters() - fmt.Println("invoke is running " + function) - - // Handle different functions - if function == "initMarble" { //create a new marble - return t.initMarble(stub, args) - } else if function == "transferMarble" { //change owner of a specific marble - return t.transferMarble(stub, args) - } else if function == "transferMarblesBasedOnColor" { //transfer all marbles of a certain color - return t.transferMarblesBasedOnColor(stub, args) - } else if function == "delete" { //delete a marble - return t.delete(stub, args) - } else if function == "readMarble" { //read a marble - return t.readMarble(stub, args) - } else if function == "queryMarblesByOwner" { //find marbles for owner X using rich query - return t.queryMarblesByOwner(stub, args) - } else if function == "queryMarbles" { //find marbles based on an ad hoc rich query - return t.queryMarbles(stub, args) - } else if function == "getHistoryForMarble" { //get history of values for a marble - return t.getHistoryForMarble(stub, args) - } else if function == "getMarblesByRange" { //get marbles based on range query - return t.getMarblesByRange(stub, args) - } else if function == "getMarblesByRangeWithPagination" { - return t.getMarblesByRangeWithPagination(stub, args) - } else if function == "queryMarblesWithPagination" { - return t.queryMarblesWithPagination(stub, args) - } - - fmt.Println("invoke did not find func: " + function) //error - return shim.Error("Received unknown function invocation") -} - -// ============================================================ -// initMarble - create a new marble, 
store into chaincode state -// ============================================================ -func (t *SimpleChaincode) initMarble(stub shim.ChaincodeStubInterface, args []string) pb.Response { - var err error - - // 0 1 2 3 - // "asdf", "blue", "35", "bob" - if len(args) != 4 { - return shim.Error("Incorrect number of arguments. Expecting 4") - } - - // ==== Input sanitation ==== - fmt.Println("- start init marble") - if len(args[0]) <= 0 { - return shim.Error("1st argument must be a non-empty string") - } - if len(args[1]) <= 0 { - return shim.Error("2nd argument must be a non-empty string") - } - if len(args[2]) <= 0 { - return shim.Error("3rd argument must be a non-empty string") - } - if len(args[3]) <= 0 { - return shim.Error("4th argument must be a non-empty string") - } - marbleName := args[0] - color := strings.ToLower(args[1]) - owner := strings.ToLower(args[3]) - size, err := strconv.Atoi(args[2]) - if err != nil { - return shim.Error("3rd argument must be a numeric string") - } - - // ==== Check if marble already exists ==== - marbleAsBytes, err := stub.GetState(marbleName) - if err != nil { - return shim.Error("Failed to get marble: " + err.Error()) - } else if marbleAsBytes != nil { - fmt.Println("This marble already exists: " + marbleName) - return shim.Error("This marble already exists: " + marbleName) - } - - // ==== Create marble object and marshal to JSON ==== - objectType := "marble" - marble := &marble{objectType, marbleName, color, size, owner} - marbleJSONasBytes, err := json.Marshal(marble) - if err != nil { - return shim.Error(err.Error()) - } - //Alternatively, build the marble json string manually if you don't want to use struct marshalling - //marbleJSONasString := `{"docType":"Marble", "name": "` + marbleName + `", "color": "` + color + `", "size": ` + strconv.Itoa(size) + `, "owner": "` + owner + `"}` - //marbleJSONasBytes := []byte(str) - - // === Save marble to state === - err = stub.PutState(marbleName, marbleJSONasBytes) - if err != 
nil { - return shim.Error(err.Error()) - } - - // ==== Index the marble to enable color-based range queries, e.g. return all blue marbles ==== - // An 'index' is a normal key/value entry in state. - // The key is a composite key, with the elements that you want to range query on listed first. - // In our case, the composite key is based on indexName~color~name. - // This will enable very efficient state range queries based on composite keys matching indexName~color~* - indexName := "color~name" - colorNameIndexKey, err := stub.CreateCompositeKey(indexName, []string{marble.Color, marble.Name}) - if err != nil { - return shim.Error(err.Error()) - } - // Save index entry to state. Only the key name is needed, no need to store a duplicate copy of the marble. - // Note - passing a 'nil' value will effectively delete the key from state, therefore we pass null character as value - value := []byte{0x00} - stub.PutState(colorNameIndexKey, value) - - // ==== Marble saved and indexed. Return success ==== - fmt.Println("- end init marble") - return shim.Success(nil) -} - -// =============================================== -// readMarble - read a marble from chaincode state -// =============================================== -func (t *SimpleChaincode) readMarble(stub shim.ChaincodeStubInterface, args []string) pb.Response { - var name, jsonResp string - var err error - - if len(args) != 1 { - return shim.Error("Incorrect number of arguments. 
Expecting name of the marble to query") - } - - name = args[0] - valAsbytes, err := stub.GetState(name) //get the marble from chaincode state - if err != nil { - jsonResp = "{\"Error\":\"Failed to get state for " + name + "\"}" - return shim.Error(jsonResp) - } else if valAsbytes == nil { - jsonResp = "{\"Error\":\"Marble does not exist: " + name + "\"}" - return shim.Error(jsonResp) - } - - return shim.Success(valAsbytes) -} - -// ================================================== -// delete - remove a marble key/value pair from state -// ================================================== -func (t *SimpleChaincode) delete(stub shim.ChaincodeStubInterface, args []string) pb.Response { - var jsonResp string - var marbleJSON marble - if len(args) != 1 { - return shim.Error("Incorrect number of arguments. Expecting 1") - } - marbleName := args[0] - - // to maintain the color~name index, we need to read the marble first and get its color - valAsbytes, err := stub.GetState(marbleName) //get the marble from chaincode state - if err != nil { - jsonResp = "{\"Error\":\"Failed to get state for " + marbleName + "\"}" - return shim.Error(jsonResp) - } else if valAsbytes == nil { - jsonResp = "{\"Error\":\"Marble does not exist: " + marbleName + "\"}" - return shim.Error(jsonResp) - } - - err = json.Unmarshal([]byte(valAsbytes), &marbleJSON) - if err != nil { - jsonResp = "{\"Error\":\"Failed to decode JSON of: " + marbleName + "\"}" - return shim.Error(jsonResp) - } - - err = stub.DelState(marbleName) //remove the marble from chaincode state - if err != nil { - return shim.Error("Failed to delete state:" + err.Error()) - } - - // maintain the index - indexName := "color~name" - colorNameIndexKey, err := stub.CreateCompositeKey(indexName, []string{marbleJSON.Color, marbleJSON.Name}) - if err != nil { - return shim.Error(err.Error()) - } - - // Delete index entry to state. 
- err = stub.DelState(colorNameIndexKey) - if err != nil { - return shim.Error("Failed to delete state:" + err.Error()) - } - return shim.Success(nil) -} - -// =========================================================== -// transfer a marble by setting a new owner name on the marble -// =========================================================== -func (t *SimpleChaincode) transferMarble(stub shim.ChaincodeStubInterface, args []string) pb.Response { - - // 0 1 - // "name", "bob" - if len(args) < 2 { - return shim.Error("Incorrect number of arguments. Expecting 2") - } - - marbleName := args[0] - newOwner := strings.ToLower(args[1]) - fmt.Println("- start transferMarble ", marbleName, newOwner) - - marbleAsBytes, err := stub.GetState(marbleName) - if err != nil { - return shim.Error("Failed to get marble:" + err.Error()) - } else if marbleAsBytes == nil { - return shim.Error("Marble does not exist") - } - - marbleToTransfer := marble{} - err = json.Unmarshal(marbleAsBytes, &marbleToTransfer) //unmarshal it aka JSON.parse() - if err != nil { - return shim.Error(err.Error()) - } - marbleToTransfer.Owner = newOwner //change the owner - - marbleJSONasBytes, _ := json.Marshal(marbleToTransfer) - err = stub.PutState(marbleName, marbleJSONasBytes) //rewrite the marble - if err != nil { - return shim.Error(err.Error()) - } - - fmt.Println("- end transferMarble (success)") - return shim.Success(nil) -} - -// =========================================================================================== -// constructQueryResponseFromIterator constructs a JSON array containing query results from -// a given result iterator -// =========================================================================================== -func constructQueryResponseFromIterator(resultsIterator shim.StateQueryIteratorInterface) (*bytes.Buffer, error) { - // buffer is a JSON array containing QueryResults - var buffer bytes.Buffer - buffer.WriteString("[") - - bArrayMemberAlreadyWritten := false - for 
resultsIterator.HasNext() { - queryResponse, err := resultsIterator.Next() - if err != nil { - return nil, err - } - // Add a comma before array members, suppress it for the first array member - if bArrayMemberAlreadyWritten == true { - buffer.WriteString(",") - } - buffer.WriteString("{\"Key\":") - buffer.WriteString("\"") - buffer.WriteString(queryResponse.Key) - buffer.WriteString("\"") - - buffer.WriteString(", \"Record\":") - // Record is a JSON object, so we write as-is - buffer.WriteString(string(queryResponse.Value)) - buffer.WriteString("}") - bArrayMemberAlreadyWritten = true - } - buffer.WriteString("]") - - return &buffer, nil -} - -// =========================================================================================== -// addPaginationMetadataToQueryResults adds QueryResponseMetadata, which contains pagination -// info, to the constructed query results -// =========================================================================================== -func addPaginationMetadataToQueryResults(buffer *bytes.Buffer, responseMetadata *pb.QueryResponseMetadata) *bytes.Buffer { - - buffer.WriteString("[{\"ResponseMetadata\":{\"RecordsCount\":") - buffer.WriteString("\"") - buffer.WriteString(fmt.Sprintf("%v", responseMetadata.FetchedRecordsCount)) - buffer.WriteString("\"") - buffer.WriteString(", \"Bookmark\":") - buffer.WriteString("\"") - buffer.WriteString(responseMetadata.Bookmark) - buffer.WriteString("\"}}]") - - return buffer -} - -// =========================================================================================== -// getMarblesByRange performs a range query based on the start and end keys provided. - -// Read-only function results are not typically submitted to ordering. If the read-only -// results are submitted to ordering, or if the query is used in an update transaction -// and submitted to ordering, then the committing peers will re-execute to guarantee that -// result sets are stable between endorsement time and commit time. 
The transaction is -// invalidated by the committing peers if the result set has changed between endorsement -// time and commit time. -// Therefore, range queries are a safe option for performing update transactions based on query results. -// =========================================================================================== -func (t *SimpleChaincode) getMarblesByRange(stub shim.ChaincodeStubInterface, args []string) pb.Response { - - if len(args) < 2 { - return shim.Error("Incorrect number of arguments. Expecting 2") - } - - startKey := args[0] - endKey := args[1] - - resultsIterator, err := stub.GetStateByRange(startKey, endKey) - if err != nil { - return shim.Error(err.Error()) - } - defer resultsIterator.Close() - - buffer, err := constructQueryResponseFromIterator(resultsIterator) - if err != nil { - return shim.Error(err.Error()) - } - - fmt.Printf("- getMarblesByRange queryResult:\n%s\n", buffer.String()) - - return shim.Success(buffer.Bytes()) -} - -// ==== Example: GetStateByPartialCompositeKey/RangeQuery ========================================= -// transferMarblesBasedOnColor will transfer marbles of a given color to a certain new owner. -// Uses a GetStateByPartialCompositeKey (range query) against color~name 'index'. -// Committing peers will re-execute range queries to guarantee that result sets are stable -// between endorsement time and commit time. The transaction is invalidated by the -// committing peers if the result set has changed between endorsement time and commit time. -// Therefore, range queries are a safe option for performing update transactions based on query results. -// =========================================================================================== -func (t *SimpleChaincode) transferMarblesBasedOnColor(stub shim.ChaincodeStubInterface, args []string) pb.Response { - - // 0 1 - // "color", "bob" - if len(args) < 2 { - return shim.Error("Incorrect number of arguments. 
Expecting 2") - } - - color := args[0] - newOwner := strings.ToLower(args[1]) - fmt.Println("- start transferMarblesBasedOnColor ", color, newOwner) - - // Query the color~name index by color - // This will execute a key range query on all keys starting with 'color' - coloredMarbleResultsIterator, err := stub.GetStateByPartialCompositeKey("color~name", []string{color}) - if err != nil { - return shim.Error(err.Error()) - } - defer coloredMarbleResultsIterator.Close() - - // Iterate through result set and for each marble found, transfer to newOwner - var i int - for i = 0; coloredMarbleResultsIterator.HasNext(); i++ { - // Note that we don't get the value (2nd return variable), we'll just get the marble name from the composite key - responseRange, err := coloredMarbleResultsIterator.Next() - if err != nil { - return shim.Error(err.Error()) - } - - // get the color and name from color~name composite key - objectType, compositeKeyParts, err := stub.SplitCompositeKey(responseRange.Key) - if err != nil { - return shim.Error(err.Error()) - } - returnedColor := compositeKeyParts[0] - returnedMarbleName := compositeKeyParts[1] - fmt.Printf("- found a marble from index:%s color:%s name:%s\n", objectType, returnedColor, returnedMarbleName) - - // Now call the transfer function for the found marble. 
- // Re-use the same function that is used to transfer individual marbles - response := t.transferMarble(stub, []string{returnedMarbleName, newOwner}) - // if the transfer failed break out of loop and return error - if response.Status != shim.OK { - return shim.Error("Transfer failed: " + response.Message) - } - } - - responsePayload := fmt.Sprintf("Transferred %d %s marbles to %s", i, color, newOwner) - fmt.Println("- end transferMarblesBasedOnColor: " + responsePayload) - return shim.Success([]byte(responsePayload)) -} - -// =======Rich queries ========================================================================= -// Two examples of rich queries are provided below (parameterized query and ad hoc query). -// Rich queries pass a query string to the state database. -// Rich queries are only supported by state database implementations -// that support rich query (e.g. CouchDB). -// The query string is in the syntax of the underlying state database. -// With rich queries there is no guarantee that the result set hasn't changed between -// endorsement time and commit time, aka 'phantom reads'. -// Therefore, rich queries should not be used in update transactions, unless the -// application handles the possibility of result set changes between endorsement and commit time. -// Rich queries can be used for point-in-time queries against a peer. -// ============================================================================================ - -// ===== Example: Parameterized rich query ================================================= -// queryMarblesByOwner queries for marbles based on a passed in owner. -// This is an example of a parameterized query where the query logic is baked into the chaincode, -// and accepting a single query parameter (owner). -// Only available on state databases that support rich query (e.g. 
CouchDB) -// ========================================================================================= -func (t *SimpleChaincode) queryMarblesByOwner(stub shim.ChaincodeStubInterface, args []string) pb.Response { - - // 0 - // "bob" - if len(args) < 1 { - return shim.Error("Incorrect number of arguments. Expecting 1") - } - - owner := strings.ToLower(args[0]) - - queryString := fmt.Sprintf("{\"selector\":{\"docType\":\"marble\",\"owner\":\"%s\"}}", owner) - - queryResults, err := getQueryResultForQueryString(stub, queryString) - if err != nil { - return shim.Error(err.Error()) - } - return shim.Success(queryResults) -} - -// ===== Example: Ad hoc rich query ======================================================== -// queryMarbles uses a query string to perform a query for marbles. -// Query string matching state database syntax is passed in and executed as is. -// Supports ad hoc queries that can be defined at runtime by the client. -// If this is not desired, follow the queryMarblesForOwner example for parameterized queries. -// Only available on state databases that support rich query (e.g. CouchDB) -// ========================================================================================= -func (t *SimpleChaincode) queryMarbles(stub shim.ChaincodeStubInterface, args []string) pb.Response { - - // 0 - // "queryString" - if len(args) < 1 { - return shim.Error("Incorrect number of arguments. Expecting 1") - } - - queryString := args[0] - - queryResults, err := getQueryResultForQueryString(stub, queryString) - if err != nil { - return shim.Error(err.Error()) - } - return shim.Success(queryResults) -} - -// ========================================================================================= -// getQueryResultForQueryString executes the passed in query string. -// Result set is built and returned as a byte array containing the JSON results. 
-// ========================================================================================= -func getQueryResultForQueryString(stub shim.ChaincodeStubInterface, queryString string) ([]byte, error) { - - fmt.Printf("- getQueryResultForQueryString queryString:\n%s\n", queryString) - - resultsIterator, err := stub.GetQueryResult(queryString) - if err != nil { - return nil, err - } - defer resultsIterator.Close() - - buffer, err := constructQueryResponseFromIterator(resultsIterator) - if err != nil { - return nil, err - } - - fmt.Printf("- getQueryResultForQueryString queryResult:\n%s\n", buffer.String()) - - return buffer.Bytes(), nil -} - -// ====== Pagination ========================================================================= -// Pagination provides a method to retrieve records with a defined pagesize and -// start point (bookmark). An empty string bookmark defines the first "page" of a query -// result. Paginated queries return a bookmark that can be used in -// the next query to retrieve the next page of results. Paginated queries extend -// rich queries and range queries to include a pagesize and bookmark. -// -// Two examples are provided in this example. The first is getMarblesByRangeWithPagination -// which executes a paginated range query. -// The second example is a paginated query for rich ad-hoc queries. -// ========================================================================================= - -// ====== Example: Pagination with Range Query =============================================== -// getMarblesByRangeWithPagination performs a range query based on the start & end key, -// page size and a bookmark. - -// The number of fetched records will be equal to or lesser than the page size. -// Paginated range queries are only valid for read only transactions. 
-// =========================================================================================== -func (t *SimpleChaincode) getMarblesByRangeWithPagination(stub shim.ChaincodeStubInterface, args []string) pb.Response { - - if len(args) < 4 { - return shim.Error("Incorrect number of arguments. Expecting 4") - } - - startKey := args[0] - endKey := args[1] - //return type of ParseInt is int64 - pageSize, err := strconv.ParseInt(args[2], 10, 32) - if err != nil { - return shim.Error(err.Error()) - } - bookmark := args[3] - - resultsIterator, responseMetadata, err := stub.GetStateByRangeWithPagination(startKey, endKey, int32(pageSize), bookmark) - if err != nil { - return shim.Error(err.Error()) - } - defer resultsIterator.Close() - - buffer, err := constructQueryResponseFromIterator(resultsIterator) - if err != nil { - return shim.Error(err.Error()) - } - - bufferWithPaginationInfo := addPaginationMetadataToQueryResults(buffer, responseMetadata) - - fmt.Printf("- getMarblesByRange queryResult:\n%s\n", bufferWithPaginationInfo.String()) - - return shim.Success(buffer.Bytes()) -} - -// ===== Example: Pagination with Ad hoc Rich Query ======================================================== -// queryMarblesWithPagination uses a query string, page size and a bookmark to perform a query -// for marbles. Query string matching state database syntax is passed in and executed as is. -// The number of fetched records would be equal to or lesser than the specified page size. -// Supports ad hoc queries that can be defined at runtime by the client. -// If this is not desired, follow the queryMarblesForOwner example for parameterized queries. -// Only available on state databases that support rich query (e.g. CouchDB) -// Paginated queries are only valid for read only transactions. 
-// ========================================================================================= -func (t *SimpleChaincode) queryMarblesWithPagination(stub shim.ChaincodeStubInterface, args []string) pb.Response { - - // 0 - // "queryString" - if len(args) < 3 { - return shim.Error("Incorrect number of arguments. Expecting 3") - } - - queryString := args[0] - //return type of ParseInt is int64 - pageSize, err := strconv.ParseInt(args[1], 10, 32) - if err != nil { - return shim.Error(err.Error()) - } - bookmark := args[2] - - queryResults, err := getQueryResultForQueryStringWithPagination(stub, queryString, int32(pageSize), bookmark) - if err != nil { - return shim.Error(err.Error()) - } - return shim.Success(queryResults) -} - -// ========================================================================================= -// getQueryResultForQueryStringWithPagination executes the passed in query string with -// pagination info. Result set is built and returned as a byte array containing the JSON results. 
-// ========================================================================================= -func getQueryResultForQueryStringWithPagination(stub shim.ChaincodeStubInterface, queryString string, pageSize int32, bookmark string) ([]byte, error) { - - fmt.Printf("- getQueryResultForQueryString queryString:\n%s\n", queryString) - - resultsIterator, responseMetadata, err := stub.GetQueryResultWithPagination(queryString, pageSize, bookmark) - if err != nil { - return nil, err - } - defer resultsIterator.Close() - - buffer, err := constructQueryResponseFromIterator(resultsIterator) - if err != nil { - return nil, err - } - - bufferWithPaginationInfo := addPaginationMetadataToQueryResults(buffer, responseMetadata) - - fmt.Printf("- getQueryResultForQueryString queryResult:\n%s\n", bufferWithPaginationInfo.String()) - - return buffer.Bytes(), nil -} - -func (t *SimpleChaincode) getHistoryForMarble(stub shim.ChaincodeStubInterface, args []string) pb.Response { - - if len(args) < 1 { - return shim.Error("Incorrect number of arguments. 
Expecting 1") - } - - marbleName := args[0] - - fmt.Printf("- start getHistoryForMarble: %s\n", marbleName) - - resultsIterator, err := stub.GetHistoryForKey(marbleName) - if err != nil { - return shim.Error(err.Error()) - } - defer resultsIterator.Close() - - // buffer is a JSON array containing historic values for the marble - var buffer bytes.Buffer - buffer.WriteString("[") - - bArrayMemberAlreadyWritten := false - for resultsIterator.HasNext() { - response, err := resultsIterator.Next() - if err != nil { - return shim.Error(err.Error()) - } - // Add a comma before array members, suppress it for the first array member - if bArrayMemberAlreadyWritten == true { - buffer.WriteString(",") - } - buffer.WriteString("{\"TxId\":") - buffer.WriteString("\"") - buffer.WriteString(response.TxId) - buffer.WriteString("\"") - - buffer.WriteString(", \"Value\":") - // if it was a delete operation on given key, then we need to set the - //corresponding value null. Else, we will write the response.Value - //as-is (as the Value itself a JSON marble) - if response.IsDelete { - buffer.WriteString("null") - } else { - buffer.WriteString(string(response.Value)) - } - - buffer.WriteString(", \"Timestamp\":") - buffer.WriteString("\"") - buffer.WriteString(time.Unix(response.Timestamp.Seconds, int64(response.Timestamp.Nanos)).String()) - buffer.WriteString("\"") - - buffer.WriteString(", \"IsDelete\":") - buffer.WriteString("\"") - buffer.WriteString(strconv.FormatBool(response.IsDelete)) - buffer.WriteString("\"") - - buffer.WriteString("}") - bArrayMemberAlreadyWritten = true - } - buffer.WriteString("]") - - fmt.Printf("- getHistoryForMarble returning:\n%s\n", buffer.String()) - - return shim.Success(buffer.Bytes()) -} diff --git a/app/platform/fabric/e2e-test/fabric-samples/chaincode/marbles02/node/META-INF/statedb/couchdb/indexes/indexOwner.json b/app/platform/fabric/e2e-test/fabric-samples/chaincode/marbles02/node/META-INF/statedb/couchdb/indexes/indexOwner.json deleted file 
mode 100644 index 305f09044..000000000 --- a/app/platform/fabric/e2e-test/fabric-samples/chaincode/marbles02/node/META-INF/statedb/couchdb/indexes/indexOwner.json +++ /dev/null @@ -1 +0,0 @@ -{"index":{"fields":["docType","owner"]},"ddoc":"indexOwnerDoc", "name":"indexOwner","type":"json"} diff --git a/app/platform/fabric/e2e-test/fabric-samples/chaincode/marbles02/node/marbles_chaincode.js b/app/platform/fabric/e2e-test/fabric-samples/chaincode/marbles02/node/marbles_chaincode.js deleted file mode 100644 index 7536178c8..000000000 --- a/app/platform/fabric/e2e-test/fabric-samples/chaincode/marbles02/node/marbles_chaincode.js +++ /dev/null @@ -1,481 +0,0 @@ -/* -# Copyright IBM Corp. All Rights Reserved. -# -# SPDX-License-Identifier: Apache-2.0 -*/ - -// ====CHAINCODE EXECUTION SAMPLES (CLI) ================== - -// ==== Invoke marbles ==== -// peer chaincode invoke -C myc1 -n marbles -c '{"Args":["initMarble","marble1","blue","35","tom"]}' -// peer chaincode invoke -C myc1 -n marbles -c '{"Args":["initMarble","marble2","red","50","tom"]}' -// peer chaincode invoke -C myc1 -n marbles -c '{"Args":["initMarble","marble3","blue","70","tom"]}' -// peer chaincode invoke -C myc1 -n marbles -c '{"Args":["transferMarble","marble2","jerry"]}' -// peer chaincode invoke -C myc1 -n marbles -c '{"Args":["transferMarblesBasedOnColor","blue","jerry"]}' -// peer chaincode invoke -C myc1 -n marbles -c '{"Args":["delete","marble1"]}' - -// ==== Query marbles ==== -// peer chaincode query -C myc1 -n marbles -c '{"Args":["readMarble","marble1"]}' -// peer chaincode query -C myc1 -n marbles -c '{"Args":["getMarblesByRange","marble1","marble3"]}' -// peer chaincode query -C myc1 -n marbles -c '{"Args":["getHistoryForMarble","marble1"]}' -// peer chaincode query -C myc1 -n marbles -c '{"Args":["getMarblesByRangeWithPagination","marble1","marble3","3",""]}' - -// Rich Query (Only supported if CouchDB is used as state database): -// peer chaincode query -C myc1 -n marbles -c 
'{"Args":["queryMarblesByOwner","tom"]}' -// peer chaincode query -C myc1 -n marbles -c '{"Args":["queryMarbles","{\"selector\":{\"owner\":\"tom\"}}"]}' - -// Rich Query with Pagination (Only supported if CouchDB is used as state database): -// peer chaincode query -C myc1 -n marbles -c '{"Args":["queryMarblesWithPagination","{\"selector\":{\"owner\":\"tom\"}}","3",""]}' - -'use strict'; -const shim = require('fabric-shim'); -const util = require('util'); - -let Chaincode = class { - async Init(stub) { - let ret = stub.getFunctionAndParameters(); - console.info(ret); - console.info('=========== Instantiated Marbles Chaincode ==========='); - return shim.success(); - } - - async Invoke(stub) { - console.info('Transaction ID: ' + stub.getTxID()); - console.info(util.format('Args: %j', stub.getArgs())); - - let ret = stub.getFunctionAndParameters(); - console.info(ret); - - let method = this[ret.fcn]; - if (!method) { - console.log('no function of name:' + ret.fcn + ' found'); - throw new Error('Received unknown function ' + ret.fcn + ' invocation'); - } - try { - let payload = await method(stub, ret.params, this); - return shim.success(payload); - } catch (err) { - console.log(err); - return shim.error(err); - } - } - - // =============================================== - // initMarble - create a new marble - // =============================================== - async initMarble(stub, args, thisClass) { - if (args.length != 4) { - throw new Error('Incorrect number of arguments. 
Expecting 4'); - } - // ==== Input sanitation ==== - console.info('--- start init marble ---') - if (args[0].lenth <= 0) { - throw new Error('1st argument must be a non-empty string'); - } - if (args[1].lenth <= 0) { - throw new Error('2nd argument must be a non-empty string'); - } - if (args[2].lenth <= 0) { - throw new Error('3rd argument must be a non-empty string'); - } - if (args[3].lenth <= 0) { - throw new Error('4th argument must be a non-empty string'); - } - let marbleName = args[0]; - let color = args[1].toLowerCase(); - let owner = args[3].toLowerCase(); - let size = parseInt(args[2]); - if (typeof size !== 'number') { - throw new Error('3rd argument must be a numeric string'); - } - - // ==== Check if marble already exists ==== - let marbleState = await stub.getState(marbleName); - if (marbleState.toString()) { - throw new Error('This marble already exists: ' + marbleName); - } - - // ==== Create marble object and marshal to JSON ==== - let marble = {}; - marble.docType = 'marble'; - marble.name = marbleName; - marble.color = color; - marble.size = size; - marble.owner = owner; - - // === Save marble to state === - await stub.putState(marbleName, Buffer.from(JSON.stringify(marble))); - let indexName = 'color~name' - let colorNameIndexKey = await stub.createCompositeKey(indexName, [marble.color, marble.name]); - console.info(colorNameIndexKey); - // Save index entry to state. Only the key name is needed, no need to store a duplicate copy of the marble. - // Note - passing a 'nil' value will effectively delete the key from state, therefore we pass null character as value - await stub.putState(colorNameIndexKey, Buffer.from('\u0000')); - // ==== Marble saved and indexed. 
Return success ==== - console.info('- end init marble'); - } - - // =============================================== - // readMarble - read a marble from chaincode state - // =============================================== - async readMarble(stub, args, thisClass) { - if (args.length != 1) { - throw new Error('Incorrect number of arguments. Expecting name of the marble to query'); - } - - let name = args[0]; - if (!name) { - throw new Error(' marble name must not be empty'); - } - let marbleAsbytes = await stub.getState(name); //get the marble from chaincode state - if (!marbleAsbytes.toString()) { - let jsonResp = {}; - jsonResp.Error = 'Marble does not exist: ' + name; - throw new Error(JSON.stringify(jsonResp)); - } - console.info('======================================='); - console.log(marbleAsbytes.toString()); - console.info('======================================='); - return marbleAsbytes; - } - - // ================================================== - // delete - remove a marble key/value pair from state - // ================================================== - async delete(stub, args, thisClass) { - if (args.length != 1) { - throw new Error('Incorrect number of arguments. 
Expecting name of the marble to delete'); - } - let marbleName = args[0]; - if (!marbleName) { - throw new Error('marble name must not be empty'); - } - // to maintain the color~name index, we need to read the marble first and get its color - let valAsbytes = await stub.getState(marbleName); //get the marble from chaincode state - let jsonResp = {}; - if (!valAsbytes) { - jsonResp.error = 'marble does not exist: ' + name; - throw new Error(jsonResp); - } - let marbleJSON = {}; - try { - marbleJSON = JSON.parse(valAsbytes.toString()); - } catch (err) { - jsonResp = {}; - jsonResp.error = 'Failed to decode JSON of: ' + marbleName; - throw new Error(jsonResp); - } - - await stub.deleteState(marbleName); //remove the marble from chaincode state - - // delete the index - let indexName = 'color~name'; - let colorNameIndexKey = stub.createCompositeKey(indexName, [marbleJSON.color, marbleJSON.name]); - if (!colorNameIndexKey) { - throw new Error(' Failed to create the createCompositeKey'); - } - // Delete index entry to state. - await stub.deleteState(colorNameIndexKey); - } - - // =========================================================== - // transfer a marble by setting a new owner name on the marble - // =========================================================== - async transferMarble(stub, args, thisClass) { - // 0 1 - // 'name', 'bob' - if (args.length < 2) { - throw new Error('Incorrect number of arguments. 
Expecting marblename and owner') - } - - let marbleName = args[0]; - let newOwner = args[1].toLowerCase(); - console.info('- start transferMarble ', marbleName, newOwner); - - let marbleAsBytes = await stub.getState(marbleName); - if (!marbleAsBytes || !marbleAsBytes.toString()) { - throw new Error('marble does not exist'); - } - let marbleToTransfer = {}; - try { - marbleToTransfer = JSON.parse(marbleAsBytes.toString()); //unmarshal - } catch (err) { - let jsonResp = {}; - jsonResp.error = 'Failed to decode JSON of: ' + marbleName; - throw new Error(jsonResp); - } - console.info(marbleToTransfer); - marbleToTransfer.owner = newOwner; //change the owner - - let marbleJSONasBytes = Buffer.from(JSON.stringify(marbleToTransfer)); - await stub.putState(marbleName, marbleJSONasBytes); //rewrite the marble - - console.info('- end transferMarble (success)'); - } - - // =========================================================================================== - // getMarblesByRange performs a range query based on the start and end keys provided. - - // Read-only function results are not typically submitted to ordering. If the read-only - // results are submitted to ordering, or if the query is used in an update transaction - // and submitted to ordering, then the committing peers will re-execute to guarantee that - // result sets are stable between endorsement time and commit time. The transaction is - // invalidated by the committing peers if the result set has changed between endorsement - // time and commit time. - // Therefore, range queries are a safe option for performing update transactions based on query results. - // =========================================================================================== - async getMarblesByRange(stub, args, thisClass) { - - if (args.length < 2) { - throw new Error('Incorrect number of arguments. 
Expecting 2'); - } - - let startKey = args[0]; - let endKey = args[1]; - - let resultsIterator = await stub.getStateByRange(startKey, endKey); - let method = thisClass['getAllResults']; - let results = await method(resultsIterator, false); - - return Buffer.from(JSON.stringify(results)); - } - - // ==== Example: GetStateByPartialCompositeKey/RangeQuery ========================================= - // transferMarblesBasedOnColor will transfer marbles of a given color to a certain new owner. - // Uses a GetStateByPartialCompositeKey (range query) against color~name 'index'. - // Committing peers will re-execute range queries to guarantee that result sets are stable - // between endorsement time and commit time. The transaction is invalidated by the - // committing peers if the result set has changed between endorsement time and commit time. - // Therefore, range queries are a safe option for performing update transactions based on query results. - // =========================================================================================== - async transferMarblesBasedOnColor(stub, args, thisClass) { - - // 0 1 - // 'color', 'bob' - if (args.length < 2) { - throw new Error('Incorrect number of arguments. 
Expecting color and owner'); - } - - let color = args[0]; - let newOwner = args[1].toLowerCase(); - console.info('- start transferMarblesBasedOnColor ', color, newOwner); - - // Query the color~name index by color - // This will execute a key range query on all keys starting with 'color' - let coloredMarbleResultsIterator = await stub.getStateByPartialCompositeKey('color~name', [color]); - - let method = thisClass['transferMarble']; - // Iterate through result set and for each marble found, transfer to newOwner - while (true) { - let responseRange = await coloredMarbleResultsIterator.next(); - if (!responseRange || !responseRange.value || !responseRange.value.key) { - return; - } - console.log(responseRange.value.key); - - // let value = res.value.value.toString('utf8'); - let objectType; - let attributes; - ({ - objectType, - attributes - } = await stub.splitCompositeKey(responseRange.value.key)); - - let returnedColor = attributes[0]; - let returnedMarbleName = attributes[1]; - console.info(util.format('- found a marble from index:%s color:%s name:%s\n', objectType, returnedColor, returnedMarbleName)); - - // Now call the transfer function for the found marble. - // Re-use the same function that is used to transfer individual marbles - let response = await method(stub, [returnedMarbleName, newOwner]); - } - - let responsePayload = util.format('Transferred %s marbles to %s', color, newOwner); - console.info('- end transferMarblesBasedOnColor: ' + responsePayload); - } - - - // ===== Example: Parameterized rich query ================================================= - // queryMarblesByOwner queries for marbles based on a passed in owner. - // This is an example of a parameterized query where the query logic is baked into the chaincode, - // and accepting a single query parameter (owner). - // Only available on state databases that support rich query (e.g. 
CouchDB) - // ========================================================================================= - async queryMarblesByOwner(stub, args, thisClass) { - // 0 - // 'bob' - if (args.length < 1) { - throw new Error('Incorrect number of arguments. Expecting owner name.') - } - - let owner = args[0].toLowerCase(); - let queryString = {}; - queryString.selector = {}; - queryString.selector.docType = 'marble'; - queryString.selector.owner = owner; - let method = thisClass['getQueryResultForQueryString']; - let queryResults = await method(stub, JSON.stringify(queryString), thisClass); - return queryResults; //shim.success(queryResults); - } - - // ===== Example: Ad hoc rich query ======================================================== - // queryMarbles uses a query string to perform a query for marbles. - // Query string matching state database syntax is passed in and executed as is. - // Supports ad hoc queries that can be defined at runtime by the client. - // If this is not desired, follow the queryMarblesForOwner example for parameterized queries. - // Only available on state databases that support rich query (e.g. CouchDB) - // ========================================================================================= - async queryMarbles(stub, args, thisClass) { - // 0 - // 'queryString' - if (args.length < 1) { - throw new Error('Incorrect number of arguments. 
Expecting queryString'); - } - let queryString = args[0]; - if (!queryString) { - throw new Error('queryString must not be empty'); - } - let method = thisClass['getQueryResultForQueryString']; - let queryResults = await method(stub, queryString, thisClass); - return queryResults; - } - - async getAllResults(iterator, isHistory) { - let allResults = []; - while (true) { - let res = await iterator.next(); - - if (res.value && res.value.value.toString()) { - let jsonRes = {}; - console.log(res.value.value.toString('utf8')); - - if (isHistory && isHistory === true) { - jsonRes.TxId = res.value.tx_id; - jsonRes.Timestamp = res.value.timestamp; - jsonRes.IsDelete = res.value.is_delete.toString(); - try { - jsonRes.Value = JSON.parse(res.value.value.toString('utf8')); - } catch (err) { - console.log(err); - jsonRes.Value = res.value.value.toString('utf8'); - } - } else { - jsonRes.Key = res.value.key; - try { - jsonRes.Record = JSON.parse(res.value.value.toString('utf8')); - } catch (err) { - console.log(err); - jsonRes.Record = res.value.value.toString('utf8'); - } - } - allResults.push(jsonRes); - } - if (res.done) { - console.log('end of data'); - await iterator.close(); - console.info(allResults); - return allResults; - } - } - } - - // ========================================================================================= - // getQueryResultForQueryString executes the passed in query string. - // Result set is built and returned as a byte array containing the JSON results. 
- // ========================================================================================= - async getQueryResultForQueryString(stub, queryString, thisClass) { - - console.info('- getQueryResultForQueryString queryString:\n' + queryString) - let resultsIterator = await stub.getQueryResult(queryString); - let method = thisClass['getAllResults']; - - let results = await method(resultsIterator, false); - - return Buffer.from(JSON.stringify(results)); - } - - async getHistoryForMarble(stub, args, thisClass) { - - if (args.length < 1) { - throw new Error('Incorrect number of arguments. Expecting 1') - } - let marbleName = args[0]; - console.info('- start getHistoryForMarble: %s\n', marbleName); - - let resultsIterator = await stub.getHistoryForKey(marbleName); - let method = thisClass['getAllResults']; - let results = await method(resultsIterator, true); - - return Buffer.from(JSON.stringify(results)); - } - - // ====== Pagination ========================================================================= - // Pagination provides a method to retrieve records with a defined pagesize and - // start point (bookmark). An empty string bookmark defines the first "page" of a query - // result. Paginated queries return a bookmark that can be used in - // the next query to retrieve the next page of results. Paginated queries extend - // rich queries and range queries to include a pagesize and bookmark. - // - // Two examples are provided in this example. The first is getMarblesByRangeWithPagination - // which executes a paginated range query. - // The second example is a paginated query for rich ad-hoc queries. - // ========================================================================================= - - // ====== Example: Pagination with Range Query =============================================== - // getMarblesByRangeWithPagination performs a range query based on the start & end key, - // page size and a bookmark. 
- // - // The number of fetched records will be equal to or lesser than the page size. - // Paginated range queries are only valid for read only transactions. - // =========================================================================================== - async getMarblesByRangeWithPagination(stub, args, thisClass) { - if (args.length < 2) { - throw new Error('Incorrect number of arguments. Expecting 2'); - } - const startKey = args[0]; - const endKey = args[1]; - - const pageSize = parseInt(args[2], 10); - const bookmark = args[3]; - - const { iterator, metadata } = await stub.getStateByRangeWithPagination(startKey, endKey, pageSize, bookmark); - const getAllResults = thisClass['getAllResults']; - const results = await getAllResults(iterator, false); - // use RecordsCount and Bookmark to keep consistency with the go sample - results.ResponseMetadata = { - RecordsCount: metadata.fetched_records_count, - Bookmark: metadata.bookmark, - }; - return Buffer.from(JSON.stringify(results)); - } - - // ========================================================================================= - // getQueryResultForQueryStringWithPagination executes the passed in query string with - // pagination info. Result set is built and returned as a byte array containing the JSON results. - // ========================================================================================= - async queryMarblesWithPagination(stub, args, thisClass) { - - // 0 - // "queryString" - if (args.length < 3) { - return shim.Error("Incorrect number of arguments. 
Expecting 3") - } - - const queryString = args[0]; - const pageSize = parseInt(args[1], 10); - const bookmark = args[2]; - - const { iterator, metadata } = await stub.getQueryResultWithPagination(queryString, pageSize, bookmark); - const getAllResults = thisClass['getAllResults']; - const results = await getAllResults(iterator, false); - // use RecordsCount and Bookmark to keep consistency with the go sample - results.ResponseMetadata = { - RecordsCount: metadata.fetched_records_count, - Bookmark: metadata.bookmark, - }; - - return Buffer.from(JSON.stringify(results)); - } -}; - -shim.start(new Chaincode()); diff --git a/app/platform/fabric/e2e-test/fabric-samples/chaincode/marbles02/node/package.json b/app/platform/fabric/e2e-test/fabric-samples/chaincode/marbles02/node/package.json deleted file mode 100644 index 8be2caf30..000000000 --- a/app/platform/fabric/e2e-test/fabric-samples/chaincode/marbles02/node/package.json +++ /dev/null @@ -1,15 +0,0 @@ -{ - "name": "marbles", - "version": "1.0.0", - "description": "marbles chaincode implemented in node.js", - "engines": { - "node": ">=8.4.0", - "npm": ">=5.3.0" - }, - "scripts": { "start" : "node marbles_chaincode.js" }, - "engine-strict": true, - "license": "Apache-2.0", - "dependencies": { - "fabric-shim": "~1.4.0" - } -} diff --git a/app/platform/fabric/e2e-test/fabric-samples/chaincode/marbles02_private/collections_config.json b/app/platform/fabric/e2e-test/fabric-samples/chaincode/marbles02_private/collections_config.json deleted file mode 100644 index 82af88dba..000000000 --- a/app/platform/fabric/e2e-test/fabric-samples/chaincode/marbles02_private/collections_config.json +++ /dev/null @@ -1,18 +0,0 @@ -[ - { - "name": "collectionMarbles", - "policy": "OR('Org1MSP.member', 'Org2MSP.member')", - "requiredPeerCount": 0, - "maxPeerCount": 3, - "blockToLive":1000000, - "memberOnlyRead": true -}, - { - "name": "collectionMarblePrivateDetails", - "policy": "OR('Org1MSP.member')", - "requiredPeerCount": 0, - 
"maxPeerCount": 3, - "blockToLive":3, - "memberOnlyRead": true - } -] diff --git a/app/platform/fabric/e2e-test/fabric-samples/chaincode/marbles02_private/go/META-INF/statedb/couchdb/collections/collectionMarbles/indexes/indexOwner.json b/app/platform/fabric/e2e-test/fabric-samples/chaincode/marbles02_private/go/META-INF/statedb/couchdb/collections/collectionMarbles/indexes/indexOwner.json deleted file mode 100644 index 305f09044..000000000 --- a/app/platform/fabric/e2e-test/fabric-samples/chaincode/marbles02_private/go/META-INF/statedb/couchdb/collections/collectionMarbles/indexes/indexOwner.json +++ /dev/null @@ -1 +0,0 @@ -{"index":{"fields":["docType","owner"]},"ddoc":"indexOwnerDoc", "name":"indexOwner","type":"json"} diff --git a/app/platform/fabric/e2e-test/fabric-samples/chaincode/marbles02_private/go/marbles_chaincode_private.go b/app/platform/fabric/e2e-test/fabric-samples/chaincode/marbles02_private/go/marbles_chaincode_private.go deleted file mode 100644 index b5bafb8da..000000000 --- a/app/platform/fabric/e2e-test/fabric-samples/chaincode/marbles02_private/go/marbles_chaincode_private.go +++ /dev/null @@ -1,650 +0,0 @@ -/* -Copyright IBM Corp. All Rights Reserved. 
- -SPDX-License-Identifier: Apache-2.0 -*/ - -// ====CHAINCODE EXECUTION SAMPLES (CLI) ================== - -// ==== Invoke marbles, pass private data as base64 encoded bytes in transient map ==== -// -// export MARBLE=$(echo -n "{\"name\":\"marble1\",\"color\":\"blue\",\"size\":35,\"owner\":\"tom\",\"price\":99}" | base64 | tr -d \\n) -// peer chaincode invoke -C mychannel -n marblesp -c '{"Args":["initMarble"]}' --transient "{\"marble\":\"$MARBLE\"}" -// -// export MARBLE=$(echo -n "{\"name\":\"marble2\",\"color\":\"red\",\"size\":50,\"owner\":\"tom\",\"price\":102}" | base64 | tr -d \\n) -// peer chaincode invoke -C mychannel -n marblesp -c '{"Args":["initMarble"]}' --transient "{\"marble\":\"$MARBLE\"}" -// -// export MARBLE=$(echo -n "{\"name\":\"marble3\",\"color\":\"blue\",\"size\":70,\"owner\":\"tom\",\"price\":103}" | base64 | tr -d \\n) -// peer chaincode invoke -C mychannel -n marblesp -c '{"Args":["initMarble"]}' --transient "{\"marble\":\"$MARBLE\"}" -// -// export MARBLE_OWNER=$(echo -n "{\"name\":\"marble2\",\"owner\":\"jerry\"}" | base64 | tr -d \\n) -// peer chaincode invoke -C mychannel -n marblesp -c '{"Args":["transferMarble"]}' --transient "{\"marble_owner\":\"$MARBLE_OWNER\"}" -// -// export MARBLE_DELETE=$(echo -n "{\"name\":\"marble1\"}" | base64 | tr -d \\n) -// peer chaincode invoke -C mychannel -n marblesp -c '{"Args":["delete"]}' --transient "{\"marble_delete\":\"$MARBLE_DELETE\"}" - -// ==== Query marbles, since queries are not recorded on chain we don't need to hide private data in transient map ==== -// peer chaincode query -C mychannel -n marblesp -c '{"Args":["readMarble","marble1"]}' -// peer chaincode query -C mychannel -n marblesp -c '{"Args":["readMarblePrivateDetails","marble1"]}' -// peer chaincode query -C mychannel -n marblesp -c '{"Args":["getMarblesByRange","marble1","marble4"]}' -// -// Rich Query (Only supported if CouchDB is used as state database): -// peer chaincode query -C mychannel -n marblesp -c 
'{"Args":["queryMarblesByOwner","tom"]}' -// peer chaincode query -C mychannel -n marblesp -c '{"Args":["queryMarbles","{\"selector\":{\"owner\":\"tom\"}}"]}' - -// INDEXES TO SUPPORT COUCHDB RICH QUERIES -// -// Indexes in CouchDB are required in order to make JSON queries efficient and are required for -// any JSON query with a sort. As of Hyperledger Fabric 1.1, indexes may be packaged alongside -// chaincode in a META-INF/statedb/couchdb/indexes directory. Or for indexes on private data -// collections, in a META-INF/statedb/couchdb/collections//indexes directory. -// Each index must be defined in its own text file with extension *.json with the index -// definition formatted in JSON following the CouchDB index JSON syntax as documented at: -// http://docs.couchdb.org/en/2.1.1/api/database/find.html#db-index -// -// This marbles02_private example chaincode demonstrates a packaged index which you -// can find in META-INF/statedb/couchdb/collection/collectionMarbles/indexes/indexOwner.json. -// For deployment of chaincode to production environments, it is recommended -// to define any indexes alongside chaincode so that the chaincode and supporting indexes -// are deployed automatically as a unit, once the chaincode has been installed on a peer and -// instantiated on a channel. See Hyperledger Fabric documentation for more details. -// -// If you have access to the your peer's CouchDB state database in a development environment, -// you may want to iteratively test various indexes in support of your chaincode queries. You -// can use the CouchDB Fauxton interface or a command line curl utility to create and update -// indexes. Then once you finalize an index, include the index definition alongside your -// chaincode in the META-INF/statedb/couchdb/indexes directory or -// META-INF/statedb/couchdb/collections//indexes directory, for packaging -// and deployment to managed environments. 
-// -// In the examples below you can find index definitions that support marbles02_private -// chaincode queries, along with the syntax that you can use in development environments -// to create the indexes in the CouchDB Fauxton interface. -// - -//Example hostname:port configurations to access CouchDB. -// -//To access CouchDB docker container from within another docker container or from vagrant environments: -// http://couchdb:5984/ -// -//Inside couchdb docker container -// http://127.0.0.1:5984/ - -// Index for docType, owner. -// Note that docType and owner fields must be prefixed with the "data" wrapper -// -// Index definition for use with Fauxton interface -// {"index":{"fields":["data.docType","data.owner"]},"ddoc":"indexOwnerDoc", "name":"indexOwner","type":"json"} - -// Index for docType, owner, size (descending order). -// Note that docType, owner and size fields must be prefixed with the "data" wrapper -// -// Index definition for use with Fauxton interface -// {"index":{"fields":[{"data.size":"desc"},{"data.docType":"desc"},{"data.owner":"desc"}]},"ddoc":"indexSizeSortDoc", "name":"indexSizeSortDesc","type":"json"} - -// Rich Query with index design doc and index name specified (Only supported if CouchDB is used as state database): -// peer chaincode query -C mychannel -n marblesp -c '{"Args":["queryMarbles","{\"selector\":{\"docType\":\"marble\",\"owner\":\"tom\"}, \"use_index\":[\"_design/indexOwnerDoc\", \"indexOwner\"]}"]}' - -// Rich Query with index design doc specified only (Only supported if CouchDB is used as state database): -// peer chaincode query -C mychannel -n marblesp -c '{"Args":["queryMarbles","{\"selector\":{\"docType\":{\"$eq\":\"marble\"},\"owner\":{\"$eq\":\"tom\"},\"size\":{\"$gt\":0}},\"fields\":[\"docType\",\"owner\",\"size\"],\"sort\":[{\"size\":\"desc\"}],\"use_index\":\"_design/indexSizeSortDoc\"}"]}' - -package main - -import ( - "bytes" - "encoding/json" - "fmt" - "strings" - - 
"github.com/hyperledger/fabric/core/chaincode/shim" - pb "github.com/hyperledger/fabric/protos/peer" -) - -// SimpleChaincode example simple Chaincode implementation -type SimpleChaincode struct { -} - -type marble struct { - ObjectType string `json:"docType"` //docType is used to distinguish the various types of objects in state database - Name string `json:"name"` //the fieldtags are needed to keep case from bouncing around - Color string `json:"color"` - Size int `json:"size"` - Owner string `json:"owner"` -} - -type marblePrivateDetails struct { - ObjectType string `json:"docType"` //docType is used to distinguish the various types of objects in state database - Name string `json:"name"` //the fieldtags are needed to keep case from bouncing around - Price int `json:"price"` -} - -// =================================================================================== -// Main -// =================================================================================== -func main() { - err := shim.Start(new(SimpleChaincode)) - if err != nil { - fmt.Printf("Error starting Simple chaincode: %s", err) - } -} - -// Init initializes chaincode -// =========================== -func (t *SimpleChaincode) Init(stub shim.ChaincodeStubInterface) pb.Response { - return shim.Success(nil) -} - -// Invoke - Our entry point for Invocations -// ======================================== -func (t *SimpleChaincode) Invoke(stub shim.ChaincodeStubInterface) pb.Response { - function, args := stub.GetFunctionAndParameters() - fmt.Println("invoke is running " + function) - - // Handle different functions - switch function { - case "initMarble": - //create a new marble - return t.initMarble(stub, args) - case "readMarble": - //read a marble - return t.readMarble(stub, args) - case "readMarblePrivateDetails": - //read a marble private details - return t.readMarblePrivateDetails(stub, args) - case "transferMarble": - //change owner of a specific marble - return t.transferMarble(stub, args) - case 
"delete": - //delete a marble - return t.delete(stub, args) - case "queryMarblesByOwner": - //find marbles for owner X using rich query - return t.queryMarblesByOwner(stub, args) - case "queryMarbles": - //find marbles based on an ad hoc rich query - return t.queryMarbles(stub, args) - case "getMarblesByRange": - //get marbles based on range query - return t.getMarblesByRange(stub, args) - default: - //error - fmt.Println("invoke did not find func: " + function) - return shim.Error("Received unknown function invocation") - } -} - -// ============================================================ -// initMarble - create a new marble, store into chaincode state -// ============================================================ -func (t *SimpleChaincode) initMarble(stub shim.ChaincodeStubInterface, args []string) pb.Response { - var err error - - type marbleTransientInput struct { - Name string `json:"name"` //the fieldtags are needed to keep case from bouncing around - Color string `json:"color"` - Size int `json:"size"` - Owner string `json:"owner"` - Price int `json:"price"` - } - - // ==== Input sanitation ==== - fmt.Println("- start init marble") - - if len(args) != 0 { - return shim.Error("Incorrect number of arguments. 
Private marble data must be passed in transient map.") - } - - transMap, err := stub.GetTransient() - if err != nil { - return shim.Error("Error getting transient: " + err.Error()) - } - - if _, ok := transMap["marble"]; !ok { - return shim.Error("marble must be a key in the transient map") - } - - if len(transMap["marble"]) == 0 { - return shim.Error("marble value in the transient map must be a non-empty JSON string") - } - - var marbleInput marbleTransientInput - err = json.Unmarshal(transMap["marble"], &marbleInput) - if err != nil { - return shim.Error("Failed to decode JSON of: " + string(transMap["marble"])) - } - - if len(marbleInput.Name) == 0 { - return shim.Error("name field must be a non-empty string") - } - if len(marbleInput.Color) == 0 { - return shim.Error("color field must be a non-empty string") - } - if marbleInput.Size <= 0 { - return shim.Error("size field must be a positive integer") - } - if len(marbleInput.Owner) == 0 { - return shim.Error("owner field must be a non-empty string") - } - if marbleInput.Price <= 0 { - return shim.Error("price field must be a positive integer") - } - - // ==== Check if marble already exists ==== - marbleAsBytes, err := stub.GetPrivateData("collectionMarbles", marbleInput.Name) - if err != nil { - return shim.Error("Failed to get marble: " + err.Error()) - } else if marbleAsBytes != nil { - fmt.Println("This marble already exists: " + marbleInput.Name) - return shim.Error("This marble already exists: " + marbleInput.Name) - } - - // ==== Create marble object, marshal to JSON, and save to state ==== - marble := &marble{ - ObjectType: "marble", - Name: marbleInput.Name, - Color: marbleInput.Color, - Size: marbleInput.Size, - Owner: marbleInput.Owner, - } - marbleJSONasBytes, err := json.Marshal(marble) - if err != nil { - return shim.Error(err.Error()) - } - - // === Save marble to state === - err = stub.PutPrivateData("collectionMarbles", marbleInput.Name, marbleJSONasBytes) - if err != nil { - return 
shim.Error(err.Error()) - } - - // ==== Create marble private details object with price, marshal to JSON, and save to state ==== - marblePrivateDetails := &marblePrivateDetails{ - ObjectType: "marblePrivateDetails", - Name: marbleInput.Name, - Price: marbleInput.Price, - } - marblePrivateDetailsBytes, err := json.Marshal(marblePrivateDetails) - if err != nil { - return shim.Error(err.Error()) - } - err = stub.PutPrivateData("collectionMarblePrivateDetails", marbleInput.Name, marblePrivateDetailsBytes) - if err != nil { - return shim.Error(err.Error()) - } - - // ==== Index the marble to enable color-based range queries, e.g. return all blue marbles ==== - // An 'index' is a normal key/value entry in state. - // The key is a composite key, with the elements that you want to range query on listed first. - // In our case, the composite key is based on indexName~color~name. - // This will enable very efficient state range queries based on composite keys matching indexName~color~* - indexName := "color~name" - colorNameIndexKey, err := stub.CreateCompositeKey(indexName, []string{marble.Color, marble.Name}) - if err != nil { - return shim.Error(err.Error()) - } - // Save index entry to state. Only the key name is needed, no need to store a duplicate copy of the marble. - // Note - passing a 'nil' value will effectively delete the key from state, therefore we pass null character as value - value := []byte{0x00} - stub.PutPrivateData("collectionMarbles", colorNameIndexKey, value) - - // ==== Marble saved and indexed. Return success ==== - fmt.Println("- end init marble") - return shim.Success(nil) -} - -// =============================================== -// readMarble - read a marble from chaincode state -// =============================================== -func (t *SimpleChaincode) readMarble(stub shim.ChaincodeStubInterface, args []string) pb.Response { - var name, jsonResp string - var err error - - if len(args) != 1 { - return shim.Error("Incorrect number of arguments. 
Expecting name of the marble to query") - } - - name = args[0] - valAsbytes, err := stub.GetPrivateData("collectionMarbles", name) //get the marble from chaincode state - if err != nil { - jsonResp = "{\"Error\":\"Failed to get state for " + name + "\"}" - return shim.Error(jsonResp) - } else if valAsbytes == nil { - jsonResp = "{\"Error\":\"Marble does not exist: " + name + "\"}" - return shim.Error(jsonResp) - } - - return shim.Success(valAsbytes) -} - -// =============================================== -// readMarblereadMarblePrivateDetails - read a marble private details from chaincode state -// =============================================== -func (t *SimpleChaincode) readMarblePrivateDetails(stub shim.ChaincodeStubInterface, args []string) pb.Response { - var name, jsonResp string - var err error - - if len(args) != 1 { - return shim.Error("Incorrect number of arguments. Expecting name of the marble to query") - } - - name = args[0] - valAsbytes, err := stub.GetPrivateData("collectionMarblePrivateDetails", name) //get the marble private details from chaincode state - if err != nil { - jsonResp = "{\"Error\":\"Failed to get private details for " + name + ": " + err.Error() + "\"}" - return shim.Error(jsonResp) - } else if valAsbytes == nil { - jsonResp = "{\"Error\":\"Marble private details does not exist: " + name + "\"}" - return shim.Error(jsonResp) - } - - return shim.Success(valAsbytes) -} - -// ================================================== -// delete - remove a marble key/value pair from state -// ================================================== -func (t *SimpleChaincode) delete(stub shim.ChaincodeStubInterface, args []string) pb.Response { - fmt.Println("- start delete marble") - - type marbleDeleteTransientInput struct { - Name string `json:"name"` - } - - if len(args) != 0 { - return shim.Error("Incorrect number of arguments. 
Private marble name must be passed in transient map.") - } - - transMap, err := stub.GetTransient() - if err != nil { - return shim.Error("Error getting transient: " + err.Error()) - } - - if _, ok := transMap["marble_delete"]; !ok { - return shim.Error("marble_delete must be a key in the transient map") - } - - if len(transMap["marble_delete"]) == 0 { - return shim.Error("marble_delete value in the transient map must be a non-empty JSON string") - } - - var marbleDeleteInput marbleDeleteTransientInput - err = json.Unmarshal(transMap["marble_delete"], &marbleDeleteInput) - if err != nil { - return shim.Error("Failed to decode JSON of: " + string(transMap["marble_delete"])) - } - - if len(marbleDeleteInput.Name) == 0 { - return shim.Error("name field must be a non-empty string") - } - - // to maintain the color~name index, we need to read the marble first and get its color - valAsbytes, err := stub.GetPrivateData("collectionMarbles", marbleDeleteInput.Name) //get the marble from chaincode state - if err != nil { - return shim.Error("Failed to get state for " + marbleDeleteInput.Name) - } else if valAsbytes == nil { - return shim.Error("Marble does not exist: " + marbleDeleteInput.Name) - } - - var marbleToDelete marble - err = json.Unmarshal([]byte(valAsbytes), &marbleToDelete) - if err != nil { - return shim.Error("Failed to decode JSON of: " + string(valAsbytes)) - } - - // delete the marble from state - err = stub.DelPrivateData("collectionMarbles", marbleDeleteInput.Name) - if err != nil { - return shim.Error("Failed to delete state:" + err.Error()) - } - - // Also delete the marble from the color~name index - indexName := "color~name" - colorNameIndexKey, err := stub.CreateCompositeKey(indexName, []string{marbleToDelete.Color, marbleToDelete.Name}) - if err != nil { - return shim.Error(err.Error()) - } - err = stub.DelPrivateData("collectionMarbles", colorNameIndexKey) - if err != nil { - return shim.Error("Failed to delete state:" + err.Error()) - } - - // 
Finally, delete private details of marble - err = stub.DelPrivateData("collectionMarblePrivateDetails", marbleDeleteInput.Name) - if err != nil { - return shim.Error(err.Error()) - } - - return shim.Success(nil) -} - -// =========================================================== -// transfer a marble by setting a new owner name on the marble -// =========================================================== -func (t *SimpleChaincode) transferMarble(stub shim.ChaincodeStubInterface, args []string) pb.Response { - - fmt.Println("- start transfer marble") - - type marbleTransferTransientInput struct { - Name string `json:"name"` - Owner string `json:"owner"` - } - - if len(args) != 0 { - return shim.Error("Incorrect number of arguments. Private marble data must be passed in transient map.") - } - - transMap, err := stub.GetTransient() - if err != nil { - return shim.Error("Error getting transient: " + err.Error()) - } - - if _, ok := transMap["marble_owner"]; !ok { - return shim.Error("marble_owner must be a key in the transient map") - } - - if len(transMap["marble_owner"]) == 0 { - return shim.Error("marble_owner value in the transient map must be a non-empty JSON string") - } - - var marbleTransferInput marbleTransferTransientInput - err = json.Unmarshal(transMap["marble_owner"], &marbleTransferInput) - if err != nil { - return shim.Error("Failed to decode JSON of: " + string(transMap["marble_owner"])) - } - - if len(marbleTransferInput.Name) == 0 { - return shim.Error("name field must be a non-empty string") - } - if len(marbleTransferInput.Owner) == 0 { - return shim.Error("owner field must be a non-empty string") - } - - marbleAsBytes, err := stub.GetPrivateData("collectionMarbles", marbleTransferInput.Name) - if err != nil { - return shim.Error("Failed to get marble:" + err.Error()) - } else if marbleAsBytes == nil { - return shim.Error("Marble does not exist: " + marbleTransferInput.Name) - } - - marbleToTransfer := marble{} - err = json.Unmarshal(marbleAsBytes, 
&marbleToTransfer) //unmarshal it aka JSON.parse() - if err != nil { - return shim.Error(err.Error()) - } - marbleToTransfer.Owner = marbleTransferInput.Owner //change the owner - - marbleJSONasBytes, _ := json.Marshal(marbleToTransfer) - err = stub.PutPrivateData("collectionMarbles", marbleToTransfer.Name, marbleJSONasBytes) //rewrite the marble - if err != nil { - return shim.Error(err.Error()) - } - - fmt.Println("- end transferMarble (success)") - return shim.Success(nil) -} - -// =========================================================================================== -// getMarblesByRange performs a range query based on the start and end keys provided. - -// Read-only function results are not typically submitted to ordering. If the read-only -// results are submitted to ordering, or if the query is used in an update transaction -// and submitted to ordering, then the committing peers will re-execute to guarantee that -// result sets are stable between endorsement time and commit time. The transaction is -// invalidated by the committing peers if the result set has changed between endorsement -// time and commit time. -// Therefore, range queries are a safe option for performing update transactions based on query results. -// =========================================================================================== -func (t *SimpleChaincode) getMarblesByRange(stub shim.ChaincodeStubInterface, args []string) pb.Response { - - if len(args) < 2 { - return shim.Error("Incorrect number of arguments. 
Expecting 2") - } - - startKey := args[0] - endKey := args[1] - - resultsIterator, err := stub.GetPrivateDataByRange("collectionMarbles", startKey, endKey) - if err != nil { - return shim.Error(err.Error()) - } - defer resultsIterator.Close() - - // buffer is a JSON array containing QueryResults - var buffer bytes.Buffer - buffer.WriteString("[") - - bArrayMemberAlreadyWritten := false - for resultsIterator.HasNext() { - queryResponse, err := resultsIterator.Next() - if err != nil { - return shim.Error(err.Error()) - } - // Add a comma before array members, suppress it for the first array member - if bArrayMemberAlreadyWritten == true { - buffer.WriteString(",") - } - buffer.WriteString("{\"Key\":") - buffer.WriteString("\"") - buffer.WriteString(queryResponse.Key) - buffer.WriteString("\"") - - buffer.WriteString(", \"Record\":") - // Record is a JSON object, so we write as-is - buffer.WriteString(string(queryResponse.Value)) - buffer.WriteString("}") - bArrayMemberAlreadyWritten = true - } - buffer.WriteString("]") - - fmt.Printf("- getMarblesByRange queryResult:\n%s\n", buffer.String()) - - return shim.Success(buffer.Bytes()) -} - -// =======Rich queries ========================================================================= -// Two examples of rich queries are provided below (parameterized query and ad hoc query). -// Rich queries pass a query string to the state database. -// Rich queries are only supported by state database implementations -// that support rich query (e.g. CouchDB). -// The query string is in the syntax of the underlying state database. -// With rich queries there is no guarantee that the result set hasn't changed between -// endorsement time and commit time, aka 'phantom reads'. -// Therefore, rich queries should not be used in update transactions, unless the -// application handles the possibility of result set changes between endorsement and commit time. -// Rich queries can be used for point-in-time queries against a peer. 
-// ============================================================================================ - -// ===== Example: Parameterized rich query ================================================= -// queryMarblesByOwner queries for marbles based on a passed in owner. -// This is an example of a parameterized query where the query logic is baked into the chaincode, -// and accepting a single query parameter (owner). -// Only available on state databases that support rich query (e.g. CouchDB) -// ========================================================================================= -func (t *SimpleChaincode) queryMarblesByOwner(stub shim.ChaincodeStubInterface, args []string) pb.Response { - - // 0 - // "bob" - if len(args) < 1 { - return shim.Error("Incorrect number of arguments. Expecting 1") - } - - owner := strings.ToLower(args[0]) - - queryString := fmt.Sprintf("{\"selector\":{\"docType\":\"marble\",\"owner\":\"%s\"}}", owner) - - queryResults, err := getQueryResultForQueryString(stub, queryString) - if err != nil { - return shim.Error(err.Error()) - } - return shim.Success(queryResults) -} - -// ===== Example: Ad hoc rich query ======================================================== -// queryMarbles uses a query string to perform a query for marbles. -// Query string matching state database syntax is passed in and executed as is. -// Supports ad hoc queries that can be defined at runtime by the client. -// If this is not desired, follow the queryMarblesForOwner example for parameterized queries. -// Only available on state databases that support rich query (e.g. CouchDB) -// ========================================================================================= -func (t *SimpleChaincode) queryMarbles(stub shim.ChaincodeStubInterface, args []string) pb.Response { - - // 0 - // "queryString" - if len(args) < 1 { - return shim.Error("Incorrect number of arguments. 
Expecting 1") - } - - queryString := args[0] - - queryResults, err := getQueryResultForQueryString(stub, queryString) - if err != nil { - return shim.Error(err.Error()) - } - return shim.Success(queryResults) -} - -// ========================================================================================= -// getQueryResultForQueryString executes the passed in query string. -// Result set is built and returned as a byte array containing the JSON results. -// ========================================================================================= -func getQueryResultForQueryString(stub shim.ChaincodeStubInterface, queryString string) ([]byte, error) { - - fmt.Printf("- getQueryResultForQueryString queryString:\n%s\n", queryString) - - resultsIterator, err := stub.GetPrivateDataQueryResult("collectionMarbles", queryString) - if err != nil { - return nil, err - } - defer resultsIterator.Close() - - // buffer is a JSON array containing QueryRecords - var buffer bytes.Buffer - buffer.WriteString("[") - - bArrayMemberAlreadyWritten := false - for resultsIterator.HasNext() { - queryResponse, err := resultsIterator.Next() - if err != nil { - return nil, err - } - // Add a comma before array members, suppress it for the first array member - if bArrayMemberAlreadyWritten == true { - buffer.WriteString(",") - } - buffer.WriteString("{\"Key\":") - buffer.WriteString("\"") - buffer.WriteString(queryResponse.Key) - buffer.WriteString("\"") - - buffer.WriteString(", \"Record\":") - // Record is a JSON object, so we write as-is - buffer.WriteString(string(queryResponse.Value)) - buffer.WriteString("}") - bArrayMemberAlreadyWritten = true - } - buffer.WriteString("]") - - fmt.Printf("- getQueryResultForQueryString queryResult:\n%s\n", buffer.String()) - - return buffer.Bytes(), nil -} diff --git a/app/platform/fabric/e2e-test/fabric-samples/chaincode/sacc/sacc.go b/app/platform/fabric/e2e-test/fabric-samples/chaincode/sacc/sacc.go deleted file mode 100644 index 
4bfb165b5..000000000 --- a/app/platform/fabric/e2e-test/fabric-samples/chaincode/sacc/sacc.go +++ /dev/null @@ -1,97 +0,0 @@ -/* - * Copyright IBM Corp All Rights Reserved - * - * SPDX-License-Identifier: Apache-2.0 - */ - -package main - -import ( - "fmt" - - "github.com/hyperledger/fabric/core/chaincode/shim" - "github.com/hyperledger/fabric/protos/peer" -) - -// SimpleAsset implements a simple chaincode to manage an asset -type SimpleAsset struct { -} - -// Init is called during chaincode instantiation to initialize any -// data. Note that chaincode upgrade also calls this function to reset -// or to migrate data. -func (t *SimpleAsset) Init(stub shim.ChaincodeStubInterface) peer.Response { - // Get the args from the transaction proposal - args := stub.GetStringArgs() - if len(args) != 2 { - return shim.Error("Incorrect arguments. Expecting a key and a value") - } - - // Set up any variables or assets here by calling stub.PutState() - - // We store the key and the value on the ledger - err := stub.PutState(args[0], []byte(args[1])) - if err != nil { - return shim.Error(fmt.Sprintf("Failed to create asset: %s", args[0])) - } - return shim.Success(nil) -} - -// Invoke is called per transaction on the chaincode. Each transaction is -// either a 'get' or a 'set' on the asset created by Init function. The Set -// method may create a new asset by specifying a new key-value pair. -func (t *SimpleAsset) Invoke(stub shim.ChaincodeStubInterface) peer.Response { - // Extract the function and args from the transaction proposal - fn, args := stub.GetFunctionAndParameters() - - var result string - var err error - if fn == "set" { - result, err = set(stub, args) - } else { // assume 'get' even if fn is nil - result, err = get(stub, args) - } - if err != nil { - return shim.Error(err.Error()) - } - - // Return the result as success payload - return shim.Success([]byte(result)) -} - -// Set stores the asset (both key and value) on the ledger. 
If the key exists, -// it will override the value with the new one -func set(stub shim.ChaincodeStubInterface, args []string) (string, error) { - if len(args) != 2 { - return "", fmt.Errorf("Incorrect arguments. Expecting a key and a value") - } - - err := stub.PutState(args[0], []byte(args[1])) - if err != nil { - return "", fmt.Errorf("Failed to set asset: %s", args[0]) - } - return args[1], nil -} - -// Get returns the value of the specified asset key -func get(stub shim.ChaincodeStubInterface, args []string) (string, error) { - if len(args) != 1 { - return "", fmt.Errorf("Incorrect arguments. Expecting a key") - } - - value, err := stub.GetState(args[0]) - if err != nil { - return "", fmt.Errorf("Failed to get asset: %s with error: %s", args[0], err) - } - if value == nil { - return "", fmt.Errorf("Asset not found: %s", args[0]) - } - return string(value), nil -} - -// main function starts up the chaincode in the container during instantiate -func main() { - if err := shim.Start(new(SimpleAsset)); err != nil { - fmt.Printf("Error starting SimpleAsset chaincode: %s", err) - } -} diff --git a/app/platform/fabric/e2e-test/fabric/examples/chaincode/go/enccc_example/README.md b/app/platform/fabric/e2e-test/fabric/examples/chaincode/go/enccc_example/README.md deleted file mode 100644 index 84b85b921..000000000 --- a/app/platform/fabric/e2e-test/fabric/examples/chaincode/go/enccc_example/README.md +++ /dev/null @@ -1,80 +0,0 @@ -# Using EncCC - -To test `EncCC` you need to first generate an AES 256 bit key as a base64 -encoded string so that it can be passed as JSON to the peer chaincode -invoke's transient parameter. - -Note: Before getting started you must use govendor to add external dependencies. Please issue the following commands inside the "enccc_example" folder: -``` -govendor init -govendor add +external -``` - -Let's generate the encryption and decryption keys. The example will simulate a shared key so the key is used for both encryption and decryption. 
-``` -ENCKEY=`openssl rand 32 -base64` && DECKEY=$ENCKEY -``` - -At this point, you can invoke the chaincode to encrypt key-value pairs as -follows: - -Note: the following assumes the env is initialized and peer has joined channel "my-ch". -``` -peer chaincode invoke -n enccc -C my-ch -c '{"Args":["ENCRYPT","key1","value1"]}' --transient "{\"ENCKEY\":\"$ENCKEY\"}" -``` - -This call will encrypt using a random IV. This may be undesirable for -instance if the chaincode invocation needs to be endorsed by multiple -peers since it would cause the endorsement of conflicting read/write sets. -It is possible to encrypt deterministically by specifying the IV, as -follows: at first the IV must be created - -``` -IV=`openssl rand 16 -base64` -``` - -Then, the IV may be specified in the transient field - -``` -peer chaincode invoke -n enccc -C my-ch -c '{"Args":["ENCRYPT","key2","value2"]}' --transient "{\"ENCKEY\":\"$ENCKEY\",\"IV\":\"$IV\"}" -``` - -Two such invocations will produce equal KVS writes, which can be endorsed by multiple nodes. - -The value can be retrieved back as follows - -``` -peer chaincode query -n enccc -C my-ch -c '{"Args":["DECRYPT","key1"]}' --transient "{\"DECKEY\":\"$DECKEY\"}" -``` -``` -peer chaincode query -n enccc -C my-ch -c '{"Args":["DECRYPT","key2"]}' --transient "{\"DECKEY\":\"$DECKEY\"}" -``` -Note that in this case we use a chaincode query operation; while the use of the -transient field guarantees that the content will not be written to the ledger, -the chaincode decrypts the message and puts it in the proposal response. An -invocation would persist the result in the ledger for all channel readers to -see whereas a query can be discarded and so the result remains confidential. - -To test signing and verifying, you also need to generate an ECDSA key for the appropriate -curve, as follows. 
- -``` -On Intel: -SIGKEY=`openssl ecparam -name prime256v1 -genkey | tail -n5 | base64 -w0` && VERKEY=$SIGKEY - -On Mac: -SIGKEY=`openssl ecparam -name prime256v1 -genkey | tail -n5 | base64` && VERKEY=$SIGKEY -``` - -At this point, you can invoke the chaincode to sign and then encrypt key-value -pairs as follows - -``` -peer chaincode invoke -n enccc -C my-ch -c '{"Args":["ENCRYPTSIGN","key3","value3"]}' --transient "{\"ENCKEY\":\"$ENCKEY\",\"SIGKEY\":\"$SIGKEY\"}" -``` - -And similarly to retrieve them using a query - -``` -peer chaincode query -n enccc -C my-ch -c '{"Args":["DECRYPTVERIFY","key3"]}' --transient "{\"DECKEY\":\"$DECKEY\",\"VERKEY\":\"$VERKEY\"}" -``` diff --git a/app/platform/fabric/e2e-test/fabric/examples/chaincode/go/enccc_example/enccc_example.go b/app/platform/fabric/e2e-test/fabric/examples/chaincode/go/enccc_example/enccc_example.go deleted file mode 100644 index 284695d33..000000000 --- a/app/platform/fabric/e2e-test/fabric/examples/chaincode/go/enccc_example/enccc_example.go +++ /dev/null @@ -1,223 +0,0 @@ -/* -Copyright IBM Corp. All Rights Reserved. 
- -SPDX-License-Identifier: Apache-2.0 -*/ - -package main - -import ( - "fmt" - - "github.com/hyperledger/fabric/bccsp" - "github.com/hyperledger/fabric/bccsp/factory" - "github.com/hyperledger/fabric/core/chaincode/shim" - "github.com/hyperledger/fabric/core/chaincode/shim/ext/entities" - pb "github.com/hyperledger/fabric/protos/peer" -) - -const DECKEY = "DECKEY" -const VERKEY = "VERKEY" -const ENCKEY = "ENCKEY" -const SIGKEY = "SIGKEY" -const IV = "IV" - -// EncCC example simple Chaincode implementation of a chaincode that uses encryption/signatures -type EncCC struct { - bccspInst bccsp.BCCSP -} - -// Init does nothing for this cc -func (t *EncCC) Init(stub shim.ChaincodeStubInterface) pb.Response { - return shim.Success(nil) -} - -// Encrypter exposes how to write state to the ledger after having -// encrypted it with an AES 256 bit key that has been provided to the chaincode through the -// transient field -func (t *EncCC) Encrypter(stub shim.ChaincodeStubInterface, args []string, encKey, IV []byte) pb.Response { - // create the encrypter entity - we give it an ID, the bccsp instance, the key and (optionally) the IV - ent, err := entities.NewAES256EncrypterEntity("ID", t.bccspInst, encKey, IV) - if err != nil { - return shim.Error(fmt.Sprintf("entities.NewAES256EncrypterEntity failed, err %s", err)) - } - - if len(args) != 2 { - return shim.Error("Expected 2 parameters to function Encrypter") - } - - key := args[0] - cleartextValue := []byte(args[1]) - - // here, we encrypt cleartextValue and assign it to key - err = encryptAndPutState(stub, ent, key, cleartextValue) - if err != nil { - return shim.Error(fmt.Sprintf("encryptAndPutState failed, err %+v", err)) - } - return shim.Success(nil) -} - -// Decrypter exposes how to read from the ledger and decrypt using an AES 256 -// bit key that has been provided to the chaincode through the transient field. 
-func (t *EncCC) Decrypter(stub shim.ChaincodeStubInterface, args []string, decKey, IV []byte) pb.Response { - // create the encrypter entity - we give it an ID, the bccsp instance, the key and (optionally) the IV - ent, err := entities.NewAES256EncrypterEntity("ID", t.bccspInst, decKey, IV) - if err != nil { - return shim.Error(fmt.Sprintf("entities.NewAES256EncrypterEntity failed, err %s", err)) - } - - if len(args) != 1 { - return shim.Error("Expected 1 parameters to function Decrypter") - } - - key := args[0] - - // here we decrypt the state associated to key - cleartextValue, err := getStateAndDecrypt(stub, ent, key) - if err != nil { - return shim.Error(fmt.Sprintf("getStateAndDecrypt failed, err %+v", err)) - } - - // here we return the decrypted value as a result - return shim.Success(cleartextValue) -} - -// EncrypterSigner exposes how to write state to the ledger after having received keys for -// encrypting (AES 256 bit key) and signing (X9.62/SECG curve over a 256 bit prime field) that has been provided to the chaincode through the -// transient field -func (t *EncCC) EncrypterSigner(stub shim.ChaincodeStubInterface, args []string, encKey, sigKey []byte) pb.Response { - // create the encrypter/signer entity - we give it an ID, the bccsp instance and the keys - ent, err := entities.NewAES256EncrypterECDSASignerEntity("ID", t.bccspInst, encKey, sigKey) - if err != nil { - return shim.Error(fmt.Sprintf("entities.NewAES256EncrypterEntity failed, err %s", err)) - } - - if len(args) != 2 { - return shim.Error("Expected 2 parameters to function EncrypterSigner") - } - - key := args[0] - cleartextValue := []byte(args[1]) - - // here, we sign cleartextValue, encrypt it and assign it to key - err = signEncryptAndPutState(stub, ent, key, cleartextValue) - if err != nil { - return shim.Error(fmt.Sprintf("signEncryptAndPutState failed, err %+v", err)) - } - - return shim.Success(nil) -} - -// DecrypterVerify exposes how to get state to the ledger after having 
received keys for -// decrypting (AES 256 bit key) and verifying (X9.62/SECG curve over a 256 bit prime field) that has been provided to the chaincode through the -// transient field -func (t *EncCC) DecrypterVerify(stub shim.ChaincodeStubInterface, args []string, decKey, verKey []byte) pb.Response { - // create the decrypter/verify entity - we give it an ID, the bccsp instance and the keys - ent, err := entities.NewAES256EncrypterECDSASignerEntity("ID", t.bccspInst, decKey, verKey) - if err != nil { - return shim.Error(fmt.Sprintf("entities.NewAES256DecrypterEntity failed, err %s", err)) - } - - if len(args) != 1 { - return shim.Error("Expected 1 parameters to function DecrypterVerify") - } - key := args[0] - - // here we decrypt the state associated to key and verify it - cleartextValue, err := getStateDecryptAndVerify(stub, ent, key) - if err != nil { - return shim.Error(fmt.Sprintf("getStateDecryptAndVerify failed, err %+v", err)) - } - - // here we return the decrypted and verified value as a result - return shim.Success(cleartextValue) -} - -// RangeDecrypter shows how range queries may be satisfied by using the decrypter -// entity directly to decrypt previously encrypted key-value pairs -func (t *EncCC) RangeDecrypter(stub shim.ChaincodeStubInterface, decKey []byte) pb.Response { - // create the encrypter entity - we give it an ID, the bccsp instance and the key - ent, err := entities.NewAES256EncrypterEntity("ID", t.bccspInst, decKey, nil) - if err != nil { - return shim.Error(fmt.Sprintf("entities.NewAES256EncrypterEntity failed, err %s", err)) - } - - bytes, err := getStateByRangeAndDecrypt(stub, ent, "", "") - if err != nil { - return shim.Error(fmt.Sprintf("getStateByRangeAndDecrypt failed, err %+v", err)) - } - - return shim.Success(bytes) -} - -// Invoke for this chaincode exposes functions to ENCRYPT, DECRYPT transactional -// data. It also supports an example to ENCRYPT and SIGN and DECRYPT and -// VERIFY. 
The Initialization Vector (IV) can be passed in as a parm to -// ensure peers have deterministic data. -func (t *EncCC) Invoke(stub shim.ChaincodeStubInterface) pb.Response { - // get arguments and transient - f, args := stub.GetFunctionAndParameters() - tMap, err := stub.GetTransient() - if err != nil { - return shim.Error(fmt.Sprintf("Could not retrieve transient, err %s", err)) - } - - switch f { - case "ENCRYPT": - // make sure there's a key in transient - the assumption is that - // it's associated to the string "ENCKEY" - if _, in := tMap[ENCKEY]; !in { - return shim.Error(fmt.Sprintf("Expected transient encryption key %s", ENCKEY)) - } - - return t.Encrypter(stub, args[0:], tMap[ENCKEY], tMap[IV]) - case "DECRYPT": - - // make sure there's a key in transient - the assumption is that - // it's associated to the string "DECKEY" - if _, in := tMap[DECKEY]; !in { - return shim.Error(fmt.Sprintf("Expected transient decryption key %s", DECKEY)) - } - - return t.Decrypter(stub, args[0:], tMap[DECKEY], tMap[IV]) - case "ENCRYPTSIGN": - // make sure keys are there in the transient map - the assumption is that they - // are associated to the string "ENCKEY" and "SIGKEY" - if _, in := tMap[ENCKEY]; !in { - return shim.Error(fmt.Sprintf("Expected transient key %s", ENCKEY)) - } else if _, in := tMap[SIGKEY]; !in { - return shim.Error(fmt.Sprintf("Expected transient key %s", SIGKEY)) - } - - return t.EncrypterSigner(stub, args[0:], tMap[ENCKEY], tMap[SIGKEY]) - case "DECRYPTVERIFY": - // make sure keys are there in the transient map - the assumption is that they - // are associated to the string "DECKEY" and "VERKEY" - if _, in := tMap[DECKEY]; !in { - return shim.Error(fmt.Sprintf("Expected transient key %s", DECKEY)) - } else if _, in := tMap[VERKEY]; !in { - return shim.Error(fmt.Sprintf("Expected transient key %s", VERKEY)) - } - - return t.DecrypterVerify(stub, args[0:], tMap[DECKEY], tMap[VERKEY]) - case "RANGEQUERY": - // make sure there's a key in transient - the 
assumption is that - // it's associated to the string "ENCKEY" - if _, in := tMap[DECKEY]; !in { - return shim.Error(fmt.Sprintf("Expected transient key %s", DECKEY)) - } - - return t.RangeDecrypter(stub, tMap[DECKEY]) - default: - return shim.Error(fmt.Sprintf("Unsupported function %s", f)) - } -} - -func main() { - factory.InitFactories(nil) - - err := shim.Start(&EncCC{factory.GetDefault()}) - if err != nil { - fmt.Printf("Error starting EncCC chaincode: %s", err) - } -} diff --git a/app/platform/fabric/e2e-test/fabric/examples/chaincode/go/enccc_example/enccc_test.go b/app/platform/fabric/e2e-test/fabric/examples/chaincode/go/enccc_example/enccc_test.go deleted file mode 100644 index 7daf28135..000000000 --- a/app/platform/fabric/e2e-test/fabric/examples/chaincode/go/enccc_example/enccc_test.go +++ /dev/null @@ -1,234 +0,0 @@ -package main - -import ( - "bytes" - "encoding/json" - "testing" - - "github.com/hyperledger/fabric/bccsp/factory" - "github.com/hyperledger/fabric/core/chaincode/shim" - "github.com/stretchr/testify/assert" -) - -const ( - AESKEY1 = "01234567890123456789012345678901" - AESKEY2 = "01234567890123456789012345678902" - ECDSAKEY1 = `-----BEGIN EC PRIVATE KEY----- -MHcCAQEEIH4Uv66F9kZMdOQxwNegkGm8c3AB3nGPOtxNKi6wb/ZooAoGCCqGSM49 -AwEHoUQDQgAEEPE+VLOh+e4NpwIjI/b/fKYHi4weU7r9OTEYPiAJiJBQY6TZnvF5 -oRMvwO4MCYxFtpIRO4UxIgcZBj4NCBxKqQ== ------END EC PRIVATE KEY-----` - ECDSAKEY2 = `-----BEGIN EC PRIVATE KEY----- -MHcCAQEEIE8Seyc9TXx+yQfnGPuzjkuEfMbkq203IYdfyvMd0r3OoAoGCCqGSM49 -AwEHoUQDQgAE4dcGMMroH2LagI/s5i/Bx4t4ggGDoJPNVkKBDBlIaMYjJFYD1obk -JOWqAZxKKsBxBC5Ssu+fS26VPfdNWxDsFQ== ------END EC PRIVATE KEY-----` - IV1 = "0123456789012345" -) - -func TestInit(t *testing.T) { - factory.InitFactories(nil) - - scc := new(EncCC) - stub := shim.NewMockStub("enccc", scc) - stub.MockTransactionStart("a") - res := scc.Init(stub) - stub.MockTransactionEnd("a") - assert.Equal(t, res.Status, int32(shim.OK)) -} - -// unfortunately we can't tese this cc though 
invoke since the -// mock shim doesn't support transient. We test failure scenarios -// and the tests below focus on the functionality by invoking -// functions as opposed to cc -func TestInvoke(t *testing.T) { - factory.InitFactories(nil) - - scc := &EncCC{factory.GetDefault()} - stub := shim.NewMockStub("enccc", scc) - - res := stub.MockInvoke("tx", [][]byte{[]byte("barf")}) - assert.NotEqual(t, res.Status, int32(shim.OK)) - res = stub.MockInvoke("tx", [][]byte{[]byte("ENC")}) - assert.NotEqual(t, res.Status, int32(shim.OK)) - res = stub.MockInvoke("tx", [][]byte{[]byte("SIG")}) - assert.NotEqual(t, res.Status, int32(shim.OK)) - res = stub.MockInvoke("tx", [][]byte{[]byte("RANGE")}) - assert.NotEqual(t, res.Status, int32(shim.OK)) -} - -func TestEnc(t *testing.T) { - factory.InitFactories(nil) - - scc := &EncCC{factory.GetDefault()} - stub := shim.NewMockStub("enccc", scc) - - // success - stub.MockTransactionStart("a") - res := scc.Encrypter(stub, []string{"key", "value"}, []byte(AESKEY1), nil) - stub.MockTransactionEnd("a") - assert.Equal(t, res.Status, int32(shim.OK)) - - // fail - bad key - stub.MockTransactionStart("a") - res = scc.Encrypter(stub, []string{"key", "value"}, []byte("badkey"), nil) - stub.MockTransactionEnd("a") - assert.NotEqual(t, res.Status, int32(shim.OK)) - - // fail - not enough args - stub.MockTransactionStart("a") - res = scc.Encrypter(stub, []string{"key"}, []byte(AESKEY1), nil) - stub.MockTransactionEnd("a") - assert.NotEqual(t, res.Status, int32(shim.OK)) - - // success - stub.MockTransactionStart("a") - res = scc.Decrypter(stub, []string{"key"}, []byte(AESKEY1), nil) - stub.MockTransactionEnd("a") - assert.Equal(t, res.Status, int32(shim.OK)) - assert.True(t, bytes.Equal(res.Payload, []byte("value"))) - - // fail - not enough args - stub.MockTransactionStart("a") - res = scc.Decrypter(stub, []string{}, []byte(AESKEY1), nil) - stub.MockTransactionEnd("a") - assert.NotEqual(t, res.Status, int32(shim.OK)) - - // fail - bad kvs key - 
stub.MockTransactionStart("a") - res = scc.Decrypter(stub, []string{"badkey"}, []byte(AESKEY1), nil) - stub.MockTransactionEnd("a") - assert.NotEqual(t, res.Status, int32(shim.OK)) - - // fail - bad key - stub.MockTransactionStart("a") - res = scc.Decrypter(stub, []string{"key"}, []byte(AESKEY2), nil) - stub.MockTransactionEnd("a") - assert.NotEqual(t, res.Status, int32(shim.OK)) -} - -func TestSig(t *testing.T) { - factory.InitFactories(nil) - - scc := &EncCC{factory.GetDefault()} - stub := shim.NewMockStub("enccc", scc) - - // success - stub.MockTransactionStart("a") - res := scc.EncrypterSigner(stub, []string{"key", "value"}, []byte(AESKEY1), []byte(ECDSAKEY1)) - stub.MockTransactionEnd("a") - assert.Equal(t, res.Status, int32(shim.OK)) - - // fail - bad key - stub.MockTransactionStart("a") - res = scc.EncrypterSigner(stub, []string{"key", "value"}, []byte(AESKEY1), []byte("barf")) - stub.MockTransactionEnd("a") - assert.NotEqual(t, res.Status, int32(shim.OK)) - - // fail - bad args - stub.MockTransactionStart("a") - res = scc.EncrypterSigner(stub, []string{"key"}, []byte(AESKEY1), []byte("barf")) - stub.MockTransactionEnd("a") - assert.NotEqual(t, res.Status, int32(shim.OK)) - - // fail - bad signing key - stub.MockTransactionStart("a") - res = scc.DecrypterVerify(stub, []string{"key"}, []byte(AESKEY1), []byte(ECDSAKEY2)) - stub.MockTransactionEnd("a") - assert.NotEqual(t, res.Status, int32(shim.OK)) - - // fail - bad args - stub.MockTransactionStart("a") - res = scc.DecrypterVerify(stub, []string{}, []byte(AESKEY1), []byte(ECDSAKEY1)) - stub.MockTransactionEnd("a") - assert.NotEqual(t, res.Status, int32(shim.OK)) - - // fail - bad kvs key - stub.MockTransactionStart("a") - res = scc.DecrypterVerify(stub, []string{"badkey"}, []byte(AESKEY1), []byte(ECDSAKEY1)) - stub.MockTransactionEnd("a") - assert.NotEqual(t, res.Status, int32(shim.OK)) - - // success - stub.MockTransactionStart("a") - res = scc.EncrypterSigner(stub, []string{"key", "value"}, []byte(AESKEY1), 
[]byte(ECDSAKEY1)) - stub.MockTransactionEnd("a") - assert.Equal(t, res.Status, int32(shim.OK)) - - // success - stub.MockTransactionStart("a") - res = scc.DecrypterVerify(stub, []string{"key"}, []byte(AESKEY1), []byte(ECDSAKEY1)) - stub.MockTransactionEnd("a") - assert.Equal(t, res.Status, int32(shim.OK)) - assert.True(t, bytes.Equal(res.Payload, []byte("value"))) -} - -func TestEncCC_RangeDecrypter(t *testing.T) { - factory.InitFactories(nil) - - scc := &EncCC{factory.GetDefault()} - stub := shim.NewMockStub("enccc", scc) - - stub.MockTransactionStart("a") - res := scc.Encrypter(stub, []string{"key1", "value1"}, []byte(AESKEY1), nil) - stub.MockTransactionEnd("a") - assert.Equal(t, res.Status, int32(shim.OK)) - - stub.MockTransactionStart("a") - res = scc.Encrypter(stub, []string{"key2", "value2"}, []byte(AESKEY1), nil) - stub.MockTransactionEnd("a") - assert.Equal(t, res.Status, int32(shim.OK)) - - stub.MockTransactionStart("a") - res = scc.Encrypter(stub, []string{"key3", "value3"}, []byte(AESKEY1), nil) - stub.MockTransactionEnd("a") - assert.Equal(t, res.Status, int32(shim.OK)) - - // failed range query - res = scc.RangeDecrypter(stub, nil) - assert.NotEqual(t, res.Status, int32(shim.OK)) - - // success range query - stub.MockTransactionStart("a") - res = scc.RangeDecrypter(stub, []byte(AESKEY1)) - stub.MockTransactionEnd("a") - assert.Equal(t, res.Status, int32(shim.OK)) - keys := []keyValuePair{} - err := json.Unmarshal(res.Payload, &keys) - assert.NoError(t, err) - assert.Equal(t, keys[0].Key, "key1") - assert.Equal(t, string(keys[0].Value), "value1") - assert.Equal(t, keys[1].Key, "key2") - assert.Equal(t, string(keys[1].Value), "value2") - assert.Equal(t, keys[2].Key, "key3") - assert.Equal(t, string(keys[2].Value), "value3") - - _, err = getStateByRangeAndDecrypt(stub, nil, string([]byte{0}), string([]byte{0})) - assert.Error(t, err) -} - -func TestDeterministicEncryption(t *testing.T) { - factory.InitFactories(nil) - - scc := 
&EncCC{factory.GetDefault()} - stub := shim.NewMockStub("enccc", scc) - - stub.MockTransactionStart("a") - res := scc.Encrypter(stub, []string{"key1", "value1"}, []byte(AESKEY1), []byte(IV1)) - stub.MockTransactionEnd("a") - assert.Equal(t, res.Status, int32(shim.OK)) - - c1, err := stub.GetState("key1") - assert.NoError(t, err) - assert.NotNil(t, c1) - - stub.MockTransactionStart("a") - res = scc.Encrypter(stub, []string{"key1", "value1"}, []byte(AESKEY1), []byte(IV1)) - stub.MockTransactionEnd("a") - assert.Equal(t, res.Status, int32(shim.OK)) - - c2, err := stub.GetState("key1") - assert.NoError(t, err) - assert.NotNil(t, c1) - assert.True(t, bytes.Equal(c1, c2)) -} diff --git a/app/platform/fabric/e2e-test/fabric/examples/chaincode/go/enccc_example/utils.go b/app/platform/fabric/e2e-test/fabric/examples/chaincode/go/enccc_example/utils.go deleted file mode 100644 index 9af6e6fa1..000000000 --- a/app/platform/fabric/e2e-test/fabric/examples/chaincode/go/enccc_example/utils.go +++ /dev/null @@ -1,147 +0,0 @@ -/* -Copyright IBM Corp. All Rights Reserved. - -SPDX-License-Identifier: Apache-2.0 -*/ - -package main - -import ( - "encoding/json" - - "github.com/hyperledger/fabric/core/chaincode/shim" - "github.com/hyperledger/fabric/core/chaincode/shim/ext/entities" - "github.com/pkg/errors" -) - -// the functions below show some best practices on how -// to use entities to perform cryptographic operations -// over the ledger state - -// getStateAndDecrypt retrieves the value associated to key, -// decrypts it with the supplied entity and returns the result -// of the decryption -func getStateAndDecrypt(stub shim.ChaincodeStubInterface, ent entities.Encrypter, key string) ([]byte, error) { - // at first we retrieve the ciphertext from the ledger - ciphertext, err := stub.GetState(key) - if err != nil { - return nil, err - } - - // GetState will return a nil slice if the key does not exist. 
- // Note that the chaincode logic may want to distinguish between - // nil slice (key doesn't exist in state db) and empty slice - // (key found in state db but value is empty). We do not - // distinguish the case here - if len(ciphertext) == 0 { - return nil, errors.New("no ciphertext to decrypt") - } - - return ent.Decrypt(ciphertext) -} - -// encryptAndPutState encrypts the supplied value using the -// supplied entity and puts it to the ledger associated to -// the supplied KVS key -func encryptAndPutState(stub shim.ChaincodeStubInterface, ent entities.Encrypter, key string, value []byte) error { - // at first we use the supplied entity to encrypt the value - ciphertext, err := ent.Encrypt(value) - if err != nil { - return err - } - - return stub.PutState(key, ciphertext) -} - -// getStateDecryptAndVerify retrieves the value associated to key, -// decrypts it with the supplied entity, verifies the signature -// over it and returns the result of the decryption in case of -// success -func getStateDecryptAndVerify(stub shim.ChaincodeStubInterface, ent entities.EncrypterSignerEntity, key string) ([]byte, error) { - // here we retrieve and decrypt the state associated to key - val, err := getStateAndDecrypt(stub, ent, key) - if err != nil { - return nil, err - } - - // we unmarshal a SignedMessage from the decrypted state - msg := &entities.SignedMessage{} - err = msg.FromBytes(val) - if err != nil { - return nil, err - } - - // we verify the signature - ok, err := msg.Verify(ent) - if err != nil { - return nil, err - } else if !ok { - return nil, errors.New("invalid signature") - } - - return msg.Payload, nil -} - -// signEncryptAndPutState signs the supplied value, encrypts -// the supplied value together with its signature using the -// supplied entity and puts it to the ledger associated to -// the supplied KVS key -func signEncryptAndPutState(stub shim.ChaincodeStubInterface, ent entities.EncrypterSignerEntity, key string, value []byte) error { - // here we 
create a SignedMessage, set its payload - // to value and the ID of the entity and - // sign it with the entity - msg := &entities.SignedMessage{Payload: value, ID: []byte(ent.ID())} - err := msg.Sign(ent) - if err != nil { - return err - } - - // here we serialize the SignedMessage - b, err := msg.ToBytes() - if err != nil { - return err - } - - // here we encrypt the serialized version associated to args[0] - return encryptAndPutState(stub, ent, key, b) -} - -type keyValuePair struct { - Key string `json:"key"` - Value string `json:"value"` -} - -// getStateByRangeAndDecrypt retrieves a range of KVS pairs from the -// ledger and decrypts each value with the supplied entity; it returns -// a json-marshalled slice of keyValuePair -func getStateByRangeAndDecrypt(stub shim.ChaincodeStubInterface, ent entities.Encrypter, startKey, endKey string) ([]byte, error) { - // we call get state by range to go through the entire range - iterator, err := stub.GetStateByRange(startKey, endKey) - if err != nil { - return nil, err - } - defer iterator.Close() - - // we decrypt each entry - the assumption is that they have all been encrypted with the same key - keyvalueset := []keyValuePair{} - for iterator.HasNext() { - el, err := iterator.Next() - if err != nil { - return nil, err - } - - v, err := ent.Decrypt(el.Value) - if err != nil { - return nil, err - } - - keyvalueset = append(keyvalueset, keyValuePair{el.Key, string(v)}) - } - - bytes, err := json.Marshal(keyvalueset) - if err != nil { - return nil, err - } - - return bytes, nil -} diff --git a/app/platform/fabric/e2e-test/fabric/examples/chaincode/go/eventsender/eventsender.go b/app/platform/fabric/e2e-test/fabric/examples/chaincode/go/eventsender/eventsender.go deleted file mode 100644 index 456062521..000000000 --- a/app/platform/fabric/e2e-test/fabric/examples/chaincode/go/eventsender/eventsender.go +++ /dev/null @@ -1,92 +0,0 @@ -/* -Copyright IBM Corp. 2016 All Rights Reserved. 
- -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package main - -import ( - "fmt" - "strconv" - - "github.com/hyperledger/fabric/core/chaincode/shim" - pb "github.com/hyperledger/fabric/protos/peer" -) - -// EventSender example simple Chaincode implementation -type EventSender struct { -} - -// Init function -func (t *EventSender) Init(stub shim.ChaincodeStubInterface) pb.Response { - err := stub.PutState("noevents", []byte("0")) - if err != nil { - return shim.Error(err.Error()) - } - - return shim.Success(nil) -} - -// Invoke function -func (t *EventSender) invoke(stub shim.ChaincodeStubInterface, args []string) pb.Response { - b, err := stub.GetState("noevents") - if err != nil { - return shim.Error("Failed to get state") - } - noevts, _ := strconv.Atoi(string(b)) - - tosend := "Event " + string(b) - for _, s := range args { - tosend = tosend + "," + s - } - - err = stub.PutState("noevents", []byte(strconv.Itoa(noevts+1))) - if err != nil { - return shim.Error(err.Error()) - } - - err = stub.SetEvent("evtsender", []byte(tosend)) - if err != nil { - return shim.Error(err.Error()) - } - return shim.Success(nil) -} - -// Query function -func (t *EventSender) query(stub shim.ChaincodeStubInterface, args []string) pb.Response { - b, err := stub.GetState("noevents") - if err != nil { - return shim.Error("Failed to get state") - } - jsonResp := "{\"NoEvents\":\"" + string(b) + "\"}" - return shim.Success([]byte(jsonResp)) -} - -func (t *EventSender) Invoke(stub 
shim.ChaincodeStubInterface) pb.Response { - function, args := stub.GetFunctionAndParameters() - if function == "invoke" { - return t.invoke(stub, args) - } else if function == "query" { - return t.query(stub, args) - } - - return shim.Error("Invalid invoke function name. Expecting \"invoke\" \"query\"") -} - -func main() { - err := shim.Start(new(EventSender)) - if err != nil { - fmt.Printf("Error starting EventSender chaincode: %s", err) - } -} diff --git a/app/platform/fabric/e2e-test/fabric/examples/chaincode/go/example01/chaincode.go b/app/platform/fabric/e2e-test/fabric/examples/chaincode/go/example01/chaincode.go deleted file mode 100644 index f2d9f1d6b..000000000 --- a/app/platform/fabric/e2e-test/fabric/examples/chaincode/go/example01/chaincode.go +++ /dev/null @@ -1,73 +0,0 @@ -/* -Copyright IBM Corp. All Rights Reserved. - -SPDX-License-Identifier: Apache-2.0 -*/ - -package example01 - -import ( - "fmt" - "strconv" - - "github.com/hyperledger/fabric/core/chaincode/shim" - pb "github.com/hyperledger/fabric/protos/peer" -) - -// SimpleChaincode example simple Chaincode implementation -type SimpleChaincode struct{} - -var A, B string -var Aval, Bval, X int - -// Init callback representing the invocation of a chaincode -// This chaincode will manage two accounts A and B and will transfer X units from A to B upon invoke -func (t *SimpleChaincode) Init(stub shim.ChaincodeStubInterface) pb.Response { - var err error - _, args := stub.GetFunctionAndParameters() - if len(args) != 4 { - return shim.Error("Incorrect number of arguments. 
Expecting 4") - } - - // Initialize the chaincode - A = args[0] - Aval, err = strconv.Atoi(args[1]) - if err != nil { - return shim.Error("Expecting integer value for asset holding") - } - B = args[2] - Bval, err = strconv.Atoi(args[3]) - if err != nil { - return shim.Error("Expecting integer value for asset holding") - } - fmt.Printf("Aval = %d, Bval = %d\n", Aval, Bval) - - return shim.Success(nil) -} - -func (t *SimpleChaincode) invoke(stub shim.ChaincodeStubInterface, args []string) pb.Response { - // Transaction makes payment of X units from A to B - X, err := strconv.Atoi(args[0]) - if err != nil { - fmt.Printf("Error convert %s to integer: %s", args[0], err) - return shim.Error(fmt.Sprintf("Error convert %s to integer: %s", args[0], err)) - } - Aval = Aval - X - Bval = Bval + X - ts, err2 := stub.GetTxTimestamp() - if err2 != nil { - fmt.Printf("Error getting transaction timestamp: %s", err2) - return shim.Error(fmt.Sprintf("Error getting transaction timestamp: %s", err2)) - } - fmt.Printf("Transaction Time: %v,Aval = %d, Bval = %d\n", ts, Aval, Bval) - return shim.Success(nil) -} - -func (t *SimpleChaincode) Invoke(stub shim.ChaincodeStubInterface) pb.Response { - function, args := stub.GetFunctionAndParameters() - if function == "invoke" { - return t.invoke(stub, args) - } - - return shim.Error("Invalid invoke function name. Expecting \"invoke\"") -} diff --git a/app/platform/fabric/e2e-test/fabric/examples/chaincode/go/example01/cmd/main.go b/app/platform/fabric/e2e-test/fabric/examples/chaincode/go/example01/cmd/main.go deleted file mode 100644 index 96e6db659..000000000 --- a/app/platform/fabric/e2e-test/fabric/examples/chaincode/go/example01/cmd/main.go +++ /dev/null @@ -1,21 +0,0 @@ -/* -Copyright IBM Corp. All Rights Reserved. 
- -SPDX-License-Identifier: Apache-2.0 -*/ - -package main - -import ( - "fmt" - - "github.com/hyperledger/fabric/core/chaincode/shim" - "github.com/hyperledger/fabric/examples/chaincode/go/example01" -) - -func main() { - err := shim.Start(new(example01.SimpleChaincode)) - if err != nil { - fmt.Printf("Error starting Simple chaincode: %s", err) - } -} diff --git a/app/platform/fabric/e2e-test/fabric/examples/chaincode/go/example02/chaincode.go b/app/platform/fabric/e2e-test/fabric/examples/chaincode/go/example02/chaincode.go deleted file mode 100644 index 87629f406..000000000 --- a/app/platform/fabric/e2e-test/fabric/examples/chaincode/go/example02/chaincode.go +++ /dev/null @@ -1,176 +0,0 @@ -/* -Copyright IBM Corp. All Rights Reserved. - -SPDX-License-Identifier: Apache-2.0 -*/ - -package example02 - -import ( - "fmt" - "strconv" - - "github.com/hyperledger/fabric/core/chaincode/shim" - pb "github.com/hyperledger/fabric/protos/peer" -) - -// SimpleChaincode example simple Chaincode implementation -type SimpleChaincode struct { -} - -func (t *SimpleChaincode) Init(stub shim.ChaincodeStubInterface) pb.Response { - fmt.Println("ex02 Init") - _, args := stub.GetFunctionAndParameters() - var A, B string // Entities - var Aval, Bval int // Asset holdings - var err error - - if len(args) != 4 { - return shim.Error("Incorrect number of arguments. 
Expecting 4") - } - - // Initialize the chaincode - A = args[0] - Aval, err = strconv.Atoi(args[1]) - if err != nil { - return shim.Error("Expecting integer value for asset holding") - } - B = args[2] - Bval, err = strconv.Atoi(args[3]) - if err != nil { - return shim.Error("Expecting integer value for asset holding") - } - fmt.Printf("Aval = %d, Bval = %d\n", Aval, Bval) - - // Write the state to the ledger - err = stub.PutState(A, []byte(strconv.Itoa(Aval))) - if err != nil { - return shim.Error(err.Error()) - } - - err = stub.PutState(B, []byte(strconv.Itoa(Bval))) - if err != nil { - return shim.Error(err.Error()) - } - - return shim.Success(nil) -} - -func (t *SimpleChaincode) Invoke(stub shim.ChaincodeStubInterface) pb.Response { - fmt.Println("ex02 Invoke") - function, args := stub.GetFunctionAndParameters() - if function == "invoke" { - // Make payment of X units from A to B - return t.invoke(stub, args) - } else if function == "delete" { - // Deletes an entity from its state - return t.delete(stub, args) - } else if function == "query" { - // the old "Query" is now implemtned in invoke - return t.query(stub, args) - } - - return shim.Error("Invalid invoke function name. Expecting \"invoke\" \"delete\" \"query\"") -} - -// Transaction makes payment of X units from A to B -func (t *SimpleChaincode) invoke(stub shim.ChaincodeStubInterface, args []string) pb.Response { - var A, B string // Entities - var Aval, Bval int // Asset holdings - var X int // Transaction value - var err error - - if len(args) != 3 { - return shim.Error("Incorrect number of arguments. 
Expecting 3") - } - - A = args[0] - B = args[1] - - // Get the state from the ledger - // TODO: will be nice to have a GetAllState call to ledger - Avalbytes, err := stub.GetState(A) - if err != nil { - return shim.Error("Failed to get state") - } - if Avalbytes == nil { - return shim.Error("Entity not found") - } - Aval, _ = strconv.Atoi(string(Avalbytes)) - - Bvalbytes, err := stub.GetState(B) - if err != nil { - return shim.Error("Failed to get state") - } - if Bvalbytes == nil { - return shim.Error("Entity not found") - } - Bval, _ = strconv.Atoi(string(Bvalbytes)) - - // Perform the execution - X, err = strconv.Atoi(args[2]) - if err != nil { - return shim.Error("Invalid transaction amount, expecting a integer value") - } - Aval = Aval - X - Bval = Bval + X - fmt.Printf("Aval = %d, Bval = %d\n", Aval, Bval) - - // Write the state back to the ledger - err = stub.PutState(A, []byte(strconv.Itoa(Aval))) - if err != nil { - return shim.Error(err.Error()) - } - - err = stub.PutState(B, []byte(strconv.Itoa(Bval))) - if err != nil { - return shim.Error(err.Error()) - } - - return shim.Success(nil) -} - -// Deletes an entity from state -func (t *SimpleChaincode) delete(stub shim.ChaincodeStubInterface, args []string) pb.Response { - if len(args) != 1 { - return shim.Error("Incorrect number of arguments. Expecting 1") - } - - A := args[0] - - // Delete the key from the state in ledger - err := stub.DelState(A) - if err != nil { - return shim.Error("Failed to delete state") - } - - return shim.Success(nil) -} - -// query callback representing the query of a chaincode -func (t *SimpleChaincode) query(stub shim.ChaincodeStubInterface, args []string) pb.Response { - var A string // Entities - var err error - - if len(args) != 1 { - return shim.Error("Incorrect number of arguments. 
Expecting name of the person to query") - } - - A = args[0] - - // Get the state from the ledger - Avalbytes, err := stub.GetState(A) - if err != nil { - jsonResp := "{\"Error\":\"Failed to get state for " + A + "\"}" - return shim.Error(jsonResp) - } - - if Avalbytes == nil { - jsonResp := "{\"Error\":\"Nil amount for " + A + "\"}" - return shim.Error(jsonResp) - } - - jsonResp := "{\"Name\":\"" + A + "\",\"Amount\":\"" + string(Avalbytes) + "\"}" - fmt.Printf("Query Response:%s\n", jsonResp) - return shim.Success(Avalbytes) -} diff --git a/app/platform/fabric/e2e-test/fabric/examples/chaincode/go/example02/chaincode_test.go b/app/platform/fabric/e2e-test/fabric/examples/chaincode/go/example02/chaincode_test.go deleted file mode 100644 index 72345d765..000000000 --- a/app/platform/fabric/e2e-test/fabric/examples/chaincode/go/example02/chaincode_test.go +++ /dev/null @@ -1,103 +0,0 @@ -/* -Copyright IBM Corp. All Rights Reserved. - -SPDX-License-Identifier: Apache-2.0 -*/ - -package example02 - -import ( - "fmt" - "testing" - - "github.com/hyperledger/fabric/core/chaincode/shim" -) - -func checkInit(t *testing.T, stub *shim.MockStub, args [][]byte) { - res := stub.MockInit("1", args) - if res.Status != shim.OK { - fmt.Println("Init failed", string(res.Message)) - t.FailNow() - } -} - -func checkState(t *testing.T, stub *shim.MockStub, name string, value string) { - bytes := stub.State[name] - if bytes == nil { - fmt.Println("State", name, "failed to get value") - t.FailNow() - } - if string(bytes) != value { - fmt.Println("State value", name, "was not", value, "as expected") - t.FailNow() - } -} - -func checkQuery(t *testing.T, stub *shim.MockStub, name string, value string) { - res := stub.MockInvoke("1", [][]byte{[]byte("query"), []byte(name)}) - if res.Status != shim.OK { - fmt.Println("Query", name, "failed", string(res.Message)) - t.FailNow() - } - if res.Payload == nil { - fmt.Println("Query", name, "failed to get value") - t.FailNow() - } - if 
string(res.Payload) != value { - fmt.Println("Query value", name, "was not", value, "as expected") - t.FailNow() - } -} - -func checkInvoke(t *testing.T, stub *shim.MockStub, args [][]byte) { - res := stub.MockInvoke("1", args) - if res.Status != shim.OK { - fmt.Println("Invoke", args, "failed", string(res.Message)) - t.FailNow() - } -} - -func TestExample02_Init(t *testing.T) { - scc := new(SimpleChaincode) - stub := shim.NewMockStub("ex02", scc) - - // Init A=123 B=234 - checkInit(t, stub, [][]byte{[]byte("init"), []byte("A"), []byte("123"), []byte("B"), []byte("234")}) - - checkState(t, stub, "A", "123") - checkState(t, stub, "B", "234") -} - -func TestExample02_Query(t *testing.T) { - scc := new(SimpleChaincode) - stub := shim.NewMockStub("ex02", scc) - - // Init A=345 B=456 - checkInit(t, stub, [][]byte{[]byte("init"), []byte("A"), []byte("345"), []byte("B"), []byte("456")}) - - // Query A - checkQuery(t, stub, "A", "345") - - // Query B - checkQuery(t, stub, "B", "456") -} - -func TestExample02_Invoke(t *testing.T) { - scc := new(SimpleChaincode) - stub := shim.NewMockStub("ex02", scc) - - // Init A=567 B=678 - checkInit(t, stub, [][]byte{[]byte("init"), []byte("A"), []byte("567"), []byte("B"), []byte("678")}) - - // Invoke A->B for 123 - checkInvoke(t, stub, [][]byte{[]byte("invoke"), []byte("A"), []byte("B"), []byte("123")}) - checkQuery(t, stub, "A", "444") - checkQuery(t, stub, "B", "801") - - // Invoke B->A for 234 - checkInvoke(t, stub, [][]byte{[]byte("invoke"), []byte("B"), []byte("A"), []byte("234")}) - checkQuery(t, stub, "A", "678") - checkQuery(t, stub, "B", "567") - checkQuery(t, stub, "A", "678") - checkQuery(t, stub, "B", "567") -} diff --git a/app/platform/fabric/e2e-test/fabric/examples/chaincode/go/example02/cmd/main.go b/app/platform/fabric/e2e-test/fabric/examples/chaincode/go/example02/cmd/main.go deleted file mode 100644 index ab27c8529..000000000 --- a/app/platform/fabric/e2e-test/fabric/examples/chaincode/go/example02/cmd/main.go +++ 
/dev/null @@ -1,21 +0,0 @@ -/* -Copyright IBM Corp. All Rights Reserved. - -SPDX-License-Identifier: Apache-2.0 -*/ - -package main - -import ( - "fmt" - - "github.com/hyperledger/fabric/core/chaincode/shim" - "github.com/hyperledger/fabric/examples/chaincode/go/example02" -) - -func main() { - err := shim.Start(new(example02.SimpleChaincode)) - if err != nil { - fmt.Printf("Error starting Simple chaincode: %s", err) - } -} diff --git a/app/platform/fabric/e2e-test/fabric/examples/chaincode/go/example03/chaincode.go b/app/platform/fabric/e2e-test/fabric/examples/chaincode/go/example03/chaincode.go deleted file mode 100644 index fe627d8c7..000000000 --- a/app/platform/fabric/e2e-test/fabric/examples/chaincode/go/example03/chaincode.go +++ /dev/null @@ -1,82 +0,0 @@ -/* -Copyright IBM Corp. All Rights Reserved. - -SPDX-License-Identifier: Apache-2.0 -*/ - -// This program is an erroneous chaincode program that attempts to put state in query context - query should return error -package example03 - -import ( - "fmt" - "strconv" - - "github.com/hyperledger/fabric/core/chaincode/shim" - pb "github.com/hyperledger/fabric/protos/peer" -) - -// SimpleChaincode example simple Chaincode implementation -type SimpleChaincode struct{} - -// Init takes a string and int. These are stored as a key/value pair in the state -func (t *SimpleChaincode) Init(stub shim.ChaincodeStubInterface) pb.Response { - var A string // Entity - var Aval int // Asset holding - var err error - _, args := stub.GetFunctionAndParameters() - if len(args) != 2 { - return shim.Error("Incorrect number of arguments. 
Expecting 2") - } - - // Initialize the chaincode - A = args[0] - Aval, err = strconv.Atoi(args[1]) - if err != nil { - return shim.Error("Expecting integer value for asset holding") - } - fmt.Printf("Aval = %d\n", Aval) - - // Write the state to the ledger - this put is legal within Run - err = stub.PutState(A, []byte(strconv.Itoa(Aval))) - if err != nil { - return shim.Error(err.Error()) - } - - return shim.Success(nil) -} - -// Invoke is a no-op -func (t *SimpleChaincode) Invoke(stub shim.ChaincodeStubInterface) pb.Response { - function, args := stub.GetFunctionAndParameters() - if function == "query" { - return t.query(stub, args) - } - - return shim.Error("Invalid invoke function name. Expecting \"query\"") -} - -func (t *SimpleChaincode) query(stub shim.ChaincodeStubInterface, args []string) pb.Response { - var A string // Entity - var Aval int // Asset holding - var err error - - if len(args) != 2 { - return shim.Error("Incorrect number of arguments. Expecting 2") - } - - A = args[0] - Aval, err = strconv.Atoi(args[1]) - if err != nil { - return shim.Error("Expecting integer value for asset holding") - } - fmt.Printf("Aval = %d\n", Aval) - - // Write the state to the ledger - this put is illegal within Run - err = stub.PutState(A, []byte(strconv.Itoa(Aval))) - if err != nil { - jsonResp := "{\"Error\":\"Cannot put state within chaincode query\"}" - return shim.Error(jsonResp) - } - - return shim.Success(nil) -} diff --git a/app/platform/fabric/e2e-test/fabric/examples/chaincode/go/example03/chaincode_test.go b/app/platform/fabric/e2e-test/fabric/examples/chaincode/go/example03/chaincode_test.go deleted file mode 100644 index a65c87a9e..000000000 --- a/app/platform/fabric/e2e-test/fabric/examples/chaincode/go/example03/chaincode_test.go +++ /dev/null @@ -1,63 +0,0 @@ -/* -Copyright IBM Corp. All Rights Reserved. 
- -SPDX-License-Identifier: Apache-2.0 -*/ - -package example03 - -import ( - "fmt" - "testing" - - "github.com/hyperledger/fabric/core/chaincode/shim" -) - -func checkInit(t *testing.T, scc *SimpleChaincode, stub *shim.MockStub, args [][]byte) { - res := stub.MockInit("1", args) - if res.Status != shim.OK { - fmt.Println("Init failed", res.Message) - t.FailNow() - } -} - -func checkState(t *testing.T, stub *shim.MockStub, name string, value string) { - bytes := stub.State[name] - if bytes == nil { - fmt.Println("State", name, "failed to get value") - t.FailNow() - } - if string(bytes) != value { - fmt.Println("State value", name, "was not", value, "as expected") - t.FailNow() - } -} - -func checkInvoke(t *testing.T, scc *SimpleChaincode, stub *shim.MockStub, args [][]byte) { - res := stub.MockInvoke("1", args) - if res.Status != shim.OK { - fmt.Println("Query failed", string(res.Message)) - t.FailNow() - } -} - -func TestExample03_Init(t *testing.T) { - scc := new(SimpleChaincode) - stub := shim.NewMockStub("ex03", scc) - - // Init A=123 B=234 - checkInit(t, scc, stub, [][]byte{[]byte("init"), []byte("A"), []byte("123")}) - - checkState(t, stub, "A", "123") -} - -func TestExample03_Invoke(t *testing.T) { - scc := new(SimpleChaincode) - stub := shim.NewMockStub("ex03", scc) - - // Init A=345 B=456 - checkInit(t, scc, stub, [][]byte{[]byte("init"), []byte("A"), []byte("345")}) - - // Invoke "query" - checkInvoke(t, scc, stub, [][]byte{[]byte("query"), []byte("A"), []byte("345")}) -} diff --git a/app/platform/fabric/e2e-test/fabric/examples/chaincode/go/example03/cmd/main.go b/app/platform/fabric/e2e-test/fabric/examples/chaincode/go/example03/cmd/main.go deleted file mode 100644 index cb4569715..000000000 --- a/app/platform/fabric/e2e-test/fabric/examples/chaincode/go/example03/cmd/main.go +++ /dev/null @@ -1,21 +0,0 @@ -/* -Copyright IBM Corp. All Rights Reserved. 
- -SPDX-License-Identifier: Apache-2.0 -*/ - -package main - -import ( - "fmt" - - "github.com/hyperledger/fabric/core/chaincode/shim" - "github.com/hyperledger/fabric/examples/chaincode/go/example03" -) - -func main() { - err := shim.Start(new(example03.SimpleChaincode)) - if err != nil { - fmt.Printf("Error starting chaincode: %s", err) - } -} diff --git a/app/platform/fabric/e2e-test/fabric/examples/chaincode/go/example04/chaincode.go b/app/platform/fabric/e2e-test/fabric/examples/chaincode/go/example04/chaincode.go deleted file mode 100644 index dd3d5d585..000000000 --- a/app/platform/fabric/e2e-test/fabric/examples/chaincode/go/example04/chaincode.go +++ /dev/null @@ -1,156 +0,0 @@ -/* -Copyright IBM Corp. All Rights Reserved. - -SPDX-License-Identifier: Apache-2.0 -*/ - -package example04 - -import ( - "fmt" - "strconv" - - "github.com/hyperledger/fabric/core/chaincode/shim" - pb "github.com/hyperledger/fabric/protos/peer" -) - -// This chaincode is a test for chaincode invoking another chaincode - invokes chaincode_example02 - -// SimpleChaincode example simple Chaincode implementation -type SimpleChaincode struct{} - -func toChaincodeArgs(args ...string) [][]byte { - bargs := make([][]byte, len(args)) - for i, arg := range args { - bargs[i] = []byte(arg) - } - return bargs -} - -// Init takes two arguments, a string and int. These are stored in the key/value pair in the state -func (t *SimpleChaincode) Init(stub shim.ChaincodeStubInterface) pb.Response { - var event string // Indicates whether event has happened. Initially 0 - var eventVal int // State of event - var err error - _, args := stub.GetFunctionAndParameters() - if len(args) != 2 { - return shim.Error("Incorrect number of arguments. 
Expecting 2") - } - - // Initialize the chaincode - event = args[0] - eventVal, err = strconv.Atoi(args[1]) - if err != nil { - return shim.Error("Expecting integer value for event status") - } - fmt.Printf("eventVal = %d\n", eventVal) - - err = stub.PutState(event, []byte(strconv.Itoa(eventVal))) - if err != nil { - return shim.Error(err.Error()) - } - - return shim.Success(nil) -} - -// Invoke invokes another chaincode - chaincode_example02, upon receipt of an event and changes event state -func (t *SimpleChaincode) invoke(stub shim.ChaincodeStubInterface, args []string) pb.Response { - var event string // Event entity - var eventVal int // State of event - var err error - - if len(args) != 3 && len(args) != 4 { - return shim.Error("Incorrect number of arguments. Expecting 3 or 4") - } - - chainCodeToCall := args[0] - event = args[1] - eventVal, err = strconv.Atoi(args[2]) - if err != nil { - return shim.Error("Expected integer value for event state change") - } - channelID := "" - if len(args) == 4 { - channelID = args[3] - } - - if eventVal != 1 { - fmt.Printf("Unexpected event. Doing nothing\n") - return shim.Success(nil) - } - - f := "invoke" - invokeArgs := toChaincodeArgs(f, "a", "b", "10") - response := stub.InvokeChaincode(chainCodeToCall, invokeArgs, channelID) - if response.Status != shim.OK { - errStr := fmt.Sprintf("Failed to invoke chaincode. Got error: %s", string(response.Payload)) - fmt.Printf(errStr) - return shim.Error(errStr) - } - - fmt.Printf("Invoke chaincode successful. Got response %s", string(response.Payload)) - - // Write the event state back to the ledger - err = stub.PutState(event, []byte(strconv.Itoa(eventVal))) - if err != nil { - return shim.Error(err.Error()) - } - - return response -} - -func (t *SimpleChaincode) query(stub shim.ChaincodeStubInterface, args []string) pb.Response { - var event string // Event entity - var err error - - if len(args) < 1 { - return shim.Error("Incorrect number of arguments. 
Expecting entity to query") - } - - event = args[0] - var jsonResp string - - // Get the state from the ledger - eventValbytes, err := stub.GetState(event) - if err != nil { - jsonResp = "{\"Error\":\"Failed to get state for " + event + "\"}" - return shim.Error(jsonResp) - } - - if eventValbytes == nil { - jsonResp = "{\"Error\":\"Nil value for " + event + "\"}" - return shim.Error(jsonResp) - } - - if len(args) > 3 { - chainCodeToCall := args[1] - queryKey := args[2] - channel := args[3] - f := "query" - invokeArgs := toChaincodeArgs(f, queryKey) - response := stub.InvokeChaincode(chainCodeToCall, invokeArgs, channel) - if response.Status != shim.OK { - errStr := fmt.Sprintf("Failed to invoke chaincode. Got error: %s", err.Error()) - fmt.Printf(errStr) - return shim.Error(errStr) - } - jsonResp = string(response.Payload) - } else { - jsonResp = "{\"Name\":\"" + event + "\",\"Amount\":\"" + string(eventValbytes) + "\"}" - } - fmt.Printf("Query Response: %s\n", jsonResp) - - return shim.Success([]byte(jsonResp)) -} - -// Invoke is called by fabric to execute a transaction -func (t *SimpleChaincode) Invoke(stub shim.ChaincodeStubInterface) pb.Response { - function, args := stub.GetFunctionAndParameters() - if function == "invoke" { - return t.invoke(stub, args) - } else if function == "query" { - return t.query(stub, args) - } - - return shim.Error("Invalid invoke function name. Expecting \"invoke\" \"query\"") -} diff --git a/app/platform/fabric/e2e-test/fabric/examples/chaincode/go/example04/chaincode_test.go b/app/platform/fabric/e2e-test/fabric/examples/chaincode/go/example04/chaincode_test.go deleted file mode 100644 index dfb3df465..000000000 --- a/app/platform/fabric/e2e-test/fabric/examples/chaincode/go/example04/chaincode_test.go +++ /dev/null @@ -1,110 +0,0 @@ -/* -Copyright IBM Corp. All Rights Reserved. 
- -SPDX-License-Identifier: Apache-2.0 -*/ - -package example04 - -import ( - "fmt" - "testing" - - "github.com/hyperledger/fabric/core/chaincode/shim" - "github.com/hyperledger/fabric/examples/chaincode/go/example02" -) - -// this is the response to any successful Invoke() on chaincode_example04 -var eventResponse = "{\"Name\":\"Event\",\"Amount\":\"1\"}" - -func checkInit(t *testing.T, stub *shim.MockStub, args [][]byte) { - res := stub.MockInit("1", args) - if res.Status != shim.OK { - fmt.Println("Init failed", string(res.Message)) - t.FailNow() - } -} - -func checkState(t *testing.T, stub *shim.MockStub, name string, value string) { - bytes := stub.State[name] - if bytes == nil { - fmt.Println("State", name, "failed to get value") - t.FailNow() - } - if string(bytes) != value { - fmt.Println("State value", name, "was not", value, "as expected") - t.FailNow() - } -} - -func checkQuery(t *testing.T, stub *shim.MockStub, name string, value string) { - res := stub.MockInvoke("1", [][]byte{[]byte("query"), []byte(name)}) - if res.Status != shim.OK { - fmt.Println("Query", name, "failed", string(res.Message)) - t.FailNow() - } - if res.Payload == nil { - fmt.Println("Query", name, "failed to get value") - t.FailNow() - } - if string(res.Payload) != value { - fmt.Println("Query value", name, "was not", value, "as expected") - t.FailNow() - } -} - -func checkInvoke(t *testing.T, stub *shim.MockStub, args [][]byte) { - res := stub.MockInvoke("1", args) - if res.Status != shim.OK { - fmt.Println("Invoke", args, "failed", string(res.Message)) - t.FailNow() - } -} - -func TestExample04_Init(t *testing.T) { - scc := new(SimpleChaincode) - stub := shim.NewMockStub("ex04", scc) - - // Init A=123 B=234 - checkInit(t, stub, [][]byte{[]byte("init"), []byte("Event"), []byte("123")}) - - checkState(t, stub, "Event", "123") -} - -func TestExample04_Query(t *testing.T) { - scc := new(SimpleChaincode) - stub := shim.NewMockStub("ex04", scc) - - // Init A=345 B=456 - checkInit(t, 
stub, [][]byte{[]byte("init"), []byte("Event"), []byte("1")}) - - // Query A - checkQuery(t, stub, "Event", eventResponse) -} - -func TestExample04_Invoke(t *testing.T) { - scc := new(SimpleChaincode) - stub := shim.NewMockStub("ex04", scc) - - chaincodeToInvoke := "ex02" - - ccEx2 := new(example02.SimpleChaincode) - stubEx2 := shim.NewMockStub(chaincodeToInvoke, ccEx2) - checkInit(t, stubEx2, [][]byte{[]byte("init"), []byte("a"), []byte("111"), []byte("b"), []byte("222")}) - stub.MockPeerChaincode(chaincodeToInvoke, stubEx2) - - // Init A=567 B=678 - checkInit(t, stub, [][]byte{[]byte("init"), []byte("Event"), []byte("1")}) - - // Invoke A->B for 10 via Example04's chaincode - checkInvoke(t, stub, [][]byte{[]byte("invoke"), []byte(chaincodeToInvoke), []byte("Event"), []byte("1")}) - checkQuery(t, stub, "Event", eventResponse) - checkQuery(t, stubEx2, "a", "101") - checkQuery(t, stubEx2, "b", "232") - - // Invoke A->B for 10 via Example04's chaincode - checkInvoke(t, stub, [][]byte{[]byte("invoke"), []byte(chaincodeToInvoke), []byte("Event"), []byte("1")}) - checkQuery(t, stub, "Event", eventResponse) - checkQuery(t, stubEx2, "a", "91") - checkQuery(t, stubEx2, "b", "242") -} diff --git a/app/platform/fabric/e2e-test/fabric/examples/chaincode/go/example04/cmd/main.go b/app/platform/fabric/e2e-test/fabric/examples/chaincode/go/example04/cmd/main.go deleted file mode 100644 index 46857a6af..000000000 --- a/app/platform/fabric/e2e-test/fabric/examples/chaincode/go/example04/cmd/main.go +++ /dev/null @@ -1,21 +0,0 @@ -/* -Copyright IBM Corp. All Rights Reserved. 
- -SPDX-License-Identifier: Apache-2.0 -*/ - -package main - -import ( - "fmt" - - "github.com/hyperledger/fabric/core/chaincode/shim" - "github.com/hyperledger/fabric/examples/chaincode/go/example04" -) - -func main() { - err := shim.Start(new(example04.SimpleChaincode)) - if err != nil { - fmt.Printf("Error starting Simple chaincode: %s", err) - } -} diff --git a/app/platform/fabric/e2e-test/fabric/examples/chaincode/go/example05/chaincode.go b/app/platform/fabric/e2e-test/fabric/examples/chaincode/go/example05/chaincode.go deleted file mode 100644 index 45909dd46..000000000 --- a/app/platform/fabric/e2e-test/fabric/examples/chaincode/go/example05/chaincode.go +++ /dev/null @@ -1,197 +0,0 @@ -/* -Copyright IBM Corp. All Rights Reserved. - -SPDX-License-Identifier: Apache-2.0 -*/ - -package example05 - -import ( - "fmt" - "strconv" - - "github.com/hyperledger/fabric/core/chaincode/shim" - pb "github.com/hyperledger/fabric/protos/peer" -) - -// This chaincode is a test for chaincode querying another chaincode - invokes chaincode_example02 and computes the sum of a and b and stores it as state - -// SimpleChaincode example simple Chaincode implementation -type SimpleChaincode struct{} - -func toChaincodeArgs(args ...string) [][]byte { - bargs := make([][]byte, len(args)) - for i, arg := range args { - bargs[i] = []byte(arg) - } - return bargs -} - -// Init takes two arguments, a string and int. The string will be a key with -// the int as a value. -func (t *SimpleChaincode) Init(stub shim.ChaincodeStubInterface) pb.Response { - var sum string // Sum of asset holdings across accounts. Initially 0 - var sumVal int // Sum of holdings - var err error - _, args := stub.GetFunctionAndParameters() - if len(args) != 2 { - return shim.Error("Incorrect number of arguments. 
Expecting 2") - } - - // Initialize the chaincode - sum = args[0] - sumVal, err = strconv.Atoi(args[1]) - if err != nil { - return shim.Error("Expecting integer value for sum") - } - fmt.Printf("sumVal = %d\n", sumVal) - - // Write the state to the ledger - err = stub.PutState(sum, []byte(strconv.Itoa(sumVal))) - if err != nil { - return shim.Error(err.Error()) - } - - return shim.Success(nil) -} - -// Invoke queries another chaincode and updates its own state -func (t *SimpleChaincode) invoke(stub shim.ChaincodeStubInterface, args []string) pb.Response { - var sum, channelName string // Sum entity - var Aval, Bval, sumVal int // value of sum entity - to be computed - var err error - - if len(args) < 2 { - return shim.Error("Incorrect number of arguments. Expecting atleast 2") - } - - chaincodeName := args[0] // Expecting name of the chaincode you would like to call, this name would be given during chaincode install time - sum = args[1] - - if len(args) > 2 { - channelName = args[2] - } else { - channelName = "" - } - - // Query chaincode_example02 - f := "query" - queryArgs := toChaincodeArgs(f, "a") - - // if chaincode being invoked is on the same channel, - // then channel defaults to the current channel and args[2] can be "". - // If the chaincode being called is on a different channel, - // then you must specify the channel name in args[2] - - response := stub.InvokeChaincode(chaincodeName, queryArgs, channelName) - if response.Status != shim.OK { - errStr := fmt.Sprintf("Failed to query chaincode. 
Got error: %s", response.Payload) - fmt.Printf(errStr) - return shim.Error(errStr) - } - Aval, err = strconv.Atoi(string(response.Payload)) - if err != nil { - errStr := fmt.Sprintf("Error retrieving state from ledger for queried chaincode: %s", err.Error()) - fmt.Printf(errStr) - return shim.Error(errStr) - } - - queryArgs = toChaincodeArgs(f, "b") - response = stub.InvokeChaincode(chaincodeName, queryArgs, channelName) - if response.Status != shim.OK { - errStr := fmt.Sprintf("Failed to query chaincode. Got error: %s", response.Payload) - fmt.Printf(errStr) - return shim.Error(errStr) - } - Bval, err = strconv.Atoi(string(response.Payload)) - if err != nil { - errStr := fmt.Sprintf("Error retrieving state from ledger for queried chaincode: %s", err.Error()) - fmt.Printf(errStr) - return shim.Error(errStr) - } - - // Compute sum - sumVal = Aval + Bval - - // Write sumVal back to the ledger - err = stub.PutState(sum, []byte(strconv.Itoa(sumVal))) - if err != nil { - return shim.Error(err.Error()) - } - - fmt.Printf("Invoke chaincode successful. Got sum %d\n", sumVal) - return shim.Success([]byte(strconv.Itoa(sumVal))) -} - -func (t *SimpleChaincode) query(stub shim.ChaincodeStubInterface, args []string) pb.Response { - var sum, channelName string // Sum entity - var Aval, Bval, sumVal int // value of sum entity - to be computed - var err error - - if len(args) < 2 { - return shim.Error("Incorrect number of arguments. Expecting atleast 2") - } - - chaincodeName := args[0] // Expecting name of the chaincode you would like to call, this name would be given during chaincode install time - sum = args[1] - - if len(args) > 2 { - channelName = args[2] - } else { - channelName = "" - } - - // Query chaincode_example02 - f := "query" - queryArgs := toChaincodeArgs(f, "a") - - // if chaincode being invoked is on the same channel, - // then channel defaults to the current channel and args[2] can be "". 
- // If the chaincode being called is on a different channel, - // then you must specify the channel name in args[2] - response := stub.InvokeChaincode(chaincodeName, queryArgs, channelName) - if response.Status != shim.OK { - errStr := fmt.Sprintf("Failed to query chaincode. Got error: %s", response.Payload) - fmt.Printf(errStr) - return shim.Error(errStr) - } - Aval, err = strconv.Atoi(string(response.Payload)) - if err != nil { - errStr := fmt.Sprintf("Error retrieving state from ledger for queried chaincode: %s", err.Error()) - fmt.Printf(errStr) - return shim.Error(errStr) - } - - queryArgs = toChaincodeArgs(f, "b") - response = stub.InvokeChaincode(chaincodeName, queryArgs, channelName) - if response.Status != shim.OK { - errStr := fmt.Sprintf("Failed to query chaincode. Got error: %s", response.Payload) - fmt.Printf(errStr) - return shim.Error(errStr) - } - Bval, err = strconv.Atoi(string(response.Payload)) - if err != nil { - errStr := fmt.Sprintf("Error retrieving state from ledger for queried chaincode: %s", err.Error()) - fmt.Printf(errStr) - return shim.Error(errStr) - } - - // Compute sum - sumVal = Aval + Bval - - fmt.Printf("Query chaincode successful. Got sum %d\n", sumVal) - jsonResp := "{\"Name\":\"" + sum + "\",\"Value\":\"" + strconv.Itoa(sumVal) + "\"}" - fmt.Printf("Query Response:%s\n", jsonResp) - return shim.Success([]byte(strconv.Itoa(sumVal))) -} - -func (t *SimpleChaincode) Invoke(stub shim.ChaincodeStubInterface) pb.Response { - function, args := stub.GetFunctionAndParameters() - if function == "invoke" { - return t.invoke(stub, args) - } else if function == "query" { - return t.query(stub, args) - } - - return shim.Success([]byte("Invalid invoke function name. 
Expecting \"invoke\" \"query\"")) -} diff --git a/app/platform/fabric/e2e-test/fabric/examples/chaincode/go/example05/chaincode_test.go b/app/platform/fabric/e2e-test/fabric/examples/chaincode/go/example05/chaincode_test.go deleted file mode 100644 index a9a827735..000000000 --- a/app/platform/fabric/e2e-test/fabric/examples/chaincode/go/example05/chaincode_test.go +++ /dev/null @@ -1,119 +0,0 @@ -/* -Copyright IBM Corp. All Rights Reserved. - -SPDX-License-Identifier: Apache-2.0 -*/ - -package example05 - -import ( - "fmt" - "testing" - - "github.com/hyperledger/fabric/core/chaincode/shim" - "github.com/hyperledger/fabric/examples/chaincode/go/example02" -) - -var chaincodeName = "ex02" - -// chaincode_example05 looks like it wanted to return a JSON response to Query() -// it doesn't actually do this though, it just returns the sum value -func jsonResponse(name string, value string) string { - return fmt.Sprintf("jsonResponse = \"{\"Name\":\"%v\",\"Value\":\"%v\"}", name, value) -} - -func checkInit(t *testing.T, stub *shim.MockStub, args [][]byte) { - res := stub.MockInit("1", args) - if res.Status != shim.OK { - fmt.Println("Init failed", string(res.Message)) - t.FailNow() - } -} - -func checkState(t *testing.T, stub *shim.MockStub, name string, expect string) { - bytes := stub.State[name] - if bytes == nil { - fmt.Println("State", name, "failed to get value") - t.FailNow() - } - if string(bytes) != expect { - fmt.Println("State value", name, "was not", expect, "as expected") - t.FailNow() - } -} - -func checkQuery(t *testing.T, stub *shim.MockStub, args [][]byte, expect string) { - res := stub.MockInvoke("1", args) - if res.Status != shim.OK { - fmt.Println("Query", args, "failed", string(res.Message)) - t.FailNow() - } - if res.Payload == nil { - fmt.Println("Query", args, "failed to get result") - t.FailNow() - } - if string(res.Payload) != expect { - fmt.Println("Query result ", string(res.Payload), "was not", expect, "as expected") - t.FailNow() - } -} - 
-func checkInvoke(t *testing.T, stub *shim.MockStub, args [][]byte) { - res := stub.MockInvoke("1", args) - if res.Status != shim.OK { - fmt.Println("Invoke", args, "failed", string(res.Message)) - t.FailNow() - } -} - -func TestExample05_Init(t *testing.T) { - scc := new(SimpleChaincode) - stub := shim.NewMockStub("ex05", scc) - - // Init A=123 B=234 - checkInit(t, stub, [][]byte{[]byte("init"), []byte("sumStoreName"), []byte("432")}) - - checkState(t, stub, "sumStoreName", "432") -} - -func TestExample05_Query(t *testing.T) { - scc := new(SimpleChaincode) - stub := shim.NewMockStub("ex05", scc) - - ccEx2 := new(example02.SimpleChaincode) - stubEx2 := shim.NewMockStub(chaincodeName, ccEx2) - checkInit(t, stubEx2, [][]byte{[]byte("init"), []byte("a"), []byte("111"), []byte("b"), []byte("222")}) - stub.MockPeerChaincode(chaincodeName, stubEx2) - - checkInit(t, stub, [][]byte{[]byte("init"), []byte("sumStoreName"), []byte("0")}) - - // a + b = 111 + 222 = 333 - checkQuery(t, stub, [][]byte{[]byte("query"), []byte(chaincodeName), []byte("sumStoreName"), []byte("")}, "333") // example05 doesn't return JSON? -} - -func TestExample05_Invoke(t *testing.T) { - scc := new(SimpleChaincode) - stub := shim.NewMockStub("ex05", scc) - - ccEx2 := new(example02.SimpleChaincode) - stubEx2 := shim.NewMockStub(chaincodeName, ccEx2) - checkInit(t, stubEx2, [][]byte{[]byte("init"), []byte("a"), []byte("222"), []byte("b"), []byte("333")}) - stub.MockPeerChaincode(chaincodeName, stubEx2) - - checkInit(t, stub, [][]byte{[]byte("init"), []byte("sumStoreName"), []byte("0")}) - - // a + b = 222 + 333 = 555 - checkInvoke(t, stub, [][]byte{[]byte("invoke"), []byte(chaincodeName), []byte("sumStoreName"), []byte("")}) - checkQuery(t, stub, [][]byte{[]byte("query"), []byte(chaincodeName), []byte("sumStoreName"), []byte("")}, "555") // example05 doesn't return JSON? 
- checkQuery(t, stubEx2, [][]byte{[]byte("query"), []byte("a")}, "222") - checkQuery(t, stubEx2, [][]byte{[]byte("query"), []byte("b")}, "333") - - // update A-=10 and B+=10 - checkInvoke(t, stubEx2, [][]byte{[]byte("invoke"), []byte("a"), []byte("b"), []byte("10")}) - - // a + b = 212 + 343 = 555 - checkInvoke(t, stub, [][]byte{[]byte("invoke"), []byte(chaincodeName), []byte("sumStoreName"), []byte("")}) - checkQuery(t, stub, [][]byte{[]byte("query"), []byte(chaincodeName), []byte("sumStoreName"), []byte("")}, "555") // example05 doesn't return JSON? - checkQuery(t, stubEx2, [][]byte{[]byte("query"), []byte("a")}, "212") - checkQuery(t, stubEx2, [][]byte{[]byte("query"), []byte("b")}, "343") -} diff --git a/app/platform/fabric/e2e-test/fabric/examples/chaincode/go/example05/cmd/main.go b/app/platform/fabric/e2e-test/fabric/examples/chaincode/go/example05/cmd/main.go deleted file mode 100644 index ee0449658..000000000 --- a/app/platform/fabric/e2e-test/fabric/examples/chaincode/go/example05/cmd/main.go +++ /dev/null @@ -1,21 +0,0 @@ -/* -Copyright IBM Corp. All Rights Reserved. - -SPDX-License-Identifier: Apache-2.0 -*/ - -package main - -import ( - "fmt" - - "github.com/hyperledger/fabric/core/chaincode/shim" - "github.com/hyperledger/fabric/examples/chaincode/go/example05" -) - -func main() { - err := shim.Start(new(example05.SimpleChaincode)) - if err != nil { - fmt.Printf("Error starting Simple chaincode: %s", err) - } -} diff --git a/app/platform/fabric/e2e-test/fabric/examples/chaincode/go/invokereturnsvalue/invokereturnsvalue.go b/app/platform/fabric/e2e-test/fabric/examples/chaincode/go/invokereturnsvalue/invokereturnsvalue.go deleted file mode 100644 index 78d4ec619..000000000 --- a/app/platform/fabric/e2e-test/fabric/examples/chaincode/go/invokereturnsvalue/invokereturnsvalue.go +++ /dev/null @@ -1,133 +0,0 @@ -/* -Copyright IBM Corp. 2016 All Rights Reserved. 
- -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package main - -import ( - "fmt" - "strconv" - - "github.com/hyperledger/fabric/core/chaincode/shim" - pb "github.com/hyperledger/fabric/protos/peer" -) - -// SimpleChaincode example simple Chaincode implementation -type SimpleChaincode struct { -} - -// Init method of chaincode -func (t *SimpleChaincode) Init(stub shim.ChaincodeStubInterface) pb.Response { - _, args := stub.GetFunctionAndParameters() - var A, B string // Entities - var Aval, Bval int // Asset holdings - var err error - - if len(args) != 4 { - return shim.Error("Incorrect number of arguments. 
Expecting 4") - } - - // Initialize the chaincode - A = args[0] - Aval, err = strconv.Atoi(args[1]) - if err != nil { - return shim.Error("Expecting integer value for asset holding") - } - B = args[2] - Bval, err = strconv.Atoi(args[3]) - if err != nil { - return shim.Error("Expecting integer value for asset holding") - } - fmt.Printf("Aval = %d, Bval = %d\n", Aval, Bval) - - // Write the state to the ledger - err = stub.PutState(A, []byte(strconv.Itoa(Aval))) - if err != nil { - return shim.Error(err.Error()) - } - - err = stub.PutState(B, []byte(strconv.Itoa(Bval))) - if err != nil { - return shim.Error(err.Error()) - } - - return shim.Success([]byte("OK")) -} - -// Invoke transaction makes payment of X units from A to B -func (t *SimpleChaincode) Invoke(stub shim.ChaincodeStubInterface) pb.Response { - _, args := stub.GetFunctionAndParameters() - - var A, B string // Entities - var Aval, Bval int // Asset holdings - var X int // Transaction value - var err error - - if len(args) != 3 { - return shim.Error("Incorrect number of arguments. 
Expecting 3") - } - - A = args[0] - B = args[1] - - // Get the state from the ledger - // TODO: will be nice to have a GetAllState call to ledger - Avalbytes, err := stub.GetState(A) - if err != nil { - return shim.Error("Failed to get state") - } - if Avalbytes == nil { - return shim.Error("Entity not found") - } - Aval, _ = strconv.Atoi(string(Avalbytes)) - - Bvalbytes, err := stub.GetState(B) - if err != nil { - return shim.Error("Failed to get state") - } - if Bvalbytes == nil { - return shim.Error("Entity not found") - } - Bval, _ = strconv.Atoi(string(Bvalbytes)) - - // Perform the execution - X, err = strconv.Atoi(args[2]) - if err != nil { - return shim.Error("Invalid transaction amount, expecting a integer value") - } - Aval = Aval - X - Bval = Bval + X - fmt.Printf("Aval = %d, Bval = %d\n", Aval, Bval) - - // Write the state back to the ledger - err = stub.PutState(A, []byte(strconv.Itoa(Aval))) - if err != nil { - return shim.Error(err.Error()) - } - - err = stub.PutState(B, []byte(strconv.Itoa(Bval))) - if err != nil { - return shim.Error(err.Error()) - } - - return shim.Success([]byte(fmt.Sprintf("{%d,%d}", Aval, Bval))) -} - -func main() { - err := shim.Start(new(SimpleChaincode)) - if err != nil { - fmt.Printf("Error starting Simple chaincode: %s", err) - } -} diff --git a/app/platform/fabric/e2e-test/fabric/examples/chaincode/go/invokereturnsvalue/invokereturnsvalue_test.go b/app/platform/fabric/e2e-test/fabric/examples/chaincode/go/invokereturnsvalue/invokereturnsvalue_test.go deleted file mode 100644 index 1d9d1e1f9..000000000 --- a/app/platform/fabric/e2e-test/fabric/examples/chaincode/go/invokereturnsvalue/invokereturnsvalue_test.go +++ /dev/null @@ -1,97 +0,0 @@ -/* -Copyright IBM Corp. 2016 All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ -package main - -import ( - "fmt" - "testing" - - "github.com/hyperledger/fabric/core/chaincode/shim" -) - -func checkInit(t *testing.T, stub *shim.MockStub, args [][]byte, retval []byte) { - res := stub.MockInit("1", args) - if res.Status != shim.OK { - fmt.Println("Init failed", string(res.Message)) - t.FailNow() - } - if retval != nil { - if res.Payload == nil { - fmt.Printf("Init returned nil, expected %s", string(retval)) - t.FailNow() - } - if string(res.Payload) != string(retval) { - fmt.Printf("Init returned %s, expected %s", string(res.Payload), string(retval)) - t.FailNow() - } - } -} - -func checkState(t *testing.T, stub *shim.MockStub, name string, value string) { - bytes := stub.State[name] - if bytes == nil { - fmt.Println("State", name, "failed to get value") - t.FailNow() - } - if string(bytes) != value { - fmt.Println("State value", name, "was not", value, "as expected") - t.FailNow() - } -} - -func checkInvoke(t *testing.T, stub *shim.MockStub, args [][]byte, retval []byte) { - res := stub.MockInvoke("1", args) - if res.Status != shim.OK { - fmt.Println("Invoke", args, "failed", string(res.Message)) - t.FailNow() - } - - if retval != nil { - if res.Payload == nil { - fmt.Printf("Invoke returned nil, expected %s", string(retval)) - t.FailNow() - } - if string(res.Payload) != string(retval) { - fmt.Printf("Invoke returned %s, expected %s", string(res.Payload), string(retval)) - t.FailNow() - } - } -} - -func Test_Init(t *testing.T) { - scc := new(SimpleChaincode) - stub := shim.NewMockStub("ex02", scc) - - // Init A=123 B=234 - checkInit(t, stub, 
[][]byte{[]byte("init"), []byte("A"), []byte("123"), []byte("B"), []byte("234")}, []byte("OK")) - - checkState(t, stub, "A", "123") - checkState(t, stub, "B", "234") -} - -func Test_Invoke(t *testing.T) { - scc := new(SimpleChaincode) - stub := shim.NewMockStub("ex02", scc) - - // Init A=567 B=678 - checkInit(t, stub, [][]byte{[]byte("init"), []byte("A"), []byte("567"), []byte("B"), []byte("678")}, []byte("OK")) - - // Invoke A->B for 123 - checkInvoke(t, stub, [][]byte{[]byte("invoke"), []byte("A"), []byte("B"), []byte("123")}, []byte("{444,801}")) - - // Invoke B->A for 234 - checkInvoke(t, stub, [][]byte{[]byte("invoke"), []byte("B"), []byte("A"), []byte("234")}, []byte("{567,678}")) -} diff --git a/app/platform/fabric/e2e-test/fabric/examples/chaincode/go/map/map.go b/app/platform/fabric/e2e-test/fabric/examples/chaincode/go/map/map.go deleted file mode 100644 index 2f7e2287f..000000000 --- a/app/platform/fabric/e2e-test/fabric/examples/chaincode/go/map/map.go +++ /dev/null @@ -1,465 +0,0 @@ -/* -Copyright IBM Corp. All Rights Reserved. - -SPDX-License-Identifier: Apache-2.0 -*/ - -package main - -import ( - "encoding/json" - "fmt" - "strconv" - "time" - - "github.com/hyperledger/fabric/core/chaincode/shim" - pb "github.com/hyperledger/fabric/protos/peer" -) - -// This chaincode implements a simple map that is stored in the state. -// The following operations are available. 
- -// Invoke operations -// put - requires two arguments, a key and value -// remove - requires a key -// get - requires one argument, a key, and returns a value -// keys - requires no arguments, returns all keys - -// SimpleChaincode example simple Chaincode implementation -type SimpleChaincode struct { -} - -type PageResponse struct { - Bookmark string `json:"bookmark"` - Keys []string `json:"keys"` -} - -// Init is a no-op -func (t *SimpleChaincode) Init(stub shim.ChaincodeStubInterface) pb.Response { - return shim.Success(nil) -} - -// Invoke has two functions -// put - takes two arguments, a key and value, and stores them in the state -// remove - takes one argument, a key, and removes if from the state -func (t *SimpleChaincode) Invoke(stub shim.ChaincodeStubInterface) pb.Response { - function, args := stub.GetFunctionAndParameters() - switch function { - - case "putPrivate": - return t.putPrivate(stub, args) - - case "removePrivate": - return t.removePrivate(stub, args) - - case "getPrivate": - return t.getPrivate(stub, args) - - case "keysPrivate": - return t.keysPrivate(stub, args) - - case "queryPrivate": - return t.queryPrivate(stub, args) - - case "put": - return t.put(stub, args) - - case "remove": - return t.remove(stub, args) - - case "get": - return t.get(stub, args) - - case "keys": - return t.keys(stub, args) - - case "keysByPage": - return t.keysByPage(stub, args) - - case "query": - return t.query(stub, args) - - case "queryByPage": - return t.queryByPage(stub, args) - - case "history": - return t.history(stub, args) - - case "getPut": - return t.getPut(stub, args) - - case "getPutPrivate": - return t.getPutPrivate(stub, args) - - default: - return shim.Error("Unsupported operation") - } -} - -func (t *SimpleChaincode) putPrivate(stub shim.ChaincodeStubInterface, args []string) pb.Response { - if len(args) < 3 { - return shim.Error("put operation on private data must include three arguments: [collection, key, value]") - } - collection := args[0] 
- key := args[1] - value := args[2] - - if err := stub.PutPrivateData(collection, key, []byte(value)); err != nil { - fmt.Printf("Error putting private data%s", err) - return shim.Error(fmt.Sprintf("put operation failed. Error updating state: %s", err)) - } - - return shim.Success(nil) -} -func (t *SimpleChaincode) removePrivate(stub shim.ChaincodeStubInterface, args []string) pb.Response { - if len(args) < 2 { - return shim.Error("remove operation on private data must include two arguments: [collection, key]") - } - collection := args[0] - key := args[1] - - err := stub.DelPrivateData(collection, key) - if err != nil { - return shim.Error(fmt.Sprintf("remove operation on private data failed. Error updating state: %s", err)) - } - return shim.Success(nil) -} -func (t *SimpleChaincode) getPrivate(stub shim.ChaincodeStubInterface, args []string) pb.Response { - if len(args) < 2 { - return shim.Error("get operation on private data must include two arguments: [collection, key]") - } - collection := args[0] - key := args[1] - value, err := stub.GetPrivateData(collection, key) - if err != nil { - return shim.Error(fmt.Sprintf("get operation on private data failed. Error accessing state: %s", err)) - } - jsonVal, err := json.Marshal(string(value)) - return shim.Success(jsonVal) - -} -func (t *SimpleChaincode) keysPrivate(stub shim.ChaincodeStubInterface, args []string) pb.Response { - if len(args) < 3 { - return shim.Error("range query operation on private data must include three arguments, a collection, key and value") - } - collection := args[0] - startKey := args[1] - endKey := args[2] - - //sleep needed to test peer's timeout behavior when using iterators - stime := 0 - if len(args) > 3 { - stime, _ = strconv.Atoi(args[3]) - } - - keysIter, err := stub.GetPrivateDataByRange(collection, startKey, endKey) - if err != nil { - return shim.Error(fmt.Sprintf("keys operation failed on private data. 
Error accessing state: %s", err)) - } - defer keysIter.Close() - - var keys []string - for keysIter.HasNext() { - //if sleeptime is specied, take a nap - if stime > 0 { - time.Sleep(time.Duration(stime) * time.Millisecond) - } - - response, iterErr := keysIter.Next() - if iterErr != nil { - return shim.Error(fmt.Sprintf("keys operation on private data failed. Error accessing state: %s", err)) - } - keys = append(keys, response.Key) - } - - for key, value := range keys { - fmt.Printf("key %d contains %s\n", key, value) - } - - jsonKeys, err := json.Marshal(keys) - if err != nil { - return shim.Error(fmt.Sprintf("keys operation on private data failed. Error marshaling JSON: %s", err)) - } - - return shim.Success(jsonKeys) -} - -func (t *SimpleChaincode) queryPrivate(stub shim.ChaincodeStubInterface, args []string) pb.Response { - collection := args[0] - query := args[1] - keysIter, err := stub.GetPrivateDataQueryResult(collection, query) - if err != nil { - return shim.Error(fmt.Sprintf("query operation on private data failed. Error accessing state: %s", err)) - } - defer keysIter.Close() - - var keys []string - for keysIter.HasNext() { - response, iterErr := keysIter.Next() - if iterErr != nil { - return shim.Error(fmt.Sprintf("query operation on private data failed. Error accessing state: %s", err)) - } - keys = append(keys, response.Key) - } - - jsonKeys, err := json.Marshal(keys) - if err != nil { - return shim.Error(fmt.Sprintf("query operation on private data failed. Error marshaling JSON: %s", err)) - } - - return shim.Success(jsonKeys) -} -func (t *SimpleChaincode) put(stub shim.ChaincodeStubInterface, args []string) pb.Response { - if len(args) < 2 { - return shim.Error("put operation must include two arguments: [key, value]") - } - key := args[0] - value := args[1] - - if err := stub.PutState(key, []byte(value)); err != nil { - fmt.Printf("Error putting state %s", err) - return shim.Error(fmt.Sprintf("put operation failed. 
Error updating state: %s", err)) - } - - indexName := "compositeKeyTest" - compositeKeyTestIndex, err := stub.CreateCompositeKey(indexName, []string{key}) - if err != nil { - return shim.Error(err.Error()) - } - - valueByte := []byte{0x00} - if err := stub.PutState(compositeKeyTestIndex, valueByte); err != nil { - fmt.Printf("Error putting state with compositeKey %s", err) - return shim.Error(fmt.Sprintf("put operation failed. Error updating state with compositeKey: %s", err)) - } - - return shim.Success(nil) -} -func (t *SimpleChaincode) remove(stub shim.ChaincodeStubInterface, args []string) pb.Response { - if len(args) < 1 { - return shim.Error("remove operation must include one argument: [key]") - } - key := args[0] - - err := stub.DelState(key) - if err != nil { - return shim.Error(fmt.Sprintf("remove operation failed. Error updating state: %s", err)) - } - return shim.Success(nil) -} -func (t *SimpleChaincode) get(stub shim.ChaincodeStubInterface, args []string) pb.Response { - if len(args) < 1 { - return shim.Error("get operation must include one argument, a key") - } - key := args[0] - value, err := stub.GetState(key) - if err != nil { - return shim.Error(fmt.Sprintf("get operation failed. Error accessing state: %s", err)) - } - jsonVal, err := json.Marshal(string(value)) - return shim.Success(jsonVal) -} -func (t *SimpleChaincode) keys(stub shim.ChaincodeStubInterface, args []string) pb.Response { - if len(args) < 2 { - return shim.Error("keys operation must include two arguments, a key and value") - } - startKey := args[0] - endKey := args[1] - - //sleep needed to test peer's timeout behavior when using iterators - stime := 0 - if len(args) > 2 { - stime, _ = strconv.Atoi(args[2]) - } - - keysIter, err := stub.GetStateByRange(startKey, endKey) - if err != nil { - return shim.Error(fmt.Sprintf("keys operation failed. 
Error accessing state: %s", err)) - } - defer keysIter.Close() - - var keys []string - for keysIter.HasNext() { - //if sleeptime is specied, take a nap - if stime > 0 { - time.Sleep(time.Duration(stime) * time.Millisecond) - } - - response, iterErr := keysIter.Next() - if iterErr != nil { - return shim.Error(fmt.Sprintf("keys operation failed. Error accessing state: %s", err)) - } - keys = append(keys, response.Key) - } - - for key, value := range keys { - fmt.Printf("key %d contains %s\n", key, value) - } - - jsonKeys, err := json.Marshal(keys) - if err != nil { - return shim.Error(fmt.Sprintf("keys operation failed. Error marshaling JSON: %s", err)) - } - - return shim.Success(jsonKeys) -} - -func (t *SimpleChaincode) keysByPage(stub shim.ChaincodeStubInterface, args []string) pb.Response { - if len(args) < 4 { - return shim.Error("paginated range query operation must include four arguments, a key, value, pageSize and a bookmark") - } - startKey := args[0] - endKey := args[1] - pageSize, parserr := strconv.ParseInt(args[2], 10, 32) - if parserr != nil { - return shim.Error(fmt.Sprintf("error parsing range pagesize: %s", parserr)) - } - bookmark := args[3] - - //sleep needed to test peer's timeout behavior when using iterators - stime := 0 - if len(args) > 4 { - stime, _ = strconv.Atoi(args[4]) - } - - keysIter, resp, err := stub.GetStateByRangeWithPagination(startKey, endKey, int32(pageSize), bookmark) - if err != nil { - return shim.Error(fmt.Sprintf("keysByPage operation failed. Error accessing state: %s", err)) - } - defer keysIter.Close() - - var keys []string - for keysIter.HasNext() { - //if sleeptime is specied, take a nap - if stime > 0 { - time.Sleep(time.Duration(stime) * time.Millisecond) - } - - response, iterErr := keysIter.Next() - if iterErr != nil { - return shim.Error(fmt.Sprintf("keysByPage operation failed. 
Error accessing state: %s", err)) - } - keys = append(keys, response.Key) - } - - for index, value := range keys { - fmt.Printf("key %d contains %s\n", index, value) - } - - jsonResp := PageResponse{ - Bookmark: resp.Bookmark, - Keys: keys, - } - - queryResp, err := json.Marshal(jsonResp) - if err != nil { - return shim.Error(fmt.Sprintf("keysByPage operation failed. Error marshaling JSON: %s", err)) - } - - return shim.Success(queryResp) -} -func (t *SimpleChaincode) query(stub shim.ChaincodeStubInterface, args []string) pb.Response { - query := args[0] - keysIter, err := stub.GetQueryResult(query) - if err != nil { - return shim.Error(fmt.Sprintf("query operation failed. Error accessing state: %s", err)) - } - defer keysIter.Close() - - var keys []string - for keysIter.HasNext() { - response, iterErr := keysIter.Next() - if iterErr != nil { - return shim.Error(fmt.Sprintf("query operation failed. Error accessing state: %s", err)) - } - keys = append(keys, response.Key) - } - - jsonKeys, err := json.Marshal(keys) - if err != nil { - return shim.Error(fmt.Sprintf("query operation failed. Error marshaling JSON: %s", err)) - } - - return shim.Success(jsonKeys) -} -func (t *SimpleChaincode) queryByPage(stub shim.ChaincodeStubInterface, args []string) pb.Response { - query := args[0] - pageSize, parserr := strconv.ParseInt(args[1], 10, 32) - if parserr != nil { - return shim.Error(fmt.Sprintf("error parsing query pagesize: %s", parserr)) - } - bookmark := args[2] - - keysIter, resp, err := stub.GetQueryResultWithPagination(query, int32(pageSize), bookmark) - if err != nil { - return shim.Error(fmt.Sprintf("queryByPage operation failed. Error accessing state: %s", err)) - } - defer keysIter.Close() - - var keys []string - for keysIter.HasNext() { - response, iterErr := keysIter.Next() - if iterErr != nil { - return shim.Error(fmt.Sprintf("queryByPage operation failed. 
Error accessing state: %s", err)) - } - keys = append(keys, response.Key) - } - - for key, value := range keys { - fmt.Printf("key %d contains %s\n", key, value) - } - - jsonResp := PageResponse{ - Bookmark: resp.Bookmark, - Keys: keys, - } - - queryResp, err := json.Marshal(jsonResp) - if err != nil { - return shim.Error(fmt.Sprintf("queryByPage operation failed. Error marshaling JSON: %s", err)) - } - - return shim.Success(queryResp) -} -func (t *SimpleChaincode) history(stub shim.ChaincodeStubInterface, args []string) pb.Response { - key := args[0] - keysIter, err := stub.GetHistoryForKey(key) - if err != nil { - return shim.Error(fmt.Sprintf("get history operation failed. Error accessing state: %s", err)) - } - defer keysIter.Close() - - var keys []string - for keysIter.HasNext() { - response, iterErr := keysIter.Next() - if iterErr != nil { - return shim.Error(fmt.Sprintf("get history operation failed. Error accessing state: %s", err)) - } - keys = append(keys, response.TxId) - } - - for key, txID := range keys { - fmt.Printf("key %d contains %s\n", key, txID) - } - - jsonKeys, err := json.Marshal(keys) - if err != nil { - return shim.Error(fmt.Sprintf("get history operation failed. 
Error marshaling JSON: %s", err)) - } - - return shim.Success(jsonKeys) -} -func (t *SimpleChaincode) getPut(stub shim.ChaincodeStubInterface, args []string) pb.Response { - _ = t.get(stub, args) - return t.put(stub, args) -} -func (t *SimpleChaincode) getPutPrivate(stub shim.ChaincodeStubInterface, args []string) pb.Response { - _ = t.getPrivate(stub, args) - return t.putPrivate(stub, args) -} -func main() { - err := shim.Start(new(SimpleChaincode)) - if err != nil { - fmt.Printf("Error starting chaincode: %s", err) - } -} diff --git a/app/platform/fabric/e2e-test/fabric/examples/chaincode/go/marbles02/META-INF/statedb/couchdb/indexes/indexOwner.json b/app/platform/fabric/e2e-test/fabric/examples/chaincode/go/marbles02/META-INF/statedb/couchdb/indexes/indexOwner.json deleted file mode 100644 index 0cf070623..000000000 --- a/app/platform/fabric/e2e-test/fabric/examples/chaincode/go/marbles02/META-INF/statedb/couchdb/indexes/indexOwner.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "index": { "fields": ["docType", "owner"] }, - "ddoc": "indexOwnerDoc", - "name": "indexOwner", - "type": "json" -} diff --git a/app/platform/fabric/e2e-test/fabric/examples/chaincode/go/marbles02/marbles_chaincode.go b/app/platform/fabric/e2e-test/fabric/examples/chaincode/go/marbles02/marbles_chaincode.go deleted file mode 100644 index 2ed3efd67..000000000 --- a/app/platform/fabric/e2e-test/fabric/examples/chaincode/go/marbles02/marbles_chaincode.go +++ /dev/null @@ -1,755 +0,0 @@ -/* - SPDX-License-Identifier: Apache-2.0 -*/ - -// ====CHAINCODE EXECUTION SAMPLES (CLI) ================== - -// ==== Invoke marbles ==== -// peer chaincode invoke -C myc1 -n marbles -c '{"Args":["initMarble","marble1","blue","35","tom"]}' -// peer chaincode invoke -C myc1 -n marbles -c '{"Args":["initMarble","marble2","red","50","tom"]}' -// peer chaincode invoke -C myc1 -n marbles -c '{"Args":["initMarble","marble3","blue","70","tom"]}' -// peer chaincode invoke -C myc1 -n marbles -c 
'{"Args":["transferMarble","marble2","jerry"]}' -// peer chaincode invoke -C myc1 -n marbles -c '{"Args":["transferMarblesBasedOnColor","blue","jerry"]}' -// peer chaincode invoke -C myc1 -n marbles -c '{"Args":["delete","marble1"]}' - -// ==== Query marbles ==== -// peer chaincode query -C myc1 -n marbles -c '{"Args":["readMarble","marble1"]}' -// peer chaincode query -C myc1 -n marbles -c '{"Args":["getMarblesByRange","marble1","marble3"]}' -// peer chaincode query -C myc1 -n marbles -c '{"Args":["getHistoryForMarble","marble1"]}' - -// Rich Query (Only supported if CouchDB is used as state database): -// peer chaincode query -C myc1 -n marbles -c '{"Args":["queryMarblesByOwner","tom"]}' -// peer chaincode query -C myc1 -n marbles -c '{"Args":["queryMarbles","{\"selector\":{\"owner\":\"tom\"}}"]}' - -// Rich Query with Pagination (Only supported if CouchDB is used as state database): -// peer chaincode query -C myc1 -n marbles -c '{"Args":["queryMarblesWithPagination","{\"selector\":{\"owner\":\"tom\"}}","3",""]}' - -// INDEXES TO SUPPORT COUCHDB RICH QUERIES -// -// Indexes in CouchDB are required in order to make JSON queries efficient and are required for -// any JSON query with a sort. As of Hyperledger Fabric 1.1, indexes may be packaged alongside -// chaincode in a META-INF/statedb/couchdb/indexes directory. Each index must be defined in its own -// text file with extension *.json with the index definition formatted in JSON following the -// CouchDB index JSON syntax as documented at: -// http://docs.couchdb.org/en/2.1.1/api/database/find.html#db-index -// -// This marbles02 example chaincode demonstrates a packaged -// index which you can find in META-INF/statedb/couchdb/indexes/indexOwner.json. 
-// For deployment of chaincode to production environments, it is recommended -// to define any indexes alongside chaincode so that the chaincode and supporting indexes -// are deployed automatically as a unit, once the chaincode has been installed on a peer and -// instantiated on a channel. See Hyperledger Fabric documentation for more details. -// -// If you have access to the your peer's CouchDB state database in a development environment, -// you may want to iteratively test various indexes in support of your chaincode queries. You -// can use the CouchDB Fauxton interface or a command line curl utility to create and update -// indexes. Then once you finalize an index, include the index definition alongside your -// chaincode in the META-INF/statedb/couchdb/indexes directory, for packaging and deployment -// to managed environments. -// -// In the examples below you can find index definitions that support marbles02 -// chaincode queries, along with the syntax that you can use in development environments -// to create the indexes in the CouchDB Fauxton interface or a curl command line utility. -// - -//Example hostname:port configurations to access CouchDB. -// -//To access CouchDB docker container from within another docker container or from vagrant environments: -// http://couchdb:5984/ -// -//Inside couchdb docker container -// http://127.0.0.1:5984/ - -// Index for docType, owner. -// -// Example curl command line to define index in the CouchDB channel_chaincode database -// curl -i -X POST -H "Content-Type: application/json" -d "{\"index\":{\"fields\":[\"docType\",\"owner\"]},\"name\":\"indexOwner\",\"ddoc\":\"indexOwnerDoc\",\"type\":\"json\"}" http://hostname:port/myc1_marbles/_index -// - -// Index for docType, owner, size (descending order). 
-// -// Example curl command line to define index in the CouchDB channel_chaincode database -// curl -i -X POST -H "Content-Type: application/json" -d "{\"index\":{\"fields\":[{\"size\":\"desc\"},{\"docType\":\"desc\"},{\"owner\":\"desc\"}]},\"ddoc\":\"indexSizeSortDoc\", \"name\":\"indexSizeSortDesc\",\"type\":\"json\"}" http://hostname:port/myc1_marbles/_index - -// Rich Query with index design doc and index name specified (Only supported if CouchDB is used as state database): -// peer chaincode query -C myc1 -n marbles -c '{"Args":["queryMarbles","{\"selector\":{\"docType\":\"marble\",\"owner\":\"tom\"}, \"use_index\":[\"_design/indexOwnerDoc\", \"indexOwner\"]}"]}' - -// Rich Query with index design doc specified only (Only supported if CouchDB is used as state database): -// peer chaincode query -C myc1 -n marbles -c '{"Args":["queryMarbles","{\"selector\":{\"docType\":{\"$eq\":\"marble\"},\"owner\":{\"$eq\":\"tom\"},\"size\":{\"$gt\":0}},\"fields\":[\"docType\",\"owner\",\"size\"],\"sort\":[{\"size\":\"desc\"}],\"use_index\":\"_design/indexSizeSortDoc\"}"]}' - -package main - -import ( - "bytes" - "encoding/json" - "fmt" - "strconv" - "strings" - "time" - - "github.com/hyperledger/fabric/core/chaincode/shim" - pb "github.com/hyperledger/fabric/protos/peer" -) - -// SimpleChaincode example simple Chaincode implementation -type SimpleChaincode struct { -} - -type marble struct { - ObjectType string `json:"docType"` //docType is used to distinguish the various types of objects in state database - Name string `json:"name"` //the fieldtags are needed to keep case from bouncing around - Color string `json:"color"` - Size int `json:"size"` - Owner string `json:"owner"` -} - -// =================================================================================== -// Main -// =================================================================================== -func main() { - err := shim.Start(new(SimpleChaincode)) - if err != nil { - fmt.Printf("Error starting Simple 
chaincode: %s", err) - } -} - -// Init initializes chaincode -// =========================== -func (t *SimpleChaincode) Init(stub shim.ChaincodeStubInterface) pb.Response { - return shim.Success(nil) -} - -// Invoke - Our entry point for Invocations -// ======================================== -func (t *SimpleChaincode) Invoke(stub shim.ChaincodeStubInterface) pb.Response { - function, args := stub.GetFunctionAndParameters() - fmt.Println("invoke is running " + function) - - // Handle different functions - if function == "initMarble" { //create a new marble - return t.initMarble(stub, args) - } else if function == "transferMarble" { //change owner of a specific marble - return t.transferMarble(stub, args) - } else if function == "transferMarblesBasedOnColor" { //transfer all marbles of a certain color - return t.transferMarblesBasedOnColor(stub, args) - } else if function == "delete" { //delete a marble - return t.delete(stub, args) - } else if function == "readMarble" { //read a marble - return t.readMarble(stub, args) - } else if function == "queryMarblesByOwner" { //find marbles for owner X using rich query - return t.queryMarblesByOwner(stub, args) - } else if function == "queryMarbles" { //find marbles based on an ad hoc rich query - return t.queryMarbles(stub, args) - } else if function == "getHistoryForMarble" { //get history of values for a marble - return t.getHistoryForMarble(stub, args) - } else if function == "getMarblesByRange" { //get marbles based on range query - return t.getMarblesByRange(stub, args) - } else if function == "getMarblesByRangeWithPagination" { - return t.getMarblesByRangeWithPagination(stub, args) - } else if function == "queryMarblesWithPagination" { - return t.queryMarblesWithPagination(stub, args) - } - - fmt.Println("invoke did not find func: " + function) //error - return shim.Error("Received unknown function invocation") -} - -// ============================================================ -// initMarble - create a new marble, 
store into chaincode state -// ============================================================ -func (t *SimpleChaincode) initMarble(stub shim.ChaincodeStubInterface, args []string) pb.Response { - var err error - - // 0 1 2 3 - // "asdf", "blue", "35", "bob" - if len(args) != 4 { - return shim.Error("Incorrect number of arguments. Expecting 4") - } - - // ==== Input sanitation ==== - fmt.Println("- start init marble") - if len(args[0]) <= 0 { - return shim.Error("1st argument must be a non-empty string") - } - if len(args[1]) <= 0 { - return shim.Error("2nd argument must be a non-empty string") - } - if len(args[2]) <= 0 { - return shim.Error("3rd argument must be a non-empty string") - } - if len(args[3]) <= 0 { - return shim.Error("4th argument must be a non-empty string") - } - marbleName := args[0] - color := strings.ToLower(args[1]) - owner := strings.ToLower(args[3]) - size, err := strconv.Atoi(args[2]) - if err != nil { - return shim.Error("3rd argument must be a numeric string") - } - - // ==== Check if marble already exists ==== - marbleAsBytes, err := stub.GetState(marbleName) - if err != nil { - return shim.Error("Failed to get marble: " + err.Error()) - } else if marbleAsBytes != nil { - fmt.Println("This marble already exists: " + marbleName) - return shim.Error("This marble already exists: " + marbleName) - } - - // ==== Create marble object and marshal to JSON ==== - objectType := "marble" - marble := &marble{objectType, marbleName, color, size, owner} - marbleJSONasBytes, err := json.Marshal(marble) - if err != nil { - return shim.Error(err.Error()) - } - //Alternatively, build the marble json string manually if you don't want to use struct marshalling - //marbleJSONasString := `{"docType":"Marble", "name": "` + marbleName + `", "color": "` + color + `", "size": ` + strconv.Itoa(size) + `, "owner": "` + owner + `"}` - //marbleJSONasBytes := []byte(str) - - // === Save marble to state === - err = stub.PutState(marbleName, marbleJSONasBytes) - if err != 
nil { - return shim.Error(err.Error()) - } - - // ==== Index the marble to enable color-based range queries, e.g. return all blue marbles ==== - // An 'index' is a normal key/value entry in state. - // The key is a composite key, with the elements that you want to range query on listed first. - // In our case, the composite key is based on indexName~color~name. - // This will enable very efficient state range queries based on composite keys matching indexName~color~* - indexName := "color~name" - colorNameIndexKey, err := stub.CreateCompositeKey(indexName, []string{marble.Color, marble.Name}) - if err != nil { - return shim.Error(err.Error()) - } - // Save index entry to state. Only the key name is needed, no need to store a duplicate copy of the marble. - // Note - passing a 'nil' value will effectively delete the key from state, therefore we pass null character as value - value := []byte{0x00} - stub.PutState(colorNameIndexKey, value) - - // ==== Marble saved and indexed. Return success ==== - fmt.Println("- end init marble") - return shim.Success(nil) -} - -// =============================================== -// readMarble - read a marble from chaincode state -// =============================================== -func (t *SimpleChaincode) readMarble(stub shim.ChaincodeStubInterface, args []string) pb.Response { - var name, jsonResp string - var err error - - if len(args) != 1 { - return shim.Error("Incorrect number of arguments. 
Expecting name of the marble to query") - } - - name = args[0] - valAsbytes, err := stub.GetState(name) //get the marble from chaincode state - if err != nil { - jsonResp = "{\"Error\":\"Failed to get state for " + name + "\"}" - return shim.Error(jsonResp) - } else if valAsbytes == nil { - jsonResp = "{\"Error\":\"Marble does not exist: " + name + "\"}" - return shim.Error(jsonResp) - } - - return shim.Success(valAsbytes) -} - -// ================================================== -// delete - remove a marble key/value pair from state -// ================================================== -func (t *SimpleChaincode) delete(stub shim.ChaincodeStubInterface, args []string) pb.Response { - var jsonResp string - var marbleJSON marble - if len(args) != 1 { - return shim.Error("Incorrect number of arguments. Expecting 1") - } - marbleName := args[0] - - // to maintain the color~name index, we need to read the marble first and get its color - valAsbytes, err := stub.GetState(marbleName) //get the marble from chaincode state - if err != nil { - jsonResp = "{\"Error\":\"Failed to get state for " + marbleName + "\"}" - return shim.Error(jsonResp) - } else if valAsbytes == nil { - jsonResp = "{\"Error\":\"Marble does not exist: " + marbleName + "\"}" - return shim.Error(jsonResp) - } - - err = json.Unmarshal([]byte(valAsbytes), &marbleJSON) - if err != nil { - jsonResp = "{\"Error\":\"Failed to decode JSON of: " + marbleName + "\"}" - return shim.Error(jsonResp) - } - - err = stub.DelState(marbleName) //remove the marble from chaincode state - if err != nil { - return shim.Error("Failed to delete state:" + err.Error()) - } - - // maintain the index - indexName := "color~name" - colorNameIndexKey, err := stub.CreateCompositeKey(indexName, []string{marbleJSON.Color, marbleJSON.Name}) - if err != nil { - return shim.Error(err.Error()) - } - - // Delete index entry to state. 
- err = stub.DelState(colorNameIndexKey) - if err != nil { - return shim.Error("Failed to delete state:" + err.Error()) - } - return shim.Success(nil) -} - -// =========================================================== -// transfer a marble by setting a new owner name on the marble -// =========================================================== -func (t *SimpleChaincode) transferMarble(stub shim.ChaincodeStubInterface, args []string) pb.Response { - - // 0 1 - // "name", "bob" - if len(args) < 2 { - return shim.Error("Incorrect number of arguments. Expecting 2") - } - - marbleName := args[0] - newOwner := strings.ToLower(args[1]) - fmt.Println("- start transferMarble ", marbleName, newOwner) - - marbleAsBytes, err := stub.GetState(marbleName) - if err != nil { - return shim.Error("Failed to get marble:" + err.Error()) - } else if marbleAsBytes == nil { - return shim.Error("Marble does not exist") - } - - marbleToTransfer := marble{} - err = json.Unmarshal(marbleAsBytes, &marbleToTransfer) //unmarshal it aka JSON.parse() - if err != nil { - return shim.Error(err.Error()) - } - marbleToTransfer.Owner = newOwner //change the owner - - marbleJSONasBytes, _ := json.Marshal(marbleToTransfer) - err = stub.PutState(marbleName, marbleJSONasBytes) //rewrite the marble - if err != nil { - return shim.Error(err.Error()) - } - - fmt.Println("- end transferMarble (success)") - return shim.Success(nil) -} - -// =========================================================================================== -// constructQueryResponseFromIterator constructs a JSON array containing query results from -// a given result iterator -// =========================================================================================== -func constructQueryResponseFromIterator(resultsIterator shim.StateQueryIteratorInterface) (*bytes.Buffer, error) { - // buffer is a JSON array containing QueryResults - var buffer bytes.Buffer - buffer.WriteString("[") - - bArrayMemberAlreadyWritten := false - for 
resultsIterator.HasNext() { - queryResponse, err := resultsIterator.Next() - if err != nil { - return nil, err - } - // Add a comma before array members, suppress it for the first array member - if bArrayMemberAlreadyWritten == true { - buffer.WriteString(",") - } - buffer.WriteString("{\"Key\":") - buffer.WriteString("\"") - buffer.WriteString(queryResponse.Key) - buffer.WriteString("\"") - - buffer.WriteString(", \"Record\":") - // Record is a JSON object, so we write as-is - buffer.WriteString(string(queryResponse.Value)) - buffer.WriteString("}") - bArrayMemberAlreadyWritten = true - } - buffer.WriteString("]") - - return &buffer, nil -} - -// =========================================================================================== -// addPaginationMetadataToQueryResults adds QueryResponseMetadata, which contains pagination -// info, to the constructed query results -// =========================================================================================== -func addPaginationMetadataToQueryResults(buffer *bytes.Buffer, responseMetadata *pb.QueryResponseMetadata) *bytes.Buffer { - - buffer.WriteString("[{\"ResponseMetadata\":{\"RecordsCount\":") - buffer.WriteString("\"") - buffer.WriteString(fmt.Sprintf("%v", responseMetadata.FetchedRecordsCount)) - buffer.WriteString("\"") - buffer.WriteString(", \"Bookmark\":") - buffer.WriteString("\"") - buffer.WriteString(responseMetadata.Bookmark) - buffer.WriteString("\"}}]") - - return buffer -} - -// =========================================================================================== -// getMarblesByRange performs a range query based on the start and end keys provided. - -// Read-only function results are not typically submitted to ordering. If the read-only -// results are submitted to ordering, or if the query is used in an update transaction -// and submitted to ordering, then the committing peers will re-execute to guarantee that -// result sets are stable between endorsement time and commit time. 
The transaction is -// invalidated by the committing peers if the result set has changed between endorsement -// time and commit time. -// Therefore, range queries are a safe option for performing update transactions based on query results. -// =========================================================================================== -func (t *SimpleChaincode) getMarblesByRange(stub shim.ChaincodeStubInterface, args []string) pb.Response { - - if len(args) < 2 { - return shim.Error("Incorrect number of arguments. Expecting 2") - } - - startKey := args[0] - endKey := args[1] - - resultsIterator, err := stub.GetStateByRange(startKey, endKey) - if err != nil { - return shim.Error(err.Error()) - } - defer resultsIterator.Close() - - buffer, err := constructQueryResponseFromIterator(resultsIterator) - if err != nil { - return shim.Error(err.Error()) - } - - fmt.Printf("- getMarblesByRange queryResult:\n%s\n", buffer.String()) - - return shim.Success(buffer.Bytes()) -} - -// ==== Example: GetStateByPartialCompositeKey/RangeQuery ========================================= -// transferMarblesBasedOnColor will transfer marbles of a given color to a certain new owner. -// Uses a GetStateByPartialCompositeKey (range query) against color~name 'index'. -// Committing peers will re-execute range queries to guarantee that result sets are stable -// between endorsement time and commit time. The transaction is invalidated by the -// committing peers if the result set has changed between endorsement time and commit time. -// Therefore, range queries are a safe option for performing update transactions based on query results. -// =========================================================================================== -func (t *SimpleChaincode) transferMarblesBasedOnColor(stub shim.ChaincodeStubInterface, args []string) pb.Response { - - // 0 1 - // "color", "bob" - if len(args) < 2 { - return shim.Error("Incorrect number of arguments. 
Expecting 2") - } - - color := args[0] - newOwner := strings.ToLower(args[1]) - fmt.Println("- start transferMarblesBasedOnColor ", color, newOwner) - - // Query the color~name index by color - // This will execute a key range query on all keys starting with 'color' - coloredMarbleResultsIterator, err := stub.GetStateByPartialCompositeKey("color~name", []string{color}) - if err != nil { - return shim.Error(err.Error()) - } - defer coloredMarbleResultsIterator.Close() - - // Iterate through result set and for each marble found, transfer to newOwner - var i int - for i = 0; coloredMarbleResultsIterator.HasNext(); i++ { - // Note that we don't get the value (2nd return variable), we'll just get the marble name from the composite key - responseRange, err := coloredMarbleResultsIterator.Next() - if err != nil { - return shim.Error(err.Error()) - } - - // get the color and name from color~name composite key - objectType, compositeKeyParts, err := stub.SplitCompositeKey(responseRange.Key) - if err != nil { - return shim.Error(err.Error()) - } - returnedColor := compositeKeyParts[0] - returnedMarbleName := compositeKeyParts[1] - fmt.Printf("- found a marble from index:%s color:%s name:%s\n", objectType, returnedColor, returnedMarbleName) - - // Now call the transfer function for the found marble. 
- // Re-use the same function that is used to transfer individual marbles - response := t.transferMarble(stub, []string{returnedMarbleName, newOwner}) - // if the transfer failed break out of loop and return error - if response.Status != shim.OK { - return shim.Error("Transfer failed: " + response.Message) - } - } - - responsePayload := fmt.Sprintf("Transferred %d %s marbles to %s", i, color, newOwner) - fmt.Println("- end transferMarblesBasedOnColor: " + responsePayload) - return shim.Success([]byte(responsePayload)) -} - -// =======Rich queries ========================================================================= -// Two examples of rich queries are provided below (parameterized query and ad hoc query). -// Rich queries pass a query string to the state database. -// Rich queries are only supported by state database implementations -// that support rich query (e.g. CouchDB). -// The query string is in the syntax of the underlying state database. -// With rich queries there is no guarantee that the result set hasn't changed between -// endorsement time and commit time, aka 'phantom reads'. -// Therefore, rich queries should not be used in update transactions, unless the -// application handles the possibility of result set changes between endorsement and commit time. -// Rich queries can be used for point-in-time queries against a peer. -// ============================================================================================ - -// ===== Example: Parameterized rich query ================================================= -// queryMarblesByOwner queries for marbles based on a passed in owner. -// This is an example of a parameterized query where the query logic is baked into the chaincode, -// and accepting a single query parameter (owner). -// Only available on state databases that support rich query (e.g. 
CouchDB) -// ========================================================================================= -func (t *SimpleChaincode) queryMarblesByOwner(stub shim.ChaincodeStubInterface, args []string) pb.Response { - - // 0 - // "bob" - if len(args) < 1 { - return shim.Error("Incorrect number of arguments. Expecting 1") - } - - owner := strings.ToLower(args[0]) - - queryString := fmt.Sprintf("{\"selector\":{\"docType\":\"marble\",\"owner\":\"%s\"}}", owner) - - queryResults, err := getQueryResultForQueryString(stub, queryString) - if err != nil { - return shim.Error(err.Error()) - } - return shim.Success(queryResults) -} - -// ===== Example: Ad hoc rich query ======================================================== -// queryMarbles uses a query string to perform a query for marbles. -// Query string matching state database syntax is passed in and executed as is. -// Supports ad hoc queries that can be defined at runtime by the client. -// If this is not desired, follow the queryMarblesForOwner example for parameterized queries. -// Only available on state databases that support rich query (e.g. CouchDB) -// ========================================================================================= -func (t *SimpleChaincode) queryMarbles(stub shim.ChaincodeStubInterface, args []string) pb.Response { - - // 0 - // "queryString" - if len(args) < 1 { - return shim.Error("Incorrect number of arguments. Expecting 1") - } - - queryString := args[0] - - queryResults, err := getQueryResultForQueryString(stub, queryString) - if err != nil { - return shim.Error(err.Error()) - } - return shim.Success(queryResults) -} - -// ========================================================================================= -// getQueryResultForQueryString executes the passed in query string. -// Result set is built and returned as a byte array containing the JSON results. 
-// ========================================================================================= -func getQueryResultForQueryString(stub shim.ChaincodeStubInterface, queryString string) ([]byte, error) { - - fmt.Printf("- getQueryResultForQueryString queryString:\n%s\n", queryString) - - resultsIterator, err := stub.GetQueryResult(queryString) - if err != nil { - return nil, err - } - defer resultsIterator.Close() - - buffer, err := constructQueryResponseFromIterator(resultsIterator) - if err != nil { - return nil, err - } - - fmt.Printf("- getQueryResultForQueryString queryResult:\n%s\n", buffer.String()) - - return buffer.Bytes(), nil -} - -// ====== Pagination ========================================================================= -// Pagination provides a method to retrieve records with a defined pagesize and -// start point (bookmark). An empty string bookmark defines the first "page" of a query -// result. Paginated queries return a bookmark that can be used in -// the next query to retrieve the next page of results. Paginated queries extend -// rich queries and range queries to include a pagesize and bookmark. -// -// Two examples are provided in this example. The first is getMarblesByRangeWithPagination -// which executes a paginated range query. -// The second example is a paginated query for rich ad-hoc queries. -// ========================================================================================= - -// ====== Example: Pagination with Range Query =============================================== -// getMarblesByRangeWithPagination performs a range query based on the start & end key, -// page size and a bookmark. - -// The number of fetched records will be equal to or lesser than the page size. -// Paginated range queries are only valid for read only transactions. 
-// =========================================================================================== -func (t *SimpleChaincode) getMarblesByRangeWithPagination(stub shim.ChaincodeStubInterface, args []string) pb.Response { - - if len(args) < 4 { - return shim.Error("Incorrect number of arguments. Expecting 4") - } - - startKey := args[0] - endKey := args[1] - //return type of ParseInt is int64 - pageSize, err := strconv.ParseInt(args[2], 10, 32) - if err != nil { - return shim.Error(err.Error()) - } - bookmark := args[3] - - resultsIterator, responseMetadata, err := stub.GetStateByRangeWithPagination(startKey, endKey, int32(pageSize), bookmark) - if err != nil { - return shim.Error(err.Error()) - } - defer resultsIterator.Close() - - buffer, err := constructQueryResponseFromIterator(resultsIterator) - if err != nil { - return shim.Error(err.Error()) - } - - bufferWithPaginationInfo := addPaginationMetadataToQueryResults(buffer, responseMetadata) - - fmt.Printf("- getMarblesByRange queryResult:\n%s\n", bufferWithPaginationInfo.String()) - - return shim.Success(buffer.Bytes()) -} - -// ===== Example: Pagination with Ad hoc Rich Query ======================================================== -// queryMarblesWithPagination uses a query string, page size and a bookmark to perform a query -// for marbles. Query string matching state database syntax is passed in and executed as is. -// The number of fetched records would be equal to or lesser than the specified page size. -// Supports ad hoc queries that can be defined at runtime by the client. -// If this is not desired, follow the queryMarblesForOwner example for parameterized queries. -// Only available on state databases that support rich query (e.g. CouchDB) -// Paginated queries are only valid for read only transactions. 
-// ========================================================================================= -func (t *SimpleChaincode) queryMarblesWithPagination(stub shim.ChaincodeStubInterface, args []string) pb.Response { - - // 0 - // "queryString" - if len(args) < 3 { - return shim.Error("Incorrect number of arguments. Expecting 3") - } - - queryString := args[0] - //return type of ParseInt is int64 - pageSize, err := strconv.ParseInt(args[1], 10, 32) - if err != nil { - return shim.Error(err.Error()) - } - bookmark := args[2] - - queryResults, err := getQueryResultForQueryStringWithPagination(stub, queryString, int32(pageSize), bookmark) - if err != nil { - return shim.Error(err.Error()) - } - return shim.Success(queryResults) -} - -// ========================================================================================= -// getQueryResultForQueryStringWithPagination executes the passed in query string with -// pagination info. Result set is built and returned as a byte array containing the JSON results. 
-// ========================================================================================= -func getQueryResultForQueryStringWithPagination(stub shim.ChaincodeStubInterface, queryString string, pageSize int32, bookmark string) ([]byte, error) { - - fmt.Printf("- getQueryResultForQueryString queryString:\n%s\n", queryString) - - resultsIterator, responseMetadata, err := stub.GetQueryResultWithPagination(queryString, pageSize, bookmark) - if err != nil { - return nil, err - } - defer resultsIterator.Close() - - buffer, err := constructQueryResponseFromIterator(resultsIterator) - if err != nil { - return nil, err - } - - bufferWithPaginationInfo := addPaginationMetadataToQueryResults(buffer, responseMetadata) - - fmt.Printf("- getQueryResultForQueryString queryResult:\n%s\n", bufferWithPaginationInfo.String()) - - return buffer.Bytes(), nil -} - -func (t *SimpleChaincode) getHistoryForMarble(stub shim.ChaincodeStubInterface, args []string) pb.Response { - - if len(args) < 1 { - return shim.Error("Incorrect number of arguments. 
Expecting 1") - } - - marbleName := args[0] - - fmt.Printf("- start getHistoryForMarble: %s\n", marbleName) - - resultsIterator, err := stub.GetHistoryForKey(marbleName) - if err != nil { - return shim.Error(err.Error()) - } - defer resultsIterator.Close() - - // buffer is a JSON array containing historic values for the marble - var buffer bytes.Buffer - buffer.WriteString("[") - - bArrayMemberAlreadyWritten := false - for resultsIterator.HasNext() { - response, err := resultsIterator.Next() - if err != nil { - return shim.Error(err.Error()) - } - // Add a comma before array members, suppress it for the first array member - if bArrayMemberAlreadyWritten == true { - buffer.WriteString(",") - } - buffer.WriteString("{\"TxId\":") - buffer.WriteString("\"") - buffer.WriteString(response.TxId) - buffer.WriteString("\"") - - buffer.WriteString(", \"Value\":") - // if it was a delete operation on given key, then we need to set the - //corresponding value null. Else, we will write the response.Value - //as-is (as the Value itself a JSON marble) - if response.IsDelete { - buffer.WriteString("null") - } else { - buffer.WriteString(string(response.Value)) - } - - buffer.WriteString(", \"Timestamp\":") - buffer.WriteString("\"") - buffer.WriteString(time.Unix(response.Timestamp.Seconds, int64(response.Timestamp.Nanos)).String()) - buffer.WriteString("\"") - - buffer.WriteString(", \"IsDelete\":") - buffer.WriteString("\"") - buffer.WriteString(strconv.FormatBool(response.IsDelete)) - buffer.WriteString("\"") - - buffer.WriteString("}") - bArrayMemberAlreadyWritten = true - } - buffer.WriteString("]") - - fmt.Printf("- getHistoryForMarble returning:\n%s\n", buffer.String()) - - return shim.Success(buffer.Bytes()) -} diff --git a/app/platform/fabric/e2e-test/fabric/examples/chaincode/go/passthru/passthru.go b/app/platform/fabric/e2e-test/fabric/examples/chaincode/go/passthru/passthru.go deleted file mode 100644 index 49d978bf4..000000000 --- 
a/app/platform/fabric/e2e-test/fabric/examples/chaincode/go/passthru/passthru.go +++ /dev/null @@ -1,72 +0,0 @@ -/* -Copyright IBM Corp. 2016 All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package main - -import ( - "fmt" - "strings" - - "github.com/hyperledger/fabric/core/chaincode/shim" - pb "github.com/hyperledger/fabric/protos/peer" -) - -// PassthruChaincode passes thru invoke and query to another chaincode where -// called ChaincodeID = function -// called chaincode's function = args[0] -// called chaincode's args = args[1:] -type PassthruChaincode struct { -} - -func toChaincodeArgs(args ...string) [][]byte { - bargs := make([][]byte, len(args)) - for i, arg := range args { - bargs[i] = []byte(arg) - } - return bargs -} - -//Init func will return error if function has string "error" anywhere -func (p *PassthruChaincode) Init(stub shim.ChaincodeStubInterface) pb.Response { - function, _ := stub.GetFunctionAndParameters() - if strings.Index(function, "error") >= 0 { - return shim.Error(function) - } - return shim.Success([]byte(function)) -} - -//helper -func (p *PassthruChaincode) iq(stub shim.ChaincodeStubInterface, function string, args []string) pb.Response { - if function == "" { - return shim.Error("Chaincode ID not provided") - } - chaincodeID := function - - return stub.InvokeChaincode(chaincodeID, toChaincodeArgs(args...), "") -} - -// Invoke passes through the invoke call -func (p *PassthruChaincode) Invoke(stub shim.ChaincodeStubInterface) 
pb.Response { - function, args := stub.GetFunctionAndParameters() - return p.iq(stub, function, args) -} - -func main() { - err := shim.Start(new(PassthruChaincode)) - if err != nil { - fmt.Printf("Error starting Passthru chaincode: %s", err) - } -} diff --git a/app/platform/fabric/e2e-test/fabric/examples/chaincode/go/sleeper/sleeper.go b/app/platform/fabric/e2e-test/fabric/examples/chaincode/go/sleeper/sleeper.go deleted file mode 100644 index 088b598cb..000000000 --- a/app/platform/fabric/e2e-test/fabric/examples/chaincode/go/sleeper/sleeper.go +++ /dev/null @@ -1,124 +0,0 @@ -/* -Copyright IBM Corp. 2016 All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package main - -// Sleeper chaincode sleeps and works with one state variable -// Init - 1 param, a sleep time in millisecs -// Invoke - 4 or 3 params, "put" or "get", value to set and sleep time in millisecs -// -// Sleeper can be used to test the "chaincode.executetimeout" property - -import ( - "fmt" - "strconv" - "time" - - "github.com/hyperledger/fabric/core/chaincode/shim" - pb "github.com/hyperledger/fabric/protos/peer" -) - -// SleeperChaincode example simple Chaincode implementation -type SleeperChaincode struct { -} - -func (t *SleeperChaincode) sleep(sleepTime string) { - st, _ := strconv.Atoi(sleepTime) - if st >= 0 { - time.Sleep(time.Duration(st) * time.Millisecond) - } -} - -// Init initializes chaincode...all it does is sleep a bi -func (t *SleeperChaincode) Init(stub shim.ChaincodeStubInterface) pb.Response { - args := stub.GetStringArgs() - - if len(args) != 1 { - return shim.Error("Incorrect number of arguments. Expecting 1") - } - - sleepTime := args[0] - - t.sleep(sleepTime) - - return shim.Success(nil) -} - -// Invoke sets key/value and sleeps a bit -func (t *SleeperChaincode) Invoke(stub shim.ChaincodeStubInterface) pb.Response { - function, args := stub.GetFunctionAndParameters() - if function == "put" { - if len(args) != 3 { - return shim.Error("Incorrect number of arguments. Expecting 3") - } - - // Make payment of X units from A to B - return t.invoke(stub, args) - } else if function == "get" { - if len(args) != 2 { - return shim.Error("Incorrect number of arguments. Expecting 2") - } - - // the old "Query" is now implemtned in invoke - return t.query(stub, args) - } - - return shim.Error("Invalid invoke function name. 
Expecting \"put\" or \"get\"") -} - -// Transaction makes payment of X units from A to B -func (t *SleeperChaincode) invoke(stub shim.ChaincodeStubInterface, args []string) pb.Response { - // set state - key := args[0] - val := args[1] - - err := stub.PutState(key, []byte(val)) - if err != nil { - return shim.Error(err.Error()) - } - - sleepTime := args[2] - - //sleep for a bit - t.sleep(sleepTime) - - return shim.Success([]byte("OK")) -} - -// query callback representing the query of a chaincode -func (t *SleeperChaincode) query(stub shim.ChaincodeStubInterface, args []string) pb.Response { - key := args[0] - - // Get the state from the ledger - val, err := stub.GetState(key) - if err != nil { - return shim.Error(err.Error()) - } - - sleepTime := args[1] - - //sleep for a bit - t.sleep(sleepTime) - - return shim.Success(val) -} - -func main() { - err := shim.Start(new(SleeperChaincode)) - if err != nil { - fmt.Printf("Error starting Sleeper chaincode: %s", err) - } -} diff --git a/app/platform/fabric/e2e-test/feature/.env b/app/platform/fabric/e2e-test/feature/.env deleted file mode 100644 index aeab38d40..000000000 --- a/app/platform/fabric/e2e-test/feature/.env +++ /dev/null @@ -1,30 +0,0 @@ -CONFIGTX_ORDERER_BATCHSIZE_MAXMESSAGECOUNT=10 -CONFIGTX_ORDERER_BATCHTIMEOUT=2s -KAFKA_DEFAULT_REPLICATION_FACTOR=3 -FABRIC_LOGGING_SPEC=peer,endorser,nodeCmd,committer,orderer=DEBUG:INFO -ORDERER_GENERAL_TLS_ENABLED=false -ORDERER_GENERAL_TLS_PRIVATEKEY=/var/hyperledger/tls/server.key -ORDERER_GENERAL_TLS_CERTIFICATE=/var/hyperledger/tls/server.crt -ORDERER_TLS_CLIENTAUTHREQUIRED=false -CORE_PEER_TLS_ENABLED=false -CORE_PEER_TLS_CERT_FILE=/var/hyperledger/tls/server.crt -CORE_PEER_TLS_KEY_FILE=/var/hyperledger/tls/server.key -CORE_PEER_TLS_CLIENTAUTHREQUIRED=false -FABRIC_CA_SERVER_TLS_ENABLED=false -CORE_PEER_GOSSIP_ORGLEADER_PEER0_ORG1=false -CORE_PEER_GOSSIP_USELEADERELECTION_PEER0_ORG1=true -CORE_PEER_GOSSIP_ORGLEADER_PEER0_ORG2=false 
-CORE_PEER_GOSSIP_USELEADERELECTION_PEER0_ORG2=true -CORE_PEER_GOSSIP_ORGLEADER_PEER0_ORG3=false -CORE_PEER_GOSSIP_USELEADERELECTION_PEER0_ORG3=true -CORE_PEER_GOSSIP_ORGLEADER_PEER1_ORG1=false -CORE_PEER_GOSSIP_USELEADERELECTION_PEER1_ORG1=true -CORE_PEER_GOSSIP_ORGLEADER_PEER1_ORG2=false -CORE_PEER_GOSSIP_USELEADERELECTION_PEER1_ORG2=true -CORE_PEER_GOSSIP_ORGLEADER_PEER1_ORG3=false -CORE_PEER_GOSSIP_USELEADERELECTION_PEER1_ORG3=true -ORDERER_ABSOLUTEMAXBYTES=10 MB -ORDERER_PREFERREDMAXBYTES=512 KB -KAFKA_MESSAGE_MAX_BYTES=1000012 B -KAFKA_REPLICA_FETCH_MAX_BYTES=1048576 B -KAFKA_REPLICA_FETCH_RESPONSE_MAX_BYTES=10485760 B diff --git a/app/platform/fabric/e2e-test/feature/CouchDB_index.feature b/app/platform/fabric/e2e-test/feature/CouchDB_index.feature deleted file mode 100644 index f0eedb97a..000000000 --- a/app/platform/fabric/e2e-test/feature/CouchDB_index.feature +++ /dev/null @@ -1,450 +0,0 @@ -# -# Copyright IBM Corp. All Rights Reserved. -# -# SPDX-License-Identifier: Apache-2.0 -# - -Feature: Testing Fabric CouchDB indexing - - @daily - Scenario Outline: : Test CouchDB indexing using marbles chaincode using with 1 channels and 1 index with 1 selector - Given I have a bootstrapped fabric network of type kafka using state-database couchdb with tls - When a user defines a couchDB index named index_behave_test with design document name "indexdoc_behave_test" containing the fields "size" to the chaincode at path "" - - # set up 1 channels, 1 cc - When an admin sets up a channel named "mychannel1" - And an admin deploys chaincode at path "" with args [""] with name "mycc1" with language "" on channel "mychannel1" - - # Invoke in the channel - When a user invokes on the channel "mychannel1" using chaincode named "mycc1" with args ["initMarble","marble100","red","5","cassey"] on "peer0.org1.example.com" - - # Do sanity-check rich query - When a user queries on the channel "mychannel1" using chaincode named "mycc1" with args 
["queryMarbles","{\\"selector\\":{\\"size\\":5}, \\"use_index\\":[\\"_design/indexdoc_behave_test\\", \\"index_behave_test\\"]}"] on "peer0.org1.example.com" - Then a user receives a response containing "owner":"cassey" - - # Explicitly check with CouchDB to confirm the index is set up correctly for the rich query to pass using index - When a user requests to get the design doc "indexdoc_behave_test" for the chaincode named "mycc1" in the channel "mychannel1" and from the CouchDB instance "http://localhost:5984" - Then a user receives success response of ["views":{"index_behave_test":{"map":{"fields":{"size":"asc"}] from the couchDB container - -Examples: - | cc_path | index_path | language | jira_num | - | github.com/hyperledger/fabric-samples/chaincode/marbles02/go | github.com/hyperledger/fabric-samples/chaincode/marbles02/go | GOLANG | FAB-7251 | - | ../../fabric-samples/chaincode/marbles02/node | ../fabric-samples/chaincode/marbles02/node | NODE | FAB-7254 | - - -@daily - Scenario Outline: : Test CouchDB indexing using marbles chaincode using with 3 channels and 1 index with 3 selectors - Given I have a bootstrapped fabric network of type kafka using state-database couchdb with tls - When a user defines a couchDB index named index_behave_test with design document name "indexdoc_behave_test" containing the fields "owner,docType,color" to the chaincode at path "" - - # set up 3 channels, each with one unique chaincode - When an admin sets up a channel named "mychannel1" - And an admin sets up a channel named "mychannel2" - And an admin sets up a channel named "mychannel3" - And an admin deploys chaincode at path "" with args [""] with name "mycc1" with language "" on channel "mychannel1" - And an admin deploys chaincode at path "" with args [""] with name "mycc2" with language "" on channel "mychannel2" - And an admin deploys chaincode at path "" with args [""] with name "mycc3" with language "" on channel "mychannel3" - - # Invoke in each channel - When a user 
invokes on the channel "mychannel1" using chaincode named "mycc1" with args ["initMarble","marble1","green","10","matt"] on "peer0.org1.example.com" - And a user invokes on the channel "mychannel2" using chaincode named "mycc2" with args ["initMarble","marble2","yellow","20","alex"] on "peer0.org1.example.com" - And a user invokes on the channel "mychannel3" using chaincode named "mycc3" with args ["initMarble","marble3","red","5","jose"] on "peer0.org1.example.com" - - # Do sanity-check rich query - When a user queries on the channel "mychannel1" using chaincode named "mycc1" with args ["queryMarbles", "{\\"selector\\":{\\"docType\\":\\"marble\\",\\"owner\\":\\"matt\\", \\"color\\":\\"green\\"}, \\"use_index\\":[\\"_design/indexdoc_behave_test\\", \\"index_behave_test\\"]}"] on "peer0.org1.example.com" - Then a user receives a response containing "owner":"matt" - When a user queries on the channel "mychannel2" using chaincode named "mycc2" with args ["queryMarbles", "{\\"selector\\":{\\"docType\\":\\"marble\\",\\"owner\\":\\"alex\\", \\"color\\":\\"yellow\\"}, \\"use_index\\":[\\"_design/indexdoc_behave_test\\", \\"index_behave_test\\"]}"] on "peer0.org1.example.com" - Then a user receives a response containing "owner":"alex" - When a user queries on the channel "mychannel3" using chaincode named "mycc3" with args ["queryMarbles", "{\\"selector\\":{\\"docType\\":\\"marble\\",\\"owner\\":\\"jose\\", \\"color\\":\\"red\\"}, \\"use_index\\":[\\"_design/indexdoc_behave_test\\", \\"index_behave_test\\"]}"] on "peer0.org1.example.com" - Then a user receives a response containing "owner":"jose" - - # Explicitly check with CouchDB to confirm the index is set up correctly for the rich query to pass using index - When a user requests to get the design doc "indexdoc_behave_test" for the chaincode named "mycc1" in the channel "mychannel1" and from the CouchDB instance "http://localhost:5984" - Then a user receives success response of 
["views":{"index_behave_test":{"map":{"fields":{"owner":"asc","docType":"asc","color":"asc"}] from the couchDB container - When a user requests to get the design doc "indexdoc_behave_test" for the chaincode named "mycc2" in the channel "mychannel2" and from the CouchDB instance "http://localhost:5984" - Then a user receives success response of ["views":{"index_behave_test":{"map":{"fields":{"owner":"asc","docType":"asc","color":"asc"}] from the couchDB container - When a user requests to get the design doc "indexdoc_behave_test" for the chaincode named "mycc3" in the channel "mychannel3" and from the CouchDB instance "http://localhost:5984" - Then a user receives success response of ["views":{"index_behave_test":{"map":{"fields":{"owner":"asc","docType":"asc","color":"asc"}] from the couchDB container - -Examples: - | cc_path | index_path | language | jira_num | - | github.com/hyperledger/fabric-samples/chaincode/marbles02/go | github.com/hyperledger/fabric-samples/chaincode/marbles02/go | GOLANG | FAB-7252 | - | ../../fabric-samples/chaincode/marbles02/node | ../fabric-samples/chaincode/marbles02/node | NODE | FAB-7255 | - - -@daily - Scenario Outline: : Test CouchDB indexing using marbles chaincode using with 3 channels and 3 index with 1 selector - Given I have a bootstrapped fabric network of type kafka using state-database couchdb with tls - When a user defines a couchDB index named index_behave_test_1 with design document name "indexdoc_behave_test_1" containing the fields "owner" to the chaincode at path "" - And a user defines a couchDB index named index_behave_test_2 with design document name "indexdoc_behave_test_2" containing the fields "docType" to the chaincode at path "" - And a user defines a couchDB index named index_behave_test_3 with design document name "indexdoc_behave_test_3" containing the fields "color" to the chaincode at path "" - - # set up 3 channel, 1 cc - When an admin sets up a channel named "mychannel1" - And an admin sets up a 
channel named "mychannel2" - And an admin sets up a channel named "mychannel3" - And an admin deploys chaincode at path "" with args [""] with name "mycc1" with language "" on channel "mychannel1" - And an admin deploys chaincode at path "" with args [""] with name "mycc2" with language "" on channel "mychannel2" - And an admin deploys chaincode at path "" with args [""] with name "mycc3" with language "" on channel "mychannel3" - - # Invoke in the channel - When a user invokes on the channel "mychannel1" using chaincode named "mycc1" with args ["initMarble","marble1","green","10","matt"] on "peer0.org1.example.com" - And a user invokes on the channel "mychannel2" using chaincode named "mycc2" with args ["initMarble","marble2","yellow","20","alex"] on "peer0.org1.example.com" - And a user invokes on the channel "mychannel3" using chaincode named "mycc3" with args ["initMarble","marble3","red","5","jose"] on "peer0.org1.example.com" - - # Do sanity-check rich query - When a user queries on the channel "mychannel1" using chaincode named "mycc1" with args ["queryMarbles", "{\\"selector\\":{\\"owner\\":\\"matt\\"}, \\"use_index\\":[\\"_design/indexdoc_behave_test_1\\", \\"index_behave_test_1\\"]}"] on "peer0.org1.example.com" - Then a user receives a response containing "owner":"matt" - When a user queries on the channel "mychannel2" using chaincode named "mycc2" with args ["queryMarbles", "{\\"selector\\":{\\"owner\\":\\"alex\\"}, \\"use_index\\":[\\"_design/indexdoc_behave_test_1\\", \\"index_behave_test_1\\"]}"] on "peer0.org1.example.com" - Then a user receives a response containing "owner":"alex" - When a user queries on the channel "mychannel3" using chaincode named "mycc3" with args ["queryMarbles", "{\\"selector\\":{\\"owner\\":\\"jose\\"}, \\"use_index\\":[\\"_design/indexdoc_behave_test_1\\", \\"index_behave_test_1\\"]}"] on "peer0.org1.example.com" - Then a user receives a response containing "owner":"jose" - When a user queries on the channel "mychannel1" 
using chaincode named "mycc1" with args ["queryMarbles", "{\\"selector\\":{\\"docType\\":\\"marble\\"}, \\"use_index\\":[\\"_design/indexdoc_behave_test_2\\", \\"index_behave_test_2\\"]}"] on "peer0.org1.example.com" - Then a user receives a response containing "owner":"matt" - When a user queries on the channel "mychannel2" using chaincode named "mycc2" with args ["queryMarbles", "{\\"selector\\":{\\"docType\\":\\"marble\\"}, \\"use_index\\":[\\"_design/indexdoc_behave_test_2\\", \\"index_behave_test_2\\"]}"] on "peer0.org1.example.com" - Then a user receives a response containing "owner":"alex" - When a user queries on the channel "mychannel3" using chaincode named "mycc3" with args ["queryMarbles", "{\\"selector\\":{\\"docType\\":\\"marble\\"}, \\"use_index\\":[\\"_design/indexdoc_behave_test_2\\", \\"index_behave_test_2\\"]}"] on "peer0.org1.example.com" - Then a user receives a response containing "owner":"jose" - When a user queries on the channel "mychannel1" using chaincode named "mycc1" with args ["queryMarbles", "{\\"selector\\":{\\"color\\":\\"green\\"}, \\"use_index\\":[\\"_design/indexdoc_behave_test_3\\", \\"index_behave_test_3\\"]}"] on "peer0.org1.example.com" - Then a user receives a response containing "owner":"matt" - When a user queries on the channel "mychannel2" using chaincode named "mycc2" with args ["queryMarbles", "{\\"selector\\":{\\"color\\":\\"yellow\\"}, \\"use_index\\":[\\"_design/indexdoc_behave_test_3\\", \\"index_behave_test_3\\"]}"] on "peer0.org1.example.com" - Then a user receives a response containing "owner":"alex" - When a user queries on the channel "mychannel3" using chaincode named "mycc3" with args ["queryMarbles", "{\\"selector\\":{\\"color\\":\\"red\\"}, \\"use_index\\":[\\"_design/indexdoc_behave_test_3\\", \\"index_behave_test_3\\"]}"] on "peer0.org1.example.com" - Then a user receives a response containing "owner":"jose" - - # Explicitly check with CouchDB to confirm the index is set up correctly for the rich query 
to pass using index - When a user requests to get the design doc "indexdoc_behave_test_1" for the chaincode named "mycc1" in the channel "mychannel1" and from the CouchDB instance "http://localhost:5984" - Then a user receives success response of ["views":{"index_behave_test_1":{"map":{"fields":{"owner":"asc"}] from the couchDB container - When a user requests to get the design doc "indexdoc_behave_test_1" for the chaincode named "mycc2" in the channel "mychannel2" and from the CouchDB instance "http://localhost:5984" - Then a user receives success response of ["views":{"index_behave_test_1":{"map":{"fields":{"owner":"asc"}] from the couchDB container - When a user requests to get the design doc "indexdoc_behave_test_1" for the chaincode named "mycc3" in the channel "mychannel3" and from the CouchDB instance "http://localhost:5984" - Then a user receives success response of ["views":{"index_behave_test_1":{"map":{"fields":{"owner":"asc"}] from the couchDB container - When a user requests to get the design doc "indexdoc_behave_test_2" for the chaincode named "mycc1" in the channel "mychannel1" and from the CouchDB instance "http://localhost:5984" - Then a user receives success response of ["views":{"index_behave_test_2":{"map":{"fields":{"docType":"asc"}] from the couchDB container - When a user requests to get the design doc "indexdoc_behave_test_2" for the chaincode named "mycc2" in the channel "mychannel2" and from the CouchDB instance "http://localhost:5984" - Then a user receives success response of ["views":{"index_behave_test_2":{"map":{"fields":{"docType":"asc"}] from the couchDB container - When a user requests to get the design doc "indexdoc_behave_test_2" for the chaincode named "mycc3" in the channel "mychannel3" and from the CouchDB instance "http://localhost:5984" - Then a user receives success response of ["views":{"index_behave_test_2":{"map":{"fields":{"docType":"asc"}] from the couchDB container - When a user requests to get the design doc 
"indexdoc_behave_test_3" for the chaincode named "mycc1" in the channel "mychannel1" and from the CouchDB instance "http://localhost:5984" - Then a user receives success response of ["views":{"index_behave_test_3":{"map":{"fields":{"color":"asc"}] from the couchDB container - When a user requests to get the design doc "indexdoc_behave_test_3" for the chaincode named "mycc2" in the channel "mychannel2" and from the CouchDB instance "http://localhost:5984" - Then a user receives success response of ["views":{"index_behave_test_3":{"map":{"fields":{"color":"asc"}] from the couchDB container - When a user requests to get the design doc "indexdoc_behave_test_3" for the chaincode named "mycc3" in the channel "mychannel3" and from the CouchDB instance "http://localhost:5984" - Then a user receives success response of ["views":{"index_behave_test_3":{"map":{"fields":{"color":"asc"}] from the couchDB container - - -Examples: - | cc_path | index_path | language | jira_num | - | github.com/hyperledger/fabric-samples/chaincode/marbles02/go | github.com/hyperledger/fabric-samples/chaincode/marbles02/go | GOLANG | FAB-7253 | - | ../../fabric-samples/chaincode/marbles02/node | ../fabric-samples/chaincode/marbles02/node | NODE | FAB-7256 | - - - Scenario Outline: : Test CouchDB indexing using CC upgrade with marbles chaincode using with 1 channel - Given I have a bootstrapped fabric network of type kafka using state-database couchdb with tls - When a user defines a couchDB index named index_behave_test with design document name "indexdoc_behave_test" containing the fields "owner,docType,color" to the chaincode at path "" - - # set up 1 channel, 1 cc - When an admin sets up a channel named "mychannel1" - And an admin deploys chaincode at path "" with version "0" with args [""] with name "mycc1" with language "" on channel "mychannel1" - - # Invoke in the channel - When a user invokes on the channel "mychannel1" using chaincode named "mycc1" with args 
["initMarble","marble1","green","10","matt"] on "peer0.org1.example.com" - - #add another index and deploy version 1 - When a user defines a couchDB index named index_behave_test_v1 with design document name "indexdoc_behave_test_v1" containing the fields "owner" to the chaincode at path "" - And an admin installs chaincode at path "" of language "" as version "1" with args [""] with name "mycc1" - And an admin upgrades the chaincode with name "mycc1" on channel "mychannel1" to version "1" with args [""] - - # Do sanity-check rich query - When a user queries on the channel "mychannel1" using chaincode named "mycc1" with args ["queryMarbles", "{\\"selector\\":{\\"docType\\":\\"marble\\",\\"owner\\":\\"matt\\", \\"color\\":\\"green\\"}, \\"use_index\\":[\\"_design/indexdoc_behave_test\\", \\"index_behave_test\\"]}"] on "peer0.org1.example.com" - Then a user receives a response containing "owner":"matt" - When a user queries on the channel "mychannel1" using chaincode named "mycc1" with args ["queryMarbles", "{\\"selector\\":{\\"owner\\":\\"matt\\"}, \\"use_index\\":[\\"_design/indexdoc_behave_test_v1\\", \\"index_behave_test_v1\\"]}"] on "peer0.org1.example.com" - Then a user receives a response containing "owner":"matt" - - # Explicitly check with CouchDB to confirm the index is set up correctly for the rich query to pass using index - When a user requests to get the design doc "indexdoc_behave_test_v1" for the chaincode named "mycc1" in the channel "mychannel1" and from the CouchDB instance "http://localhost:5984" - Then a user receives success response of ["views":{"index_behave_test_v1":{"map":{"fields":{"owner":"asc"}] from the couchDB container - When a user requests to get the design doc "indexdoc_behave_test" for the chaincode named "mycc1" in the channel "mychannel1" and from the CouchDB instance "http://localhost:5984" - Then a user receives success response of ["views":{"index_behave_test":{"map":{"fields":{"owner":"asc","docType":"asc","color":"asc"}] 
from the couchDB container - -Examples: - | cc_path | index_path | language | jira_num | - | github.com/hyperledger/fabric-samples/chaincode/marbles02/go | github.com/hyperledger/fabric-samples/chaincode/marbles02/go | GOLANG | FAB-7263 | - | ../../fabric-samples/chaincode/marbles02/node | ../fabric-samples/chaincode/marbles02/node | NODE | FAB-7268 | - - - @daily - Scenario Outline: : Test CouchDB indexing using CC upgrade with marbles chaincode using with 3 channels and 1 upgrade - Given I have a bootstrapped fabric network of type kafka using state-database couchdb with tls - When a user defines a couchDB index named index_behave_test with design document name "indexdoc_behave_test" containing the fields "owner,docType,color" to the chaincode at path "" - - # set up 3 channels, 1 cc each - When an admin sets up a channel named "mychannel1" - And an admin sets up a channel named "mychannel2" - And an admin sets up a channel named "mychannel3" - And an admin deploys chaincode at path "" with version "0" with args [""] with name "mycc1" with language "" on channel "mychannel1" - And an admin deploys chaincode at path "" with version "0" with args [""] with name "mycc2" with language "" on channel "mychannel2" - And an admin deploys chaincode at path "" with version "0" with args [""] with name "mycc3" with language "" on channel "mychannel3" - - # Invoke in the channels - When a user invokes on the channel "mychannel1" using chaincode named "mycc1" with args ["initMarble","marble1","green","10","matt"] on "peer0.org1.example.com" - And a user invokes on the channel "mychannel2" using chaincode named "mycc2" with args ["initMarble","marble2","yellow","20","alex"] on "peer0.org1.example.com" - And a user invokes on the channel "mychannel3" using chaincode named "mycc3" with args ["initMarble","marble3","red","5","jose"] on "peer0.org1.example.com" - - #add another index and deploy version 1 in 1 channel/cc only - When a user defines a couchDB index named 
index_behave_test_v1 with design document name "indexdoc_behave_test_v1" containing the fields "owner" to the chaincode at path "" - And an admin installs chaincode at path "" of language "" as version "1" with args [""] with name "mycc1" - And an admin upgrades the chaincode with name "mycc1" on channel "mychannel1" to version "1" with args [""] - - # Do sanity-check rich query - When a user queries on the channel "mychannel1" using chaincode named "mycc1" with args ["queryMarbles", "{\\"selector\\":{\\"docType\\":\\"marble\\",\\"owner\\":\\"matt\\", \\"color\\":\\"green\\"}, \\"use_index\\":[\\"_design/indexdoc_behave_test\\", \\"index_behave_test\\"]}"] on "peer0.org1.example.com" - Then a user receives a response containing "owner":"matt" - When a user queries on the channel "mychannel1" using chaincode named "mycc1" with args ["queryMarbles", "{\\"selector\\":{\\"owner\\":\\"matt\\"}, \\"use_index\\":[\\"_design/indexdoc_behave_test_v1\\", \\"index_behave_test_v1\\"]}"] on "peer0.org1.example.com" - Then a user receives a response containing "owner":"matt" - When a user queries on the channel "mychannel2" using chaincode named "mycc2" with args ["queryMarbles", "{\\"selector\\":{\\"docType\\":\\"marble\\",\\"owner\\":\\"alex\\", \\"color\\":\\"yellow\\"}, \\"use_index\\":[\\"_design/indexdoc_behave_test\\", \\"index_behave_test\\"]}"] on "peer0.org1.example.com" - Then a user receives a response containing "owner":"alex" - When a user queries on the channel "mychannel2" using chaincode named "mycc2" with args ["queryMarbles", "{\\"selector\\":{\\"owner\\":\\"alex\\"}, \\"use_index\\":[\\"_design/indexdoc_behave_test_v1\\", \\"index_behave_test_v1\\"]}"] on "peer0.org1.example.com" - Then a user receives a response containing "owner":"alex" - When a user queries on the channel "mychannel3" using chaincode named "mycc3" with args ["queryMarbles", "{\\"selector\\":{\\"docType\\":\\"marble\\",\\"owner\\":\\"jose\\", \\"color\\":\\"red\\"}, 
\\"use_index\\":[\\"_design/indexdoc_behave_test\\", \\"index_behave_test\\"]}"] on "peer0.org1.example.com" - Then a user receives a response containing "owner":"jose" - When a user queries on the channel "mychannel3" using chaincode named "mycc3" with args ["queryMarbles", "{\\"selector\\":{\\"owner\\":\\"jose\\"}, \\"use_index\\":[\\"_design/indexdoc_behave_test_v1\\", \\"index_behave_test_v1\\"]}"] on "peer0.org1.example.com" - Then a user receives a response containing "owner":"jose" - - # Check index in CouchDB for channel1 with upgraded CC - When a user requests to get the design doc "indexdoc_behave_test_v1" for the chaincode named "mycc1" in the channel "mychannel1" and from the CouchDB instance "http://localhost:5984" - Then a user receives success response of ["views":{"index_behave_test_v1":{"map":{"fields":{"owner":"asc"}] from the couchDB container - When a user requests to get the design doc "indexdoc_behave_test" for the chaincode named "mycc1" in the channel "mychannel1" and from the CouchDB instance "http://localhost:5984" - Then a user receives success response of ["views":{"index_behave_test":{"map":{"fields":{"owner":"asc","docType":"asc","color":"asc"}] from the couchDB container - - #Check index in CouchDB for channel2 with non-upgraded CC - And I wait "2" seconds - When a user requests to get the design doc "indexdoc_behave_test_v1" for the chaincode named "mycc2" in the channel "mychannel2" and from the CouchDB instance "http://localhost:5984" - And I wait "2" seconds - Then a user receives error response of [{"error":"not_found","reason":"missing"}] from the couchDB container - When a user requests to get the design doc "indexdoc_behave_test" for the chaincode named "mycc2" in the channel "mychannel2" and from the CouchDB instance "http://localhost:5984" - Then a user receives success response of ["views":{"index_behave_test":{"map":{"fields":{"owner":"asc","docType":"asc","color":"asc"}] from the couchDB container - - #Check index in 
CouchDB for channel3 with non-upgraded CC - And I wait "2" seconds - When a user requests to get the design doc "indexdoc_behave_test_v1" for the chaincode named "mycc3" in the channel "mychannel3" and from the CouchDB instance "http://localhost:5984" - And I wait "2" seconds - Then a user receives error response of [{"error":"not_found","reason":"missing"}] from the couchDB container - When a user requests to get the design doc "indexdoc_behave_test" for the chaincode named "mycc3" in the channel "mychannel3" and from the CouchDB instance "http://localhost:5984" - Then a user receives success response of ["views":{"index_behave_test":{"map":{"fields":{"owner":"asc","docType":"asc","color":"asc"}] from the couchDB container - -Examples: - | cc_path | index_path | language | jira_num | - | github.com/hyperledger/fabric-samples/chaincode/marbles02/go | github.com/hyperledger/fabric-samples/chaincode/marbles02/go | GOLANG | FAB-7264 | - | ../../fabric-samples/chaincode/marbles02/node | ../fabric-samples/chaincode/marbles02/node | NODE | FAB-7269 | - - -@daily - Scenario Outline: : Test CouchDB indexing using CC upgrade with marbles chaincode using with 3 channels and 3 upgrade - Given I have a bootstrapped fabric network of type kafka using state-database couchdb with tls - When a user defines a couchDB index named index_behave_test with design document name "indexdoc_behave_test" containing the fields "owner,docType,color" to the chaincode at path "" - - # set up 3 channels, 1 cc each - When an admin sets up a channel named "mychannel1" - And an admin sets up a channel named "mychannel2" - And an admin sets up a channel named "mychannel3" - And an admin deploys chaincode at path "" with version "0" with args [""] with name "mycc1" with language "" on channel "mychannel1" - And an admin deploys chaincode at path "" with version "0" with args [""] with name "mycc2" with language "" on channel "mychannel2" - And an admin deploys chaincode at path "" with version "0" 
with args [""] with name "mycc3" with language "" on channel "mychannel3" - - # Invoke in the channels - When a user invokes on the channel "mychannel1" using chaincode named "mycc1" with args ["initMarble","marble1","green","10","matt"] on "peer0.org1.example.com" - And a user invokes on the channel "mychannel2" using chaincode named "mycc2" with args ["initMarble","marble2","yellow","20","alex"] on "peer0.org1.example.com" - And a user invokes on the channel "mychannel3" using chaincode named "mycc3" with args ["initMarble","marble3","red","5","jose"] on "peer0.org1.example.com" - - #add another index and deploy version 1 in all 3 channel-cc - When a user defines a couchDB index named index_behave_test_v1 with design document name "indexdoc_behave_test_v1" containing the fields "owner" to the chaincode at path "" - And an admin installs chaincode at path "" of language "" as version "1" with args [""] with name "mycc1" - And an admin upgrades the chaincode with name "mycc1" on channel "mychannel1" to version "1" with args [""] - And an admin installs chaincode at path "" of language "" as version "1" with args [""] with name "mycc2" - And an admin upgrades the chaincode with name "mycc2" on channel "mychannel2" to version "1" with args [""] - And an admin installs chaincode at path "" of language "" as version "1" with args [""] with name "mycc3" - And an admin upgrades the chaincode with name "mycc3" on channel "mychannel3" to version "1" with args [""] - - # Do sanity-check rich query - When a user queries on the channel "mychannel1" using chaincode named "mycc1" with args ["queryMarbles", "{\\"selector\\":{\\"docType\\":\\"marble\\",\\"owner\\":\\"matt\\", \\"color\\":\\"green\\"}, \\"use_index\\":[\\"_design/indexdoc_behave_test\\", \\"index_behave_test\\"]}"] on "peer0.org1.example.com" - Then a user receives a response containing "owner":"matt" - When a user queries on the channel "mychannel1" using chaincode named "mycc1" with args ["queryMarbles", 
"{\\"selector\\":{\\"owner\\":\\"matt\\"}, \\"use_index\\":[\\"_design/indexdoc_behave_test_v1\\", \\"index_behave_test_v1\\"]}"] on "peer0.org1.example.com" - Then a user receives a response containing "owner":"matt" - When a user queries on the channel "mychannel2" using chaincode named "mycc2" with args ["queryMarbles", "{\\"selector\\":{\\"docType\\":\\"marble\\",\\"owner\\":\\"alex\\", \\"color\\":\\"yellow\\"}, \\"use_index\\":[\\"_design/indexdoc_behave_test\\", \\"index_behave_test\\"]}"] on "peer0.org1.example.com" - Then a user receives a response containing "owner":"alex" - When a user queries on the channel "mychannel2" using chaincode named "mycc2" with args ["queryMarbles", "{\\"selector\\":{\\"owner\\":\\"alex\\"}, \\"use_index\\":[\\"_design/indexdoc_behave_test_v1\\", \\"index_behave_test_v1\\"]}"] on "peer0.org1.example.com" - Then a user receives a response containing "owner":"alex" - When a user queries on the channel "mychannel3" using chaincode named "mycc3" with args ["queryMarbles", "{\\"selector\\":{\\"docType\\":\\"marble\\",\\"owner\\":\\"jose\\", \\"color\\":\\"red\\"}, \\"use_index\\":[\\"_design/indexdoc_behave_test\\", \\"index_behave_test\\"]}"] on "peer0.org1.example.com" - Then a user receives a response containing "owner":"jose" - When a user queries on the channel "mychannel3" using chaincode named "mycc3" with args ["queryMarbles", "{\\"selector\\":{\\"owner\\":\\"jose\\"}, \\"use_index\\":[\\"_design/indexdoc_behave_test_v1\\", \\"index_behave_test_v1\\"]}"] on "peer0.org1.example.com" - Then a user receives a response containing "owner":"jose" - - # Check index in CouchDB for channel1 with upgraded CC - When a user requests to get the design doc "indexdoc_behave_test_v1" for the chaincode named "mycc1" in the channel "mychannel1" and from the CouchDB instance "http://localhost:5984" - Then a user receives success response of ["views":{"index_behave_test_v1":{"map":{"fields":{"owner":"asc"}] from the couchDB container - When a 
user requests to get the design doc "indexdoc_behave_test" for the chaincode named "mycc1" in the channel "mychannel1" and from the CouchDB instance "http://localhost:5984" - Then a user receives success response of ["views":{"index_behave_test":{"map":{"fields":{"owner":"asc","docType":"asc","color":"asc"}] from the couchDB container - - # Check index in CouchDB for channel2 with upgraded CC - When a user requests to get the design doc "indexdoc_behave_test_v1" for the chaincode named "mycc2" in the channel "mychannel2" and from the CouchDB instance "http://localhost:5984" - Then a user receives success response of ["views":{"index_behave_test_v1":{"map":{"fields":{"owner":"asc"}] from the couchDB container - When a user requests to get the design doc "indexdoc_behave_test" for the chaincode named "mycc2" in the channel "mychannel2" and from the CouchDB instance "http://localhost:5984" - Then a user receives success response of ["views":{"index_behave_test":{"map":{"fields":{"owner":"asc","docType":"asc","color":"asc"}] from the couchDB container - - # Check index in CouchDB for channel3 with upgraded CC - When a user requests to get the design doc "indexdoc_behave_test_v1" for the chaincode named "mycc3" in the channel "mychannel3" and from the CouchDB instance "http://localhost:5984" - Then a user receives success response of ["views":{"index_behave_test_v1":{"map":{"fields":{"owner":"asc"}] from the couchDB container - When a user requests to get the design doc "indexdoc_behave_test" for the chaincode named "mycc3" in the channel "mychannel3" and from the CouchDB instance "http://localhost:5984" - Then a user receives success response of ["views":{"index_behave_test":{"map":{"fields":{"owner":"asc","docType":"asc","color":"asc"}] from the couchDB container - - -Examples: - | cc_path | index_path | language | jira_num | - | github.com/hyperledger/fabric-samples/chaincode/marbles02/go | github.com/hyperledger/fabric-samples/chaincode/marbles02/go | GOLANG | 
FAB-7265, FAB-7266, FAB-7267 | - | ../../fabric-samples/chaincode/marbles02/node | ../fabric-samples/chaincode/marbles02/node | NODE | FAB-7270, FAB-7271, FAB-7272 | - - -@daily - Scenario Outline: : Test CouchDB indexing using install-after-instantiate with marbles chaincode using with 3 channels and 1 index - - Given I have a bootstrapped fabric network of type kafka using state-database couchdb with tls - When a user defines a couchDB index named index_behave_test with design document name "indexdoc_behave_test" containing the fields "owner,docType,color" to the chaincode at path "" - - # set up 3 channels, 1 cc each - When an admin sets up a channel named "mychannel1" - And an admin sets up a channel named "mychannel2" - And an admin sets up a channel named "mychannel3" - - # install the 3 ccs only in 3 (out of 4) peers and instantiate them - And an admin installs chaincode at path "" of language "" as version "0" with args [""] with name "mycc1" to "peer0.org1.example.com" - And an admin installs chaincode at path "" of language "" as version "0" with args [""] with name "mycc1" to "peer1.org2.example.com" - And an admin installs chaincode at path "" of language "" as version "0" with args [""] with name "mycc1" to "peer0.org2.example.com" - And an admin instantiates the chaincode on channel "mychannel1" on peer "peer0.org1.example.com" - - And an admin installs chaincode at path "" of language "" as version "0" with args [""] with name "mycc2" to "peer0.org1.example.com" - And an admin installs chaincode at path "" of language "" as version "0" with args [""] with name "mycc2" to "peer1.org2.example.com" - And an admin installs chaincode at path "" of language "" as version "0" with args [""] with name "mycc2" to "peer0.org2.example.com" - And an admin instantiates the chaincode on channel "mychannel2" on peer "peer0.org1.example.com" - - And an admin installs chaincode at path "" of language "" as version "0" with args [""] with name "mycc3" to 
"peer0.org1.example.com" - And an admin installs chaincode at path "" of language "" as version "0" with args [""] with name "mycc3" to "peer1.org2.example.com" - And an admin installs chaincode at path "" of language "" as version "0" with args [""] with name "mycc3" to "peer0.org2.example.com" - And an admin instantiates the chaincode on channel "mychannel3" on peer "peer0.org1.example.com" - And I wait "10" seconds - - # Invoke in the channels - When a user invokes on the channel "mychannel1" using chaincode named "mycc1" with args ["initMarble","marble1","green","10","matt"] on "peer0.org1.example.com" - And a user invokes on the channel "mychannel2" using chaincode named "mycc2" with args ["initMarble","marble2","yellow","20","alex"] on "peer0.org1.example.com" - And a user invokes on the channel "mychannel3" using chaincode named "mycc3" with args ["initMarble","marble3","red","5","jose"] on "peer0.org1.example.com" - - # Now the late-install in 4th peer - And an admin installs chaincode at path "" of language "" as version "0" with args [""] with name "mycc1" to "peer1.org1.example.com" - And an admin installs chaincode at path "" of language "" as version "0" with args [""] with name "mycc2" to "peer1.org1.example.com" - And an admin installs chaincode at path "" of language "" as version "0" with args [""] with name "mycc3" to "peer1.org1.example.com" - - # Do sanity-check rich query in 4th peer - When a user queries on the channel "mychannel1" using chaincode named "mycc1" with args ["queryMarbles", "{\\"selector\\":{\\"docType\\":\\"marble\\",\\"owner\\":\\"matt\\", \\"color\\":\\"green\\"}, \\"use_index\\":[\\"_design/indexdoc_behave_test\\", \\"index_behave_test\\"]}"] on "peer1.org1.example.com" - Then a user receives a response containing "owner":"matt" from "peer1.org1.example.com" - When a user queries on the channel "mychannel2" using chaincode named "mycc2" with args ["queryMarbles", 
"{\\"selector\\":{\\"docType\\":\\"marble\\",\\"owner\\":\\"alex\\", \\"color\\":\\"yellow\\"}, \\"use_index\\":[\\"_design/indexdoc_behave_test\\", \\"index_behave_test\\"]}"] on "peer1.org1.example.com" - Then a user receives a response containing "owner":"alex" from "peer1.org1.example.com" - When a user queries on the channel "mychannel3" using chaincode named "mycc3" with args ["queryMarbles", "{\\"selector\\":{\\"docType\\":\\"marble\\",\\"owner\\":\\"jose\\", \\"color\\":\\"red\\"}, \\"use_index\\":[\\"_design/indexdoc_behave_test\\", \\"index_behave_test\\"]}"] on "peer1.org1.example.com" - Then a user receives a response containing "owner":"jose" from "peer1.org1.example.com" - - # Explicitly check with CouchDB in 4th peer to confirm the index is set up correctly for the rich query to pass using index - When a user requests to get the design doc "indexdoc_behave_test" for the chaincode named "mycc1" in the channel "mychannel1" and from the CouchDB instance "http://localhost:8984" - Then a user receives success response of ["views":{"index_behave_test":{"map":{"fields":{"owner":"asc","docType":"asc","color":"asc"}] from the couchDB container - When a user requests to get the design doc "indexdoc_behave_test" for the chaincode named "mycc2" in the channel "mychannel2" and from the CouchDB instance "http://localhost:8984" - Then a user receives success response of ["views":{"index_behave_test":{"map":{"fields":{"owner":"asc","docType":"asc","color":"asc"}] from the couchDB container - When a user requests to get the design doc "indexdoc_behave_test" for the chaincode named "mycc3" in the channel "mychannel3" and from the CouchDB instance "http://localhost:8984" - Then a user receives success response of ["views":{"index_behave_test":{"map":{"fields":{"owner":"asc","docType":"asc","color":"asc"}] from the couchDB container - -Examples: - | cc_path | index_path | language | jira_num | - | github.com/hyperledger/fabric-samples/chaincode/marbles02/go | 
github.com/hyperledger/fabric-samples/chaincode/marbles02/go | GOLANG | FAB-7257, FAB-7258 | - | ../../fabric-samples/chaincode/marbles02/node | ../fabric-samples/chaincode/marbles02/node | NODE | FAB-7260, FAB-7261 | - - -@daily - Scenario Outline: : Test CouchDB indexing using install-after-instantiate with marbles chaincode using with 3 channels and 3 indexes - - Given I have a bootstrapped fabric network of type kafka using state-database couchdb with tls - When a user defines a couchDB index named index_behave_test_owner with design document name "indexdoc_behave_test" containing the fields "owner" to the chaincode at path "" - When a user defines a couchDB index named index_behave_test_docType with design document name "indexdoc_behave_test" containing the fields "docType" to the chaincode at path "" - When a user defines a couchDB index named index_behave_test_color with design document name "indexdoc_behave_test" containing the fields "color" to the chaincode at path "" - - # set up 3 channels, 1 cc each - When an admin sets up a channel named "mychannel1" - And an admin sets up a channel named "mychannel2" - And an admin sets up a channel named "mychannel3" - - # install the 3 ccs only in 3 (out of 4) peers and instantiate them - And an admin installs chaincode at path "" of language "" as version "0" with args [""] with name "mycc1" to "peer0.org1.example.com" - And an admin installs chaincode at path "" of language "" as version "0" with args [""] with name "mycc1" to "peer1.org2.example.com" - And an admin installs chaincode at path "" of language "" as version "0" with args [""] with name "mycc1" to "peer0.org2.example.com" - And an admin instantiates the chaincode on channel "mychannel1" on peer "peer0.org1.example.com" - - And an admin installs chaincode at path "" of language "" as version "0" with args [""] with name "mycc2" to "peer0.org1.example.com" - And an admin installs chaincode at path "" of language "" as version "0" with args [""] with 
name "mycc2" to "peer1.org2.example.com" - And an admin installs chaincode at path "" of language "" as version "0" with args [""] with name "mycc2" to "peer0.org2.example.com" - And an admin instantiates the chaincode on channel "mychannel2" on peer "peer0.org1.example.com" - - And an admin installs chaincode at path "" of language "" as version "0" with args [""] with name "mycc3" to "peer0.org1.example.com" - And an admin installs chaincode at path "" of language "" as version "0" with args [""] with name "mycc3" to "peer1.org2.example.com" - And an admin installs chaincode at path "" of language "" as version "0" with args [""] with name "mycc3" to "peer0.org2.example.com" - And an admin instantiates the chaincode on channel "mychannel3" on peer "peer0.org1.example.com" - And I wait "10" seconds - - # Invoke in the channels - When a user invokes on the channel "mychannel1" using chaincode named "mycc1" with args ["initMarble","marble1","green","10","matt"] on "peer0.org1.example.com" - And a user invokes on the channel "mychannel2" using chaincode named "mycc2" with args ["initMarble","marble2","yellow","20","alex"] on "peer0.org1.example.com" - And a user invokes on the channel "mychannel3" using chaincode named "mycc3" with args ["initMarble","marble3","red","5","jose"] on "peer0.org1.example.com" - - # Now the late-install in 4th peer - And an admin installs chaincode at path "" of language "" as version "0" with args [""] with name "mycc1" to "peer1.org1.example.com" - And an admin installs chaincode at path "" of language "" as version "0" with args [""] with name "mycc2" to "peer1.org1.example.com" - And an admin installs chaincode at path "" of language "" as version "0" with args [""] with name "mycc3" to "peer1.org1.example.com" - - # Do sanity-check rich query in 4th peer - When a user queries on the channel "mychannel1" using chaincode named "mycc1" with args ["queryMarbles", "{\\"selector\\":{\\"owner\\":\\"matt\\"}, 
\\"use_index\\":[\\"_design/indexdoc_behave_test\\", \\"index_behave_test_owner\\"]}"] on "peer1.org1.example.com" - Then a user receives a response containing "owner":"matt" from "peer1.org1.example.com" - When a user queries on the channel "mychannel2" using chaincode named "mycc2" with args ["queryMarbles", "{\\"selector\\":{\\"owner\\":\\"alex\\"}, \\"use_index\\":[\\"_design/indexdoc_behave_test\\", \\"index_behave_test_owner\\"]}"] on "peer1.org1.example.com" - Then a user receives a response containing "owner":"alex" from "peer1.org1.example.com" - When a user queries on the channel "mychannel3" using chaincode named "mycc3" with args ["queryMarbles", "{\\"selector\\":{\\"owner\\":\\"jose\\"}, \\"use_index\\":[\\"_design/indexdoc_behave_test\\", \\"index_behave_test_owner\\"]}"] on "peer1.org1.example.com" - Then a user receives a response containing "owner":"jose" from "peer1.org1.example.com" - When a user queries on the channel "mychannel1" using chaincode named "mycc1" with args ["queryMarbles", "{\\"selector\\":{\\"docType\\":\\"marble\\"}, \\"use_index\\":[\\"_design/indexdoc_behave_test\\", \\"index_behave_test_docType\\"]}"] on "peer1.org1.example.com" - Then a user receives a response containing "owner":"matt" from "peer1.org1.example.com" - When a user queries on the channel "mychannel2" using chaincode named "mycc2" with args ["queryMarbles", "{\\"selector\\":{\\"docType\\":\\"marble\\"}, \\"use_index\\":[\\"_design/indexdoc_behave_test\\", \\"index_behave_test_docType\\"]}"] on "peer1.org1.example.com" - Then a user receives a response containing "owner":"alex" from "peer1.org1.example.com" - When a user queries on the channel "mychannel3" using chaincode named "mycc3" with args ["queryMarbles", "{\\"selector\\":{\\"docType\\":\\"marble\\"}, \\"use_index\\":[\\"_design/indexdoc_behave_test\\", \\"index_behave_test_docType\\"]}"] on "peer1.org1.example.com" - Then a user receives a response containing "owner":"jose" from "peer1.org1.example.com" - 
When a user queries on the channel "mychannel1" using chaincode named "mycc1" with args ["queryMarbles", "{\\"selector\\":{ \\"color\\":\\"green\\"}, \\"use_index\\":[\\"_design/indexdoc_behave_test\\", \\"index_behave_test_color\\"]}"] on "peer1.org1.example.com" - Then a user receives a response containing "owner":"matt" from "peer1.org1.example.com" - When a user queries on the channel "mychannel2" using chaincode named "mycc2" with args ["queryMarbles", "{\\"selector\\":{ \\"color\\":\\"yellow\\"}, \\"use_index\\":[\\"_design/indexdoc_behave_test\\", \\"index_behave_test_color\\"]}"] on "peer1.org1.example.com" - Then a user receives a response containing "owner":"alex" from "peer1.org1.example.com" - When a user queries on the channel "mychannel3" using chaincode named "mycc3" with args ["queryMarbles", "{\\"selector\\":{ \\"color\\":\\"red\\"}, \\"use_index\\":[\\"_design/indexdoc_behave_test\\", \\"index_behave_test_color\\"]}"] on "peer1.org1.example.com" - Then a user receives a response containing "owner":"jose" from "peer1.org1.example.com" - - # Explicitly check with CouchDB to confirm the index is set up correctly for the rich query to pass using index - When a user requests to get the design doc "indexdoc_behave_test_1" for the chaincode named "mycc1" in the channel "mychannel1" and from the CouchDB instance "http://localhost:5984" - Then a user receives success response of ["views":{"index_behave_test_1":{"map":{"fields":{"owner":"asc"}] from the couchDB container - When a user requests to get the design doc "indexdoc_behave_test_1" for the chaincode named "mycc2" in the channel "mychannel2" and from the CouchDB instance "http://localhost:5984" - Then a user receives success response of ["views":{"index_behave_test_1":{"map":{"fields":{"owner":"asc"}] from the couchDB container - When a user requests to get the design doc "indexdoc_behave_test_1" for the chaincode named "mycc3" in the channel "mychannel3" and from the CouchDB instance 
"http://localhost:5984" - Then a user receives success response of ["views":{"index_behave_test_1":{"map":{"fields":{"owner":"asc"}] from the couchDB container - When a user requests to get the design doc "indexdoc_behave_test_2" for the chaincode named "mycc1" in the channel "mychannel1" and from the CouchDB instance "http://localhost:5984" - Then a user receives success response of ["views":{"index_behave_test_2":{"map":{"fields":{"docType":"asc"}] from the couchDB container - When a user requests to get the design doc "indexdoc_behave_test_2" for the chaincode named "mycc2" in the channel "mychannel2" and from the CouchDB instance "http://localhost:5984" - Then a user receives success response of ["views":{"index_behave_test_2":{"map":{"fields":{"docType":"asc"}] from the couchDB container - When a user requests to get the design doc "indexdoc_behave_test_2" for the chaincode named "mycc3" in the channel "mychannel3" and from the CouchDB instance "http://localhost:5984" - Then a user receives success response of ["views":{"index_behave_test_2":{"map":{"fields":{"docType":"asc"}] from the couchDB container - When a user requests to get the design doc "indexdoc_behave_test_3" for the chaincode named "mycc1" in the channel "mychannel1" and from the CouchDB instance "http://localhost:5984" - Then a user receives success response of ["views":{"index_behave_test_3":{"map":{"fields":{"color":"asc"}] from the couchDB container - When a user requests to get the design doc "indexdoc_behave_test_3" for the chaincode named "mycc2" in the channel "mychannel2" and from the CouchDB instance "http://localhost:5984" - Then a user receives success response of ["views":{"index_behave_test_3":{"map":{"fields":{"color":"asc"}] from the couchDB container - When a user requests to get the design doc "indexdoc_behave_test_3" for the chaincode named "mycc3" in the channel "mychannel3" and from the CouchDB instance "http://localhost:5984" - Then a user receives success response of 
["views":{"index_behave_test_3":{"map":{"fields":{"color":"asc"}] from the couchDB container - -Examples: - | cc_path | index_path | language | jira_num | - | github.com/hyperledger/fabric-samples/chaincode/marbles02/go | github.com/hyperledger/fabric-samples/chaincode/marbles02/go | GOLANG | FAB-7259 | - | ../../fabric-samples/chaincode/marbles02/node | ../fabric-samples/chaincode/marbles02/node | NODE | FAB-7262 | diff --git a/app/platform/fabric/e2e-test/feature/README.rst b/app/platform/fabric/e2e-test/feature/README.rst deleted file mode 100644 index f329cd429..000000000 --- a/app/platform/fabric/e2e-test/feature/README.rst +++ /dev/null @@ -1,349 +0,0 @@ -Behave tests for Hyperledger Fabric Feature and System Tests -============================================================ - -.. image:: http://cdn.softwaretestinghelp.com/wp-content/qa/uploads/2007/08/regression-testing.jpg - -Behave is a tool used for Behavior Driven Development (BDD) testing. It uses tests (feature files) written in a natural language called Gherkin. The tests are executed using python as the supporting code. - -BDD is an agile software development technique that encourages collaboration between developers, QA and non-technical or business participants in a software project. Feel free to read more about `BDD`_. - -.. _BDD: http://pythonhosted.org/behave/philosophy.html - - -This drectory contains a behave implementation of system and feature file testing for Hyperledger Fabric. - -Full documentation and usage examples for Behave can be found in the `online documentation`_. - -.. _online documentation: http://pythonhosted.org/behave/ - - -Continuous Integration (CI) Execution -------------------------------------- -The following are links to the Jenkins execution of these tests: - * `daily`_ - * `weekly`_ - * `release`_ - -.. _daily: https://jenkins.hyperledger.org/view/Daily -.. _weekly: https://jenkins.hyperledger.org/view/Weekly -.. 
_release: https://jenkins.hyperledger.org/view/Release - - -Pre-requisites --------------- -You must have the following installed: - * `python`_ (You must have 2.7 due to package incompatibilities) - * `docker`_ - * `docker-compose`_ - * `nodejs`_ - * `npm`_ - -Ensure that you have Docker for `Linux`_, `Mac`_ or `Windows`_ 1.12 or higher properly installed on your machine. - -.. _python: https://www.python.org/ -.. _docker: https://www.docker.com/ -.. _docker-compose: https://docs.docker.com/compose/ -.. _nodejs: https://nodejs.org/ -.. _npm: https://www.npmjs.com/ -.. _Linux: https://docs.docker.com/engine/installation/#supported-platforms -.. _Mac: https://docs.docker.com/engine/installation/mac/ -.. _Windows: https://docs.docker.com/engine/installation/windows/ - -You can install Behave and additional packages, such as the `Govendor`_, using the ``./scripts/install_behave.sh`` (useful for linux distros that use the apt packaging manager). This script installs additional packages into your OS environment. - -.. _Govendor: https://github.com/kardianos/govendor/ - -:: - - $ cd /path/to/fabric-test/feature; ../scripts/install_behave.sh - - -The following repositories are dependencies and are included as submodules at the designated locations - * `hyperledger-fabric`_: /path/to/fabric-test/fabric - * `hyperledger-fabric-ca`_: /path/to/fabric-test/fabric-ca - * `cello`_: /path/to/fabric-test/cello - -.. _hyperledger-fabric: https://github.com/hyperledger/fabric -.. _hyperledger-fabric-ca: https://github.com/hyperledger/fabric-ca -.. _cello: https://github.com/hyperledger/cello - -.. _Chaincode Vendoring: -The following commands must be executed - `govendor init` - `govendor add +external` - -inside the chaincodes folder for any chaincode (GO language program) that imports packages, outside the shim or protos folder. - -If the chaincode has imports from external third party, i.e. 
other than the packages in fabric outside the shim or protos, execute: - `govendor fetch << fully qualified package name here >>` - -Note: This vendoring can be done automatically for you, wherever your chaincode is located, by using some predefined feature steps. For exact syntax, search for "vendor" in the steps/basic_impl.py file, or search for "vendor" in an example testcase in the chaincodes.feature file. - -Caveats and Gotchas -------------------- -* This framework uses the `signal`_ package, which currently only works well in NIX environments. -* When there is a need for executing commands that contain pipes (|) in a table, the user will need to use bang (!) instead. This is to make sure that the pipe in the command does not conflict with the pipe in the behave table. - -.. _signal: https://docs.python.org/2/library/signal.html - - -================ -Using VirtualEnv -================ -It is also possible to execute these tests using `virtualenv`_. This allows for you to control your environment and ensure that the version of python and any other environment settings will be exactly what is needed regardless of the environment of the native machine. - -.. _virtualenv: http://docs.python-guide.org/en/latest/dev/virtualenvs/ - -There are instructions for setting up a virtualenv for executing behave tests located at ``fabric-test/fabric/bddtests/README.md``. Once these steps are completed and you have successfully setup the ``behave_venv`` virtual environment, execute the following before executing these behave tests. - -:: - - $ workon behave_venv - - -Getting Started ---------------- -Before executing the behave tests, it is assumed that there are docker images and tools that have already been built. - -================ -Areas of Testing -================ -BDD tests are testing functionality and feature behavior. 
With this in mind, the following are areas that we plan to be covered in these BDD tests: - * Basic setup (Happy Path) - * Orderer Functionality - * solo - * kafka - * Ledgers - * Endorser and committer peers - * Fabric-CA (used for SSL connections) - * Upgrades and Fallbacks - * Bootstrapping - * configtxgen - * cryptogen - * configtxlator - * Growing and shrinking networks - * Stopping and Starting components - * … and more (such as different tooling, messages sizes, special scenarios) - -The following are not covered in these BDD tests: - * scalability - * performance - * long running tests - * stress testing - - -====================== -Building docker images -====================== -When executing tests that are using docker-compose fabric-ca images, be sure to have the fabric-ca docker images built. You must perform a ``make docker`` in the ``/path/to/hyperledger/fabric-test/fabric-ca`` directory. - -The docker images for ``peer``, ``orderer``, ``kafka``, and ``zookeeper`` are needed. You must perform a ``make docker`` in the ``/path/to/hyperledger/fabric-test/fabric`` directory. - - -========================= -Building tool executables -========================= -The **configtxgen**, **configtxlator** and **cryptogen** tools are used when bootstrapping the networks in these tests. As a result, you must perform a ``make configtxgen cryptogen configtxlator`` in the ``/path/to/hyperledger/fabric-test/fabric`` directory. Be sure that the executable location is added to your PATH. - - -How to Contribute --------------------------- - -.. image:: http://i.imgur.com/ztYl4lG.jpg - -There are different ways that you can contribute in this area. - * Writing feature files - * Writing python test code to execute the feature files - * Adding docker-compose files for different network configurations - -To add your contributions to the Hyperledger Fabric-test project, please refer to the `Hyperledger Fabric Contribution`_ page for more details. - -.. 
_Hyperledger Fabric Contribution: http://hyperledger-fabric.readthedocs.io/en/latest/CONTRIBUTING.html - -=================================== -How Do I Write My Own Feature File? -=================================== -The feature files are written by anyone who understands the requirements. This can be a business analyst, quality analyst, manager, developer, customer, etc. The file describes a feature or part of a feature with representative examples of expected outcomes and behaviors. These files are plain-text and do not require any compilation. Each feature step maps to a python step implementation. When choosing appropriate key phrases when writing feature files, look in the `Helpful Tools`_ section for more information. - -The following is an example of a simple feature file: - -.. sourcecode:: gherkin - - Feature: Test to ensure I take the correct accessory - Scenario: Test what happens on a rainy day - Given it is a new day - When the day is rainy - And the day is cold - Then we should bring an umbrella - Scenario Outline: Test what to bring - Given it is a new day - When the day is - Then we should bring - Examples: Accessories - | weather | accessory | - | hot | swimsuit | - | cold | coat | - | cloudy | nothing | - - -Keywords that are used when writing feature files: - * **Feature** - * The introduction of the different feature test scenarios - * You can have multiple scenarios for a single feature - * **Scenario/Scenario Outline** - * The title and description of the test - * You can run the same test with multiple inputs - * **Given** - * Indicates a known state before any interaction with the system. - * **Avoid talking about user interaction.** - * **When** - * Key actions are performed on the system. - * This is the step which may or may not cause some state to change in your system. - * **Then** - * The observed and expected outcomes. - * **And** - * Can be used when layering any givens, whens, or thens. 
- - -======================== -Writing python test code -======================== -Feature steps used in the feature file scenarios are implemented in python files stored in the “steps” directory. As the python implementation code grows, fewer changes to the code base will be needed in order to add new tests. If you simply want to write feature files, you are free to do so using the existing predefined feature steps. - -The behave implementation files are named '*_impl.py*' and the utilities are named '*_util.py*' in the steps directory. - -Python implementation steps are identified using decorators which match the keyword from the feature file: 'given', 'when', 'then', and 'and'. The decorator accepts a string containing the rest of the phrase used in the scenario step it belongs to. - - -.. sourcecode:: python - - >>> from behave import * - >>> @given('it is a new day') - ... def step_impl(context): - ... # Do some work - ... pass - >>> @when('the day is {weather}') - ... def step_impl(context, weather): - ... weatherMap = {'rainy': 'an umbrella', - ... 'sunny': 'shades', - ... 'cold': 'a coat'} - ... context.accessory = weatherMap.get(weather, "nothing") - >>> @then('we should bring {accessory}') - ... def step_impl(context, accessory): - ... assert context.accessory == accessory, "You're taking the wrong accessory!" - - -==================== -Docker-Compose Files -==================== -These docker composition files are used when setting up and tearing down networks of different configurations. Different tests can use different docker compose files depending on the test scenario. We are currently using `version 2 docker compose`_ files. - -.. _version 2 docker compose: https://docs.docker.com/compose/compose-file/compose-file-v2/ - - -============================ -How to execute Feature tests -============================ -There are multiple ways to execute behave tests. 
- * Execute all feature tests in the current directory - * Execute all tests in a specific feature file - * Execute all tests with a specified tag - * Execute a specific test - * Remote execution - * Execute with logs - -Local Execution ---------------- -When executing the behave tests locally, there are some simple commands that may be useful. - -**Executes all tests in directory** -:: - - $ behave - -**Executes specific feature file** -:: - - $ behave mytestfile.feature - -**Executes tests labelled with tag** -:: - - $ behave -t mytag - -**Executes tests labelled with tags not running the tests with skip label** -:: - - $ behave --tags=mytag1 --tags=mytag2 --tags=-skip - -**Executes a specific test** -:: - - $ behave -n 'my scenario name' - -**Executes a specific test saving logs of containers only when there are failures** -:: - - $ behave -n 'my scenario name' -D logs=y - -**Executes a specific test saving logs of containers** -:: - - $ behave -n 'my scenario name' -D logs=force - -**Executes a suite of tests on a remote network** -:: - - $ behave -t remote -D network= - - -Remote Execution ---------------- -When executing the behave tests remotely, there is some setup that needs to take place. Namely, it is important to know the information for the network that you are testing on. - -Also, note that any tests that are deemed well-suited for execution on a remote network are tagged with "remote". - - -Helpful Tools -------------- -Behave and the BDD ecosystem have a number of `tools`_ and extensions to assist in the development of tests. These tools include features that will display what feature steps are available for each keyword. Feel free to explore and use the tools, depending on your editor of choice. - -.. _tools: http://behave.readthedocs.io/en/latest/behave_ecosystem.html - - -Helpful Docker Commands ------------------------ -There are many helpful Docker tutorials available online. 
Feel free to take a look at some helpful hints found in the `Hyperledger Fabric Docker Tips`_ page. - -.. _Hyperledger Fabric Docker Tips: https://hyperledger-fabric.readthedocs.io/en/latest/chaincode4ade.html#download-docker-images - -Some helpful docker commands when debugging: - * View running containers - * ``$ docker ps`` - * View all containers (active and non-active) - * ``$ docker ps -a`` - * Stop all Docker containers - * ``$ docker stop $(docker ps -a -q)`` - * Remove all containers. Adding the `-f` will issue a "force" kill - * ``$ docker rm -f $(docker ps -aq)`` - * Remove all images - * ``$ docker rmi -f $(docker images -q)`` - * Remove all images except for hyperledger/fabric-baseimage - * ``$ docker rmi $(docker images | grep -v 'hyperledger/fabric-baseimage:latest' | awk {'print $3'})`` - * Start a container - * ``$ docker start `` - * Stop a containerID - * ``$ docker stop `` - * View network settings for a specific container - * ``$ docker inspect `` - * View logs for a specific containerID - * ``$ docker logs -f `` - * View docker images installed locally - * ``$ docker images`` - * View networks currently running - * ``$ docker networks ls`` - * Remove a specific residual network - * ``$ docker networks rm `` - -.. Licensed under Creative Commons Attribution 4.0 International License - https://creativecommons.org/licenses/by/4.0/ diff --git a/app/platform/fabric/e2e-test/feature/bootstrap.feature b/app/platform/fabric/e2e-test/feature/bootstrap.feature deleted file mode 100644 index a1ec09830..000000000 --- a/app/platform/fabric/e2e-test/feature/bootstrap.feature +++ /dev/null @@ -1,124 +0,0 @@ -# Copyright IBM Corp. All Rights Reserved. 
-# -# SPDX-License-Identifier: Apache-2.0 -# - - -Feature: Bootstrapping Hyperledger Fabric - As a user I want to be able to bootstrap my fabric network - -@daily -Scenario: FAB-3635: Bootstrap Network from Configuration files - Given I have a fabric config file - When the network is bootstrapped for an orderer - Then the "orderer.block" file is generated - When the network is bootstrapped for a channel named "mychannel" - Then the "mychannel.tx" file is generated - -@daily -Scenario: FAB-3854: Ensure genesis block generated by configtxgen contains correct data - Given I have a fabric config file - When the network is bootstrapped for an orderer - Then the "orderer.block" file is generated - And the orderer block "orderer.block" contains MSP - And the orderer block "orderer.block" contains root_certs - And the orderer block "orderer.block" contains tls_root_certs - And the orderer block "orderer.block" contains Writers - And the orderer block "orderer.block" contains Readers - And the orderer block "orderer.block" contains BlockValidation - And the orderer block "orderer.block" contains HashingAlgorithm - And the orderer block "orderer.block" contains OrdererAddresses - And the orderer block "orderer.block" contains ChannelRestrictions - And the orderer block "orderer.block" contains ChannelCreationPolicy - And the orderer block "orderer.block" contains mod_policy - When the network is bootstrapped for a channel named "mychannel" - Then the "mychannel.tx" file is generated - And the channel transaction file "mychannel.tx" contains Consortium - And the channel transaction file "mychannel.tx" contains mychannel - And the channel transaction file "mychannel.tx" contains Admins - And the channel transaction file "mychannel.tx" contains Writers - And the channel transaction file "mychannel.tx" contains Readers - And the channel transaction file "mychannel.tx" contains mod_policy - -@daily -Scenario Outline: FAB-3858: Verify crypto material (TLS) generated by cryptogen - 
Given I have a crypto config file with orgs, peers, orderers, and users - When the crypto material is generated for TLS network - Then crypto directories are generated containing tls certificates for orgs, peers, orderers, and users - Examples: - | numOrgs | peersPerOrg | numOrderers | numUsers | - | 2 | 2 | 3 | 1 | - | 3 | 2 | 3 | 3 | - -@daily -Scenario Outline: FAB-3856: Verify crypto material (non-TLS) generated by cryptogen - Given I have a crypto config file with orgs, peers, orderers, and users - When the crypto material is generated - Then crypto directories are generated containing certificates for orgs, peers, orderers, and users - Examples: - | numOrgs | peersPerOrg | numOrderers | numUsers | - | 2 | 2 | 3 | 1 | - | 3 | 2 | 3 | 3 | - | 2 | 3 | 4 | 4 | - | 10 | 5 | 1 | 10 | - -@smoke -Scenario: Access to the fabric protobuf files - Given I test the access to the generated python protobuf files - Then there are no errors - -###Comment out temporarily so smoke test suite will pass. This will allow us to merge, and then -### can run all the daily tests. Some of the daily tests may fail (the ones that require modifying env vars). 
-###This test fails , probably because the more recent code and/or capabilities are needed to allow modifying the logspec -###@smoke -Scenario: Setting of environment variables - Given the KAFKA_DEFAULT_REPLICATION_FACTOR environment variable is 1 - And the CONFIGTX_ORDERER_BATCHTIMEOUT environment variable is 10 minutes - And the CONFIGTX_ORDERER_BATCHSIZE_MAXMESSAGECOUNT environment variable is 10 - And the FABRIC_LOGGING_SPEC environment variable is gossip.discovery=DEBUG:nodeCmd=DEBUG - And I have a bootstrapped fabric network of type kafka with tls - Then the KAFKA_DEFAULT_REPLICATION_FACTOR environment variable is 1 on node "kafka1" - And the CONFIGTX_ORDERER_BATCHTIMEOUT environment variable is 10 minutes on node "orderer0.example.com" - And the CONFIGTX_ORDERER_BATCHSIZE_MAXMESSAGECOUNT environment variable is 10 on node "orderer1.example.com" - And the ORDERER_GENERAL_TLS_ENABLED environment variable is true on node "orderer2.example.com" - And the CORE_PEER_TLS_ENABLED environment variable is true on node "peer0.org1.example.com" - And the FABRIC_LOGGING_SPEC environment variable is gossip.discovery=DEBUG:nodeCmd=DEBUG on node "peer1.org2.example.com" - And the logs on peer1.org2.example.com contains "\[gossip.discovery\] periodicalSendAlive -> DEBU" within 30 seconds - And the logs on peer1.org2.example.com contains "\[nodeCmd\] serve -> DEBU" within 15 seconds - - -#@doNotDecompose -@daily -Scenario Outline: FAB-4776/FAB-4777: Bring up a based network and check peers - Given I have a bootstrapped fabric network of type using state-database - When an admin sets up a channel - And an admin deploys chaincode - And the orderer node logs receiving the orderer block - And a user queries on the chaincode with args ["query","a"] - Then a user receives a success response of 100 - When an admin fetches genesis information using peer "peer1.org1.example.com" from "orderer0.example.com" to location "." 
- Then the config block file is fetched from peer "peer1.org1.example.com" at location "." - When a user queries on the chaincode with args ["query","a"] from "peer1.org1.example.com" - Then a user receives a success response of 100 from "peer1.org1.example.com" - When an admin fetches genesis information using peer "peer1.org2.example.com" from "orderer0.example.com" to location "." - Then the config block file is fetched from peer "peer1.org2.example.com" at location "." - When a user queries on the chaincode with args ["query","a"] from "peer1.org2.example.com" - Then a user receives a success response of 100 from "peer1.org2.example.com" -Examples: - | database | orderertype | - | leveldb | solo | -# | couchdb | kafka | - - -@daily -Scenario: FAB-4773: Fetching of a channel genesis block - Given I have a crypto config file with 2 orgs, 2 peers, 3 orderers, and 2 users - When the crypto material is generated for TLS network - Given I have a fabric config file - When the network is bootstrapped for an orderer - When I start a fabric network with TLS - When the network is bootstrapped for a channel named "mychannel" - When an admin creates a channel named "mychannel" - And an admin fetches genesis information for a channel "mychannel" using peer "peer1.org1.example.com" - Then the "mychannel.block" file is generated - Then the "mychannel.block" file is fetched from peer "peer1.org1.example.com" diff --git a/app/platform/fabric/e2e-test/feature/chaincodes.feature b/app/platform/fabric/e2e-test/feature/chaincodes.feature deleted file mode 100644 index 11d06c59f..000000000 --- a/app/platform/fabric/e2e-test/feature/chaincodes.feature +++ /dev/null @@ -1,528 +0,0 @@ -# -# Copyright IBM Corp. All Rights Reserved. 
-# -# SPDX-License-Identifier: Apache-2.0 -# - - -Feature: Chaincodes Testing - - -@daily -Scenario Outline: FAB-5797: Test chaincode fabric/examples/example02/cmd deploy, invoke, and query with chaincode install name in all lowercase/uppercase/mixedcase chars, for orderer - Given I have a bootstrapped fabric network of type - When an admin sets up a channel - And an admin deploys chaincode at path "github.com/hyperledger/fabric/examples/chaincode/go/example02/cmd" with args ["init","a","1000","b","2000"] with name "" - When a user queries on the chaincode named "" with args ["query","a"] - Then a user receives a success response of 1000 - When a user invokes on the chaincode named "" with args ["invoke","a","b","10"] - And I wait "3" seconds - When a user queries on the chaincode named "" with args ["query","a"] - Then a user receives a success response of 990 -Examples: - | type | ccName | - | solo | mycc | - | solo | MYCC | - | solo | MYcc_Test | - | kafka | mycc | - | kafka | MYCC | - | kafka | MYcc_Test | - -@daily -Scenario Outline: FAB-11808: Test the use of the network model API to successfully commit to the ledger - Given I have a bootstrapped fabric network of type - And I use the NodeJS SDK interface - When an admin sets up a channel - And an admin deploys chaincode at path "github.com/hyperledger/fabric/examples/chaincode/go/example02/cmd" with args ["init","a","1000","b","2000"] with name "mycc" - # evaluating a transaction == query, but using the network model API - When a user evaluates a transaction on the chaincode named "mycc" with args ["query","a"] - Then a user receives a success response of 1000 - # submitting a transaction == invoke, but using the network model API - When a user submits a transaction on the chaincode named "mycc" with args ["invoke","a","b","10"] - And I wait "5" seconds - When a user evaluates a transaction on the chaincode named "mycc" with args ["query","a"] - Then a user receives a success response of 990 -Examples: - | 
type | security | - | solo | without tls | - | kafka | with tls | - -@daily -Scenario: FAB-4703: FAB-5663, Test chaincode calling chaincode - fabric/examples/example04/cmd - Given I have a bootstrapped fabric network of type kafka - When an admin sets up a channel - And an admin deploys chaincode at path "github.com/hyperledger/fabric/examples/chaincode/go/example04/cmd" with args ["init","Event","1"] with name "myex04" - When an admin sets up a channel named "channel2" - And an admin deploys chaincode at path "github.com/hyperledger/fabric/examples/chaincode/go/example02/cmd" with args ["init","a","1000","b","2000"] with name "myex02_a" on channel "channel2" - When a user queries on the channel "channel2" using chaincode named "myex02_a" with args ["query","a"] - Then a user receives a success response of 1000 - When a user queries on the chaincode named "myex04" with args ["query","Event", "myex02_a", "a", "channel2"] - Then a user receives a success response of 1000 - -@shimAPI -@daily -Scenario: FAB-4717: FAB-5663, chaincode-to-chaincode testing passing in channel name as a third argument to chaincode_ex05 when cc_05 and cc_02 are on different channels - Given I have a bootstrapped fabric network of type kafka - When an admin sets up a channel - And an admin deploys chaincode at path "github.com/hyperledger/fabric/examples/chaincode/go/example05/cmd" with args ["init","sum","0"] with name "myex05" - When an admin sets up a channel named "channel2" - And an admin deploys chaincode at path "github.com/hyperledger/fabric/examples/chaincode/go/example02/cmd" with args ["init","a","1000","b","2000"] with name "myex02_b" on channel "channel2" - When a user queries on the channel "channel2" using chaincode named "myex02_b" with args ["query","a"] - Then a user receives a success response of 1000 - When a user queries on the chaincode named "myex05" with args ["query","myex02_b", "sum", "channel2"] - Then a user receives a success response of 3000 - - -@daily 
-Scenario: FAB-4718: FAB-5663, chaincode-to-chaincode testing passing an empty string for channel_name when cc_05 and cc_02 are on the same channel - Given I have a bootstrapped fabric network of type kafka - When an admin sets up a channel - And an admin deploys chaincode at path "github.com/hyperledger/fabric/examples/chaincode/go/example05/cmd" with args ["init","sum","0"] with name "myex05" - When an admin deploys chaincode at path "github.com/hyperledger/fabric/examples/chaincode/go/example02/cmd" with args ["init","a","1000","b","2000"] with name "myex02_b" - When a user queries on the chaincode named "myex02_b" with args ["query","a"] - Then a user receives a success response of 1000 - When a user queries on the chaincode named "myex05" with args ["query","myex02_b", "sum", ""] - Then a user receives a success response of 3000 - - -@daily -Scenario: FAB-4720: FAB-5663, Test chaincode calling chaincode -ve test case passing an incorrect or non-existing channnel name when cc_ex02 and cc_ex05 installed on same channels - Given I have a bootstrapped fabric network of type kafka - When an admin sets up a channel - And an admin deploys chaincode at path "github.com/hyperledger/fabric/examples/chaincode/go/example05/cmd" with args ["init","sum","0"] with name "myex05" - When an admin deploys chaincode at path "github.com/hyperledger/fabric/examples/chaincode/go/example02/cmd" with args ["init","a","1000","b","2000"] with name "myex02_b" - When a user queries on the chaincode named "myex02_b" with args ["query","a"] - Then a user receives a success response of 1000 - When a user queries on the chaincode named "myex05" with args ["query","myex02_b", "sum", "non-existing-channel"] - Then a user receives an error response of status:500 - And a user receives an error response of Failed to get policy manager for channel [non-existing-channel] - - -@daily -Scenario: FAB-4721: FAB-5663, Test chaincode calling chaincode -ve testcase passing an incorrect ot non-existing 
string for channelname when cc_ex02 and cc_ex05 installed on different channels - Given I have a bootstrapped fabric network of type kafka - When an admin sets up a channel - And an admin deploys chaincode at path "github.com/hyperledger/fabric/examples/chaincode/go/example05/cmd" with args ["init","sum","0"] with name "myex05" - When an admin sets up a channel named "channel2" - And an admin deploys chaincode at path "github.com/hyperledger/fabric/examples/chaincode/go/example02/cmd" with args ["init","a","1000","b","2000"] with name "myex02_b" on channel "channel2" - When a user queries on the channel "channel2" using chaincode named "myex02_b" with args ["query","a"] - Then a user receives a success response of 1000 - When a user queries on the chaincode named "myex05" with args ["query","myex02_b", "sum", "non-existing-channel"] - Then a user receives an error response of status:500 - And a user receives an error response of Failed to get policy manager for channel [non-existing-channel] - - -@daily -Scenario: FAB-4722: FAB-5663, Test chaincode calling chaincode -ve testcase passing an empty string for channelname when cc_ex02 and cc_ex05 installed on different channels - Given I have a bootstrapped fabric network of type kafka - When an admin sets up a channel - And an admin deploys chaincode at path "github.com/hyperledger/fabric/examples/chaincode/go/example05/cmd" with args ["init","sum","0"] with name "myex05" - When an admin sets up a channel named "channel2" - And an admin deploys chaincode at path "github.com/hyperledger/fabric/examples/chaincode/go/example02/cmd" with args ["init","a","1000","b","2000"] with name "myex02_b" on channel "channel2" - When a user queries on the channel "channel2" using chaincode named "myex02_b" with args ["query","a"] - Then a user receives a success response of 1000 - When a user queries on the chaincode named "myex05" with args ["query","myex02_b", "sum", ""] - Then a user receives an error response of status:500 - And 
a user receives an error response of chaincode myex02_b not found - -@daily -Scenario: FAB-5384: FAB-5663, Test chaincode calling chaincode with two args cc_ex02 and cc_ex05 installed on same channels - Given I have a bootstrapped fabric network of type kafka - When an admin sets up a channel - And an admin deploys chaincode at path "github.com/hyperledger/fabric/examples/chaincode/go/example05/cmd" with args ["init","sum","0"] with name "myex05" - When an admin deploys chaincode at path "github.com/hyperledger/fabric/examples/chaincode/go/example02/cmd" with args ["init","a","1000","b","2000"] with name "myex02_b" - When a user queries on the chaincode named "myex02_b" with args ["query","a"] - Then a user receives a success response of 1000 - When a user queries on the chaincode named "myex05" with args ["query","myex02_b", "sum"] - Then a user receives a success response of 3000 - - -@daily -Scenario Outline: FAB-3888: State Transfer Test, bouncing a non-leader peer, using marbles02, for orderer - Given the FABRIC_LOGGING_SPEC environment variable is gossip.election=DEBUG - And I have a bootstrapped fabric network of type - When an admin sets up a channel - And an admin deploys chaincode at path "github.com/hyperledger/fabric/examples/chaincode/go/marbles02" with args [""] with name "mycc" - - When a user invokes on the chaincode named "mycc" with args ["initMarble","marble1","red","35","tom"] - And I wait "3" seconds - When a user queries on the chaincode named "mycc" with args ["readMarble","marble1"] - Then a user receives a success response of {"docType":"marble","name":"marble1","color":"red","size":35,"owner":"tom"} - - When a user invokes on the chaincode named "mycc" with args ["initMarble","marble111","pink","55","jane"] - And I wait "3" seconds - When a user queries on the chaincode named "mycc" with args ["readMarble","marble111"] - Then a user receives a success response of 
{"docType":"marble","name":"marble111","color":"pink","size":55,"owner":"jane"} - - When the initial non-leader peer of "org1" is taken down - - And a user invokes on the chaincode named "mycc" with args ["transferMarble","marble111","jerry"] on the initial leader peer of "org1" - And I wait "5" seconds - When a user queries on the chaincode named "mycc" with args ["readMarble","marble111"] on the initial leader peer of "org1" - Then a user receives a success response of {"docType":"marble","name":"marble111","color":"pink","size":55,"owner":"jerry"} from the initial leader peer of "org1" - And I wait "3" seconds - When a user invokes on the chaincode named "mycc" with args ["transferMarble","marble111","tom"] on the initial leader peer of "org1" - And I wait "5" seconds - When a user queries on the chaincode named "mycc" with args ["readMarble","marble111"] on the initial leader peer of "org1" - Then a user receives a success response of {"docType":"marble","name":"marble111","color":"pink","size":55,"owner":"tom"} from the initial leader peer of "org1" - - When the initial non-leader peer of "org1" comes back up - - And I wait "30" seconds - When a user queries on the chaincode named "mycc" with args ["readMarble","marble111"] on the initial non-leader peer of "org1" - Then a user receives a success response of {"docType":"marble","name":"marble111","color":"pink","size":55,"owner":"tom"} from the initial non-leader peer of "org1" - - Examples: - | type | - | solo | - | kafka | - -@smoke -Scenario: FAB-6211: Test example02 chaincode written using NODE without tls - Given I have a bootstrapped fabric network of type solo without tls - When an admin sets up a channel - And an admin deploys chaincode at path "../../fabric-test/chaincodes/example02/node" with args ["init","a","1000","b","2000"] with name "mycc" with language "NODE" - When a user queries on the chaincode named "mycc" with args ["query","a"] - Then a user receives a success response of 1000 - When a 
user invokes on the chaincode named "mycc" with args ["invoke","a","b","10"] - And I wait "3" seconds - When a user queries on the chaincode named "mycc" with args ["query","a"] - Then a user receives a success response of 990 - When a user queries on the chaincode named "mycc" with args ["query","b"] - Then a user receives a success response of 2010 - - -@daily -Scenario Outline: FAB-6211: Test example02 chaincode written using - Given I have a bootstrapped fabric network of type solo - When an admin sets up a channel - And an admin deploys chaincode at path "" with args ["init","a","1000","b","2000"] with name "mycc" with language "" - When a user queries on the chaincode named "mycc" with args ["query","a"] - Then a user receives a success response of 1000 - When a user invokes on the chaincode named "mycc" with args ["invoke","a","b","10"] - And I wait "3" seconds - When a user queries on the chaincode named "mycc" with args ["query","a"] - Then a user receives a success response of 990 - When a user queries on the chaincode named "mycc" with args ["query","b"] - Then a user receives a success response of 2010 -Examples: - | path | language | security | - | github.com/hyperledger/fabric/examples/chaincode/go/example02/cmd | GOLANG | with tls | - | github.com/hyperledger/fabric/examples/chaincode/go/example02/cmd | GOLANG | without tls | - | ../../fabric-test/chaincodes/example02/node | NODE | with tls | - - - -@shimAPI -@daily -Scenario Outline: FAB-6256: Test support of rich queries in SHIM API: queryMarbles and queryMarblesByOwner using marbles chaincode on couchdb - Given I have a bootstrapped fabric network of type solo using state-database couchdb with tls - When an admin sets up a channel - And an admin deploys chaincode at path "" with args [""] with language "" - - When a user invokes on the chaincode with args ["initMarble","marble1","blue","35","tom"] - When a user invokes on the chaincode with args ["initMarble","marble2","red","50","tom"] - And I 
wait "3" seconds - When a user queries on the chaincode with args ["readMarble","marble1"] - Then a user receives a response containing "name":"marble1" - And a user receives a response containing "owner":"tom" - - When a user queries on the chaincode with args ["readMarble","marble2"] - Then a user receives a response containing "name":"marble2" - And a user receives a response containing "owner":"tom" - - # queryMarblesByOwner - When a user queries on the chaincode with args ["queryMarblesByOwner","tom"] - Then a user receives a response containing "Key":"marble1" - And a user receives a response containing "name":"marble1" - And a user receives a response containing "owner":"tom" - And a user receives a response containing "Key":"marble2" - And a user receives a response containing "name":"marble2" - - # queryMarbles - When a user queries on the chaincode with args ["queryMarbles","{\\"selector\\":{\\"owner\\":\\"tom\\"}}"] - Then a user receives a response containing "Key":"marble1" - And a user receives a response containing "name":"marble1" - And a user receives a response containing "owner":"tom" - And a user receives a response containing "Key":"marble2" - And a user receives a response containing "name":"marble2" - - # queryMarbles on more than one selector - When a user queries on the chaincode with args ["queryMarbles","{\\"selector\\":{\\"owner\\":\\"tom\\",\\"color\\":\\"red\\"}}"] - - Then a user receives a response containing "Key":"marble2" - And a user receives a response containing "name":"marble2" - And a user receives a response containing "color":"red" - And a user receives a response containing "owner":"tom" - Then a user receives a response not containing "Key":"marble1" - And a user receives a response not containing "color":"blue" - - When a user invokes on the chaincode with args ["transferMarble","marble1","jerry"] - And I wait "3" seconds - And a user queries on the chaincode with args ["readMarble","marble1"] - Then a user receives a 
response containing "docType":"marble" - And a user receives a response containing "name":"marble1" - And a user receives a response containing "color":"blue" - And a user receives a response containing "size":35 - And a user receives a response containing "owner":"jerry" - When a user invokes on the chaincode with args ["transferMarble","marble2","jerry"] - And I wait "3" seconds - And a user queries on the chaincode with args ["readMarble","marble2"] - Then a user receives a response containing "docType":"marble" - And a user receives a response containing "name":"marble2" - And a user receives a response containing "color":"red" - And a user receives a response containing "size":50 - And a user receives a response containing "owner":"jerry" - - When a user queries on the chaincode with args ["queryMarbles","{\\"selector\\":{\\"owner\\":\\"tom\\"}}"] - Then a user receives a success response of [] -Examples: - | path | language | - | github.com/hyperledger/fabric/examples/chaincode/go/marbles02 | GOLANG | - | ../../fabric-test/chaincodes/marbles/node | NODE | - -@daily -Scenario Outline: FAB-6439: Test chaincode enccc_example.go which uses encshim library extensions for orderer - #To generate good keys, we followed instructions as in the README.md under "github.com/hyperledger/fabric/examples/chaincode/go/enccc_example" folder - # ENCKEY=`openssl rand 32 -base64` - # IV=`openssl rand 16 -base64` - # SIGKEY=`openssl ecparam -name prime256v1 -genkey | tail -n5 | base64 -w0` - Given I have a bootstrapped fabric network of type - When an admin sets up a channel - #Warning: if you see errors during deploy instantiation, you may need to first remove outdated vendored materials inside the chaincode folder, - #e.g. 
`rm -rf ../../fabric-test/fabric/examples/chaincode/go/encc/vendor` - And I vendor "GOLANG" packages for fabric-based chaincode at "../fabric/examples/chaincode/go/enccc_example" - And an admin deploys chaincode at path "github.com/hyperledger/fabric/examples/chaincode/go/enccc_example" with args ["init", ""] with name "mycc" - And I locally execute the command "openssl rand 32 -base64" saving the results as "ENCKEY" - And a user invokes on the chaincode named "mycc" with args ["ENCRYPT","Social-Security-Number","123-45-6789"] and generated transient args "{\\"ENCKEY\\":\\"{ENCKEY}\\"}" - And I wait "5" seconds - When a user queries on the chaincode named "mycc" with args ["DECRYPT", "Social-Security-Number"] and generated transient args "{\\"DECKEY\\":\\"{ENCKEY}\\"}" - Then a user receives a success response of 123-45-6789 - When I locally execute the command "openssl rand 16 -base64" saving the results as "IV" - When a user invokes on the chaincode named "mycc" with args ["ENCRYPT","Tax-Id","1234-012"] and generated transient args "{\\"ENCKEY\\":\\"{ENCKEY}\\",\\"IV\\":\\"{IV}\\"}" - And I wait "5" seconds - When a user queries on the chaincode named "mycc" with args ["DECRYPT", "Tax-Id"] and generated transient args "{\\"DECKEY\\":\\"{ENCKEY}\\",\\"IV\\":\\"{IV}\\"}" - Then a user receives a response containing 1234-012 - When I locally execute the command "openssl ecparam -name prime256v1 -genkey | tail -n5 | base64 -w0" saving the results as "SIGKEY" - When a user invokes on the chaincode named "mycc" with args ["ENCRYPTSIGN","Passport-Number","M9037"] and generated transient args "{\\"ENCKEY\\":\\"{ENCKEY}\\",\\"SIGKEY\\":\\"{SIGKEY}\\"}" - And I wait "5" seconds - When a user queries on the chaincode named "mycc" with args ["DECRYPTVERIFY","Passport-Number"] and generated transient args "{\\"DECKEY\\":\\"{ENCKEY}\\",\\"VERKEY\\":\\"{SIGKEY}\\"}" - Then a user receives a response containing M9037 - When a user invokes on the chaincode named "mycc" with args 
["ENCRYPT","WellsFargo-Savings-Account","09675879"] and generated transient args "{\\"ENCKEY\\":\\"{ENCKEY}\\"}" - When a user invokes on the chaincode named "mycc" with args ["ENCRYPT","BankOfAmerica-Savings-Account","08123456"] and generated transient args "{\\"ENCKEY\\":\\"{ENCKEY}\\"}" - And I wait "3" seconds - When a user invokes on the chaincode named "mycc" with args ["ENCRYPT","Employee-Number1","123-00-6789"] and generated transient args "{\\"ENCKEY\\":\\"{ENCKEY}\\"}" - And I wait "3" seconds - When a user invokes on the chaincode named "mycc" with args ["ENCRYPT","Employee-Number2","123-45-0089"] and generated transient args "{\\"ENCKEY\\":\\"{ENCKEY}\\"}" - And I wait "3" seconds - #for range use keys encrypted with 'ENC' 'PUT' - When a user queries on the chaincode named "mycc" with args ["RANGEQUERY"] and generated transient args "{\\"DECKEY\\":\\"{ENCKEY}\\"}" - Then a user receives a response containing "key":"Employee-Number1" - And a user receives a response containing "value":"123-00-6789" - And a user receives a response containing "key":"Employee-Number2" - And a user receives a response containing "value":"123-45-0089" - And a user receives a response containing "key":"WellsFargo-Savings-Account" - And a user receives a response containing "value":"09675879" - And a user receives a response containing "key":"BankOfAmerica-Savings-Account" - And a user receives a response containing "value":"08123456" - -Examples: - | type | - | solo | - | kafka | - - -@daily -Scenario Outline: FAB-6650: Test chaincode enccc_example.go negative scenario, passing in bad ENCRYPTION(ENC), IV, and SIGNATURE(SIG) KEYS - #To generate good keys, we followed instructions as in the README.md under "github.com/hyperledger/fabric/examples/chaincode/go/enccc_example" folder - # ENCKEY=`openssl rand 32 -base64` - # IV=`openssl rand 16 -base64` - # SIGKEY=`openssl ecparam -name prime256v1 -genkey | tail -n5 | base64 -w0` - # For the things we called BAD keys in this test, 
we deleted last character from the generated good keys to corrupt them. - - Given I have a bootstrapped fabric network of type kafka - When an admin sets up a channel - And I vendor "GOLANG" packages for fabric-based chaincode at "../fabric/examples/chaincode/go/enccc_example" - And an admin deploys chaincode at path "github.com/hyperledger/fabric/examples/chaincode/go/enccc_example" with args ["init", ""] with name "mycc" - - #first we test for invoke failures by passing in bad keys - When a user invokes on the chaincode named "mycc" with args ["ENCRYPT","Social-Security-Number","123-45-6789"] and transient args "{\\"ENCKEY\\":\\"\\"}" - Then a user receives an error response of Error: error parsing transient string: illegal base64 data at input byte 40 - proposal response: - When a user invokes on the chaincode named "mycc" with args ["ENCRYPT","Tax-Id","1234-012"] and transient args "{\\"ENCKEY\\":\\"\\",\\"IV\\":\\"\\"}" - Then a user receives an error response of Error: error parsing transient string: illegal base64 data at input byte 23 - proposal response: - When a user invokes on the chaincode named "mycc" with args ["ENCRYPTSIGN","Passport-Number","M9037"] and transient args "{\\"ENCKEY\\":\\"\\",\\"SIGKEY\\":\\"\\"}" - Then a user receives an error response of Error: error parsing transient string: illegal base64 data at input byte 300 - proposal response: - - #here we make sure invokes pass but test for query failures by passing in bad keys - When I locally execute the command "openssl rand 32 -base64" saving the results as "ENCKEY" - When a user invokes on the chaincode named "mycc" with args ["ENCRPYT","Employee-Number1","123-00-6789"] and generated transient args "{\\"ENCKEY\\":\\"{ENCKEY}\\"}" - And I wait "5" seconds - #query an encrypted entity without passing Encryption key - When a user queries on the chaincode named "mycc" with args ["ENCRYPT","Social-Security-Number"] - Then a user receives an error response of status:500 - And a user receives 
an error response of Expected transient encryption key ENCKEY - #query passing in bad_enc_key - When a user invokes on the chaincode named "mycc" with args ["ENCRYPT","Social-Security-Number","123-45-6789"] and transient args "{\\"ENCKEY\\":\\"\\"}" - And I wait "5" seconds - When a user queries on the chaincode named "mycc" with args ["ENCRYPT","Social-Security-Number"] and generated transient args "{\\"ENCKEY\\":\\"\\"}" - Then a user receives an error response of Error: error parsing transient string: illegal base64 data at input byte 40 - proposal response: - -Examples: - | GOOD_ENC_KEY | BAD_ENC_KEY | BAD_IV_KEY | BAD_SIG_KEY | - | L6P9jLWR6d6E1KdGJBsUpzEm5QS6uVlS4onsteB+KaQ= | L6P9jLWR6d6E1KdGJBsUpzEm5QS6uVlS4onsteB+KaQ | +4DANc5uYLTnsH6Yy7v32g= | LS0tLS1CRUdJTiBFQyBQUklWQVRFIEtFWS0tLS0tCk1IY0NBUUVFSUhYRkd1eWxyTlQ1WUdtd1E0MVBWeTJqVlZrcXhMMTdBN1pSM0lDL1RGakJvQW9HQ0NxR1NNNDkKQXdFSG9VUURRZ0FFVHdWSEhrbklmUnUyZ3YwWU50R210akpDSHJzdThhekZ1OWZvUy9raUlPN2Q2aWhTWWRjdgpHbEoyNlF0WmtTTlhWNkJDLy91Z25ycGN3bldTdERsc1lRPT0KLS0tLS1FTkQgRUMgUFJJVkFURSBLRVktLS0tLQo | - - -@shimAPI -@daily -Scenario Outline: FAB-5791: Test API in SHIM interface using marbles02 and shimApiDriver chaincodes for orderer db lang -# | shim API in fabric/core/shim/chaincode.go | Covered in marbles02 chaincode | -# | for chaincode invocation -# | Init | init | -# | Invoke | invoke | -# | GetState | readMarble, initMarble, transferMarble | -# | PutState | initMarble, transferMarble | -# | DelState | deleteMarble | -# | CreateCompositeKey | initMarble, deleteMarble | -# | SplitCompositeKey | transferMarblesBasedOnColor | -# | GetStateByRange | transferMarblesBasedOnColor | -# | GetQueryResult | FAB-6256 readMarbles,queryMarbles,queryMarblesByOwner | -# | GetHistoryForKey | getHistoryForMarble | -# | GetStatePartialCompositeKeyQuery | transferMarblesBasedOnColor | - -# | | Covered in shimApiDriver chaincode -# | GetArgs | getArgs | -# | GetArgsSlice | getArgsSlice | -# | GetStringArgs | getStringArgs | -# 
| GetFunctionAndParameters | getFunctionAndParameters | - -# | GetBinding | getBinding | -# | GetCreator | getCreator | -# | GetTxTimeStamp | getTxTimeStamp | -# | GetSignedProposal | getSignedProposal | -# | GetTransient | getTransient | -# | GetTxID | | -# | GetDecorations | | -# | SetEvent | | - -# | InvokeChaincode | FAB-4717 ch_ex05 calling ch_ex02 | - - Given I have a bootstrapped fabric network of type - When an admin sets up a channel - And I vendor "" packages for fabric-based chaincode at "" - When an admin deploys chaincode at path "" with args [""] with name "mycc" with language "" - When an admin deploys chaincode at path "" with args [""] with name "myShimAPI" with language "" - - - #first two marbles are used for getMarblesByRange - When a user invokes on the chaincode named "mycc" with args ["initMarble","001m1","indigo","35","saleem"] - When a user invokes on the chaincode named "mycc" with args ["initMarble","004m4","green","35","dire straits"] - When a user invokes on the chaincode named "mycc" with args ["initMarble","marble1","red","35","tom"] - When a user invokes on the chaincode named "mycc" with args ["initMarble","marble2","blue","55","jerry"] - When a user invokes on the chaincode named "mycc" with args ["initMarble","marble111","pink","55","jane"] - And I wait "5" seconds - - When a user queries on the chaincode named "mycc" with args ["readMarble","marble1"] - Then a user receives a response containing "docType":"marble" - And a user receives a response containing "name":"marble1" - And a user receives a response containing "color":"red" - And a user receives a response containing "size":35 - And a user receives a response containing "owner":"tom" - - - When a user queries on the chaincode named "mycc" with args ["readMarble","marble2"] - Then a user receives a response containing "docType":"marble" - And a user receives a response containing "name":"marble2" - And a user receives a response containing "color":"blue" - And a user 
receives a response containing "size":55 - And a user receives a response containing "owner":"jerry" - - When a user queries on the chaincode named "mycc" with args ["readMarble","marble111"] - Then a user receives a response containing "docType":"marble" - And a user receives a response containing "name":"marble111" - And a user receives a response containing "color":"pink" - And a user receives a response containing "size":55 - And a user receives a response containing "owner":"jane" - -#Test transferMarble - When a user invokes on the chaincode named "mycc" with args ["transferMarble","marble1","jerry"] - And I wait "3" seconds - When a user queries on the chaincode named "mycc" with args ["readMarble","marble1"] - Then a user receives a response containing "docType":"marble" - And a user receives a response containing "name":"marble1" - And a user receives a response containing "color":"red" - And a user receives a response containing "size":35 - And a user receives a response containing "owner":"jerry" - -# Begin creating marbles to to test transferMarblesBasedOnColor - When a user invokes on the chaincode named "mycc" with args ["initMarble","marble100","red","5","cassey"] - When a user invokes on the chaincode named "mycc" with args ["initMarble","marble101","blue","6","cassey"] - When a user invokes on the chaincode named "mycc" with args ["initMarble","marble200","purple","5","ram"] - When a user invokes on the chaincode named "mycc" with args ["initMarble","marble201","blue","6","ram"] - And I wait "3" seconds - - When a user invokes on the chaincode named "mycc" with args ["transferMarblesBasedOnColor","blue","jerry"] - And I wait "3" seconds - When a user queries on the chaincode named "mycc" with args ["readMarble","marble100"] - Then a user receives a response containing "docType":"marble" - And a user receives a response containing "name":"marble100" - And a user receives a response containing "color":"red" - And a user receives a response containing 
"size":5 - And a user receives a response containing "owner":"cassey" - - - When a user queries on the chaincode named "mycc" with args ["readMarble","marble101"] - Then a user receives a response containing "docType":"marble" - And a user receives a response containing "name":"marble101" - And a user receives a response containing "color":"blue" - And a user receives a response containing "size":6 - And a user receives a response containing "owner":"jerry" - - - When a user queries on the chaincode named "mycc" with args ["readMarble","marble200"] - Then a user receives a response containing "docType":"marble" - And a user receives a response containing "name":"marble200" - And a user receives a response containing "color":"purple" - And a user receives a response containing "size":5 - And a user receives a response containing "owner":"ram" - - When a user queries on the chaincode named "mycc" with args ["readMarble","marble201"] - Then a user receives a response containing "docType":"marble" - And a user receives a response containing "name":"marble201" - And a user receives a response containing "color":"blue" - And a user receives a response containing "size":6 - And a user receives a response containing "owner":"jerry" - - -# Test getMarblesByRange - When a user queries on the chaincode named "mycc" with args ["getMarblesByRange","001m1", "005m4"] - Then a user receives a response containing "docType":"marble" - And a user receives a response containing "name":"001m1" - And a user receives a response containing "color":"indigo" - And a user receives a response containing "size":35 - And a user receives a response containing "owner":"saleem" - - Then a user receives a response containing "docType":"marble" - And a user receives a response containing "name":"004m4" - And a user receives a response containing "color":"green" - And a user receives a response containing "size":35 - And a user receives a response containing "owner":"dire straits" - - #delete a 
marble - When a user invokes on the chaincode named "mycc" with args ["delete","marble201"] - And I wait "3" seconds - When a user queries on the chaincode named "mycc" with args ["readMarble","marble201"] - Then a user receives an error response of status:500 - And a user receives an error response of {\"Error\":\"Marble does not exist: marble201\"} - - When a user queries on the chaincode named "myShimAPI" with args ["getTxTimestamp"] - When a user queries on the chaincode named "myShimAPI" with args ["getCreator"] - When a user invokes on the chaincode named "myShimAPI" with args ["getBinding"] - When a user queries on the chaincode named "myShimAPI" with args ["getSignedProposal"] - When a user queries on the chaincode named "myShimAPI" with args ["getTransient"] - - Examples: - | type | database | marbles02Path | VendorPath | shimAPIDriverPath | language | - | solo | leveldb | github.com/hyperledger/fabric/examples/chaincode/go/marbles02 | ../chaincodes/shimApiDriver/go | github.com/hyperledger/fabric-test/chaincodes/shimApiDriver/go | GOLANG | - | kafka | couchdb | github.com/hyperledger/fabric/examples/chaincode/go/marbles02 | ../chaincodes/shimApiDriver/go | github.com/hyperledger/fabric-test/chaincodes/shimApiDriver/go | GOLANG | - | solo | leveldb | ../../fabric-test/chaincodes/marbles/node | ../chaincodes/shimApiDriver/node | ../../fabric-test/chaincodes/shimApiDriver/node | NODE | - | kafka | couchdb | ../../fabric-test/chaincodes/marbles/node | ../chaincodes/shimApiDriver/node | ../../fabric-test/chaincodes/shimApiDriver/node | NODE | diff --git a/app/platform/fabric/e2e-test/feature/common/__init__.py b/app/platform/fabric/e2e-test/feature/common/__init__.py deleted file mode 100644 index 49cd7f3ac..000000000 --- a/app/platform/fabric/e2e-test/feature/common/__init__.py +++ /dev/null @@ -1,5 +0,0 @@ -# -# Copyright IBM Corp. All Rights Reserved. 
-# -# SPDX-License-Identifier: Apache-2.0 -# diff --git a/app/platform/fabric/e2e-test/feature/common/common_pb2.py b/app/platform/fabric/e2e-test/feature/common/common_pb2.py deleted file mode 100644 index 244271537..000000000 --- a/app/platform/fabric/e2e-test/feature/common/common_pb2.py +++ /dev/null @@ -1,774 +0,0 @@ -# Generated by the protocol buffer compiler. DO NOT EDIT! -# source: common/common.proto - -import sys -_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1')) -from google.protobuf.internal import enum_type_wrapper -from google.protobuf import descriptor as _descriptor -from google.protobuf import message as _message -from google.protobuf import reflection as _reflection -from google.protobuf import symbol_database as _symbol_database -from google.protobuf import descriptor_pb2 -# @@protoc_insertion_point(imports) - -_sym_db = _symbol_database.Default() - - -from google.protobuf import timestamp_pb2 as google_dot_protobuf_dot_timestamp__pb2 - - -DESCRIPTOR = _descriptor.FileDescriptor( - name='common/common.proto', - package='common', - syntax='proto3', - serialized_pb=_b('\n\x13\x63ommon/common.proto\x12\x06\x63ommon\x1a\x1fgoogle/protobuf/timestamp.proto\"\x1b\n\nLastConfig\x12\r\n\x05index\x18\x01 \x01(\x04\"H\n\x08Metadata\x12\r\n\x05value\x18\x01 \x01(\x0c\x12-\n\nsignatures\x18\x02 \x03(\x0b\x32\x19.common.MetadataSignature\"@\n\x11MetadataSignature\x12\x18\n\x10signature_header\x18\x01 \x01(\x0c\x12\x11\n\tsignature\x18\x02 \x01(\x0c\":\n\x06Header\x12\x16\n\x0e\x63hannel_header\x18\x01 \x01(\x0c\x12\x18\n\x10signature_header\x18\x02 \x01(\x0c\"\xa2\x01\n\rChannelHeader\x12\x0c\n\x04type\x18\x01 \x01(\x05\x12\x0f\n\x07version\x18\x02 \x01(\x05\x12-\n\ttimestamp\x18\x03 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x12\n\nchannel_id\x18\x04 \x01(\t\x12\r\n\x05tx_id\x18\x05 \x01(\t\x12\r\n\x05\x65poch\x18\x06 \x01(\x04\x12\x11\n\textension\x18\x07 \x01(\x0c\"1\n\x0fSignatureHeader\x12\x0f\n\x07\x63reator\x18\x01 
\x01(\x0c\x12\r\n\x05nonce\x18\x02 \x01(\x0c\"7\n\x07Payload\x12\x1e\n\x06header\x18\x01 \x01(\x0b\x32\x0e.common.Header\x12\x0c\n\x04\x64\x61ta\x18\x02 \x01(\x0c\".\n\x08\x45nvelope\x12\x0f\n\x07payload\x18\x01 \x01(\x0c\x12\x11\n\tsignature\x18\x02 \x01(\x0c\"v\n\x05\x42lock\x12#\n\x06header\x18\x01 \x01(\x0b\x32\x13.common.BlockHeader\x12\x1f\n\x04\x64\x61ta\x18\x02 \x01(\x0b\x32\x11.common.BlockData\x12\'\n\x08metadata\x18\x03 \x01(\x0b\x32\x15.common.BlockMetadata\"G\n\x0b\x42lockHeader\x12\x0e\n\x06number\x18\x01 \x01(\x04\x12\x15\n\rprevious_hash\x18\x02 \x01(\x0c\x12\x11\n\tdata_hash\x18\x03 \x01(\x0c\"\x19\n\tBlockData\x12\x0c\n\x04\x64\x61ta\x18\x01 \x03(\x0c\"!\n\rBlockMetadata\x12\x10\n\x08metadata\x18\x01 \x03(\x0c*\xaa\x01\n\x06Status\x12\x0b\n\x07UNKNOWN\x10\x00\x12\x0c\n\x07SUCCESS\x10\xc8\x01\x12\x10\n\x0b\x42\x41\x44_REQUEST\x10\x90\x03\x12\x0e\n\tFORBIDDEN\x10\x93\x03\x12\x0e\n\tNOT_FOUND\x10\x94\x03\x12\x1d\n\x18REQUEST_ENTITY_TOO_LARGE\x10\x9d\x03\x12\x1a\n\x15INTERNAL_SERVER_ERROR\x10\xf4\x03\x12\x18\n\x13SERVICE_UNAVAILABLE\x10\xf7\x03*\x99\x01\n\nHeaderType\x12\x0b\n\x07MESSAGE\x10\x00\x12\n\n\x06\x43ONFIG\x10\x01\x12\x11\n\rCONFIG_UPDATE\x10\x02\x12\x18\n\x14\x45NDORSER_TRANSACTION\x10\x03\x12\x17\n\x13ORDERER_TRANSACTION\x10\x04\x12\x15\n\x11\x44\x45LIVER_SEEK_INFO\x10\x05\x12\x15\n\x11\x43HAINCODE_PACKAGE\x10\x06*[\n\x12\x42lockMetadataIndex\x12\x0e\n\nSIGNATURES\x10\x00\x12\x0f\n\x0bLAST_CONFIG\x10\x01\x12\x17\n\x13TRANSACTIONS_FILTER\x10\x02\x12\x0b\n\x07ORDERER\x10\x03\x42S\n$org.hyperledger.fabric.protos.commonZ+github.com/hyperledger/fabric/protos/commonb\x06proto3') - , - dependencies=[google_dot_protobuf_dot_timestamp__pb2.DESCRIPTOR,]) -_sym_db.RegisterFileDescriptor(DESCRIPTOR) - -_STATUS = _descriptor.EnumDescriptor( - name='Status', - full_name='common.Status', - filename=None, - file=DESCRIPTOR, - values=[ - _descriptor.EnumValueDescriptor( - name='UNKNOWN', index=0, number=0, - options=None, - type=None), - 
_descriptor.EnumValueDescriptor( - name='SUCCESS', index=1, number=200, - options=None, - type=None), - _descriptor.EnumValueDescriptor( - name='BAD_REQUEST', index=2, number=400, - options=None, - type=None), - _descriptor.EnumValueDescriptor( - name='FORBIDDEN', index=3, number=403, - options=None, - type=None), - _descriptor.EnumValueDescriptor( - name='NOT_FOUND', index=4, number=404, - options=None, - type=None), - _descriptor.EnumValueDescriptor( - name='REQUEST_ENTITY_TOO_LARGE', index=5, number=413, - options=None, - type=None), - _descriptor.EnumValueDescriptor( - name='INTERNAL_SERVER_ERROR', index=6, number=500, - options=None, - type=None), - _descriptor.EnumValueDescriptor( - name='SERVICE_UNAVAILABLE', index=7, number=503, - options=None, - type=None), - ], - containing_type=None, - options=None, - serialized_start=870, - serialized_end=1040, -) -_sym_db.RegisterEnumDescriptor(_STATUS) - -Status = enum_type_wrapper.EnumTypeWrapper(_STATUS) -_HEADERTYPE = _descriptor.EnumDescriptor( - name='HeaderType', - full_name='common.HeaderType', - filename=None, - file=DESCRIPTOR, - values=[ - _descriptor.EnumValueDescriptor( - name='MESSAGE', index=0, number=0, - options=None, - type=None), - _descriptor.EnumValueDescriptor( - name='CONFIG', index=1, number=1, - options=None, - type=None), - _descriptor.EnumValueDescriptor( - name='CONFIG_UPDATE', index=2, number=2, - options=None, - type=None), - _descriptor.EnumValueDescriptor( - name='ENDORSER_TRANSACTION', index=3, number=3, - options=None, - type=None), - _descriptor.EnumValueDescriptor( - name='ORDERER_TRANSACTION', index=4, number=4, - options=None, - type=None), - _descriptor.EnumValueDescriptor( - name='DELIVER_SEEK_INFO', index=5, number=5, - options=None, - type=None), - _descriptor.EnumValueDescriptor( - name='CHAINCODE_PACKAGE', index=6, number=6, - options=None, - type=None), - ], - containing_type=None, - options=None, - serialized_start=1043, - serialized_end=1196, -) 
-_sym_db.RegisterEnumDescriptor(_HEADERTYPE) - -HeaderType = enum_type_wrapper.EnumTypeWrapper(_HEADERTYPE) -_BLOCKMETADATAINDEX = _descriptor.EnumDescriptor( - name='BlockMetadataIndex', - full_name='common.BlockMetadataIndex', - filename=None, - file=DESCRIPTOR, - values=[ - _descriptor.EnumValueDescriptor( - name='SIGNATURES', index=0, number=0, - options=None, - type=None), - _descriptor.EnumValueDescriptor( - name='LAST_CONFIG', index=1, number=1, - options=None, - type=None), - _descriptor.EnumValueDescriptor( - name='TRANSACTIONS_FILTER', index=2, number=2, - options=None, - type=None), - _descriptor.EnumValueDescriptor( - name='ORDERER', index=3, number=3, - options=None, - type=None), - ], - containing_type=None, - options=None, - serialized_start=1198, - serialized_end=1289, -) -_sym_db.RegisterEnumDescriptor(_BLOCKMETADATAINDEX) - -BlockMetadataIndex = enum_type_wrapper.EnumTypeWrapper(_BLOCKMETADATAINDEX) -UNKNOWN = 0 -SUCCESS = 200 -BAD_REQUEST = 400 -FORBIDDEN = 403 -NOT_FOUND = 404 -REQUEST_ENTITY_TOO_LARGE = 413 -INTERNAL_SERVER_ERROR = 500 -SERVICE_UNAVAILABLE = 503 -MESSAGE = 0 -CONFIG = 1 -CONFIG_UPDATE = 2 -ENDORSER_TRANSACTION = 3 -ORDERER_TRANSACTION = 4 -DELIVER_SEEK_INFO = 5 -CHAINCODE_PACKAGE = 6 -SIGNATURES = 0 -LAST_CONFIG = 1 -TRANSACTIONS_FILTER = 2 -ORDERER = 3 - - - -_LASTCONFIG = _descriptor.Descriptor( - name='LastConfig', - full_name='common.LastConfig', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='index', full_name='common.LastConfig.index', index=0, - number=1, type=4, cpp_type=4, label=1, - has_default_value=False, default_value=0, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - options=None, - is_extendable=False, - syntax='proto3', - extension_ranges=[], - oneofs=[ - ], - serialized_start=64, - serialized_end=91, -) - - 
-_METADATA = _descriptor.Descriptor( - name='Metadata', - full_name='common.Metadata', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='value', full_name='common.Metadata.value', index=0, - number=1, type=12, cpp_type=9, label=1, - has_default_value=False, default_value=_b(""), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='signatures', full_name='common.Metadata.signatures', index=1, - number=2, type=11, cpp_type=10, label=3, - has_default_value=False, default_value=[], - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - options=None, - is_extendable=False, - syntax='proto3', - extension_ranges=[], - oneofs=[ - ], - serialized_start=93, - serialized_end=165, -) - - -_METADATASIGNATURE = _descriptor.Descriptor( - name='MetadataSignature', - full_name='common.MetadataSignature', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='signature_header', full_name='common.MetadataSignature.signature_header', index=0, - number=1, type=12, cpp_type=9, label=1, - has_default_value=False, default_value=_b(""), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='signature', full_name='common.MetadataSignature.signature', index=1, - number=2, type=12, cpp_type=9, label=1, - has_default_value=False, default_value=_b(""), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - options=None, - is_extendable=False, - syntax='proto3', - extension_ranges=[], - oneofs=[ - ], - 
serialized_start=167, - serialized_end=231, -) - - -_HEADER = _descriptor.Descriptor( - name='Header', - full_name='common.Header', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='channel_header', full_name='common.Header.channel_header', index=0, - number=1, type=12, cpp_type=9, label=1, - has_default_value=False, default_value=_b(""), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='signature_header', full_name='common.Header.signature_header', index=1, - number=2, type=12, cpp_type=9, label=1, - has_default_value=False, default_value=_b(""), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - options=None, - is_extendable=False, - syntax='proto3', - extension_ranges=[], - oneofs=[ - ], - serialized_start=233, - serialized_end=291, -) - - -_CHANNELHEADER = _descriptor.Descriptor( - name='ChannelHeader', - full_name='common.ChannelHeader', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='type', full_name='common.ChannelHeader.type', index=0, - number=1, type=5, cpp_type=1, label=1, - has_default_value=False, default_value=0, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='version', full_name='common.ChannelHeader.version', index=1, - number=2, type=5, cpp_type=1, label=1, - has_default_value=False, default_value=0, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='timestamp', full_name='common.ChannelHeader.timestamp', index=2, - number=3, type=11, cpp_type=10, label=1, - 
has_default_value=False, default_value=None, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='channel_id', full_name='common.ChannelHeader.channel_id', index=3, - number=4, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=_b("").decode('utf-8'), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='tx_id', full_name='common.ChannelHeader.tx_id', index=4, - number=5, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=_b("").decode('utf-8'), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='epoch', full_name='common.ChannelHeader.epoch', index=5, - number=6, type=4, cpp_type=4, label=1, - has_default_value=False, default_value=0, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='extension', full_name='common.ChannelHeader.extension', index=6, - number=7, type=12, cpp_type=9, label=1, - has_default_value=False, default_value=_b(""), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - options=None, - is_extendable=False, - syntax='proto3', - extension_ranges=[], - oneofs=[ - ], - serialized_start=294, - serialized_end=456, -) - - -_SIGNATUREHEADER = _descriptor.Descriptor( - name='SignatureHeader', - full_name='common.SignatureHeader', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='creator', full_name='common.SignatureHeader.creator', index=0, - number=1, type=12, cpp_type=9, label=1, - has_default_value=False, 
default_value=_b(""), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='nonce', full_name='common.SignatureHeader.nonce', index=1, - number=2, type=12, cpp_type=9, label=1, - has_default_value=False, default_value=_b(""), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - options=None, - is_extendable=False, - syntax='proto3', - extension_ranges=[], - oneofs=[ - ], - serialized_start=458, - serialized_end=507, -) - - -_PAYLOAD = _descriptor.Descriptor( - name='Payload', - full_name='common.Payload', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='header', full_name='common.Payload.header', index=0, - number=1, type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='data', full_name='common.Payload.data', index=1, - number=2, type=12, cpp_type=9, label=1, - has_default_value=False, default_value=_b(""), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - options=None, - is_extendable=False, - syntax='proto3', - extension_ranges=[], - oneofs=[ - ], - serialized_start=509, - serialized_end=564, -) - - -_ENVELOPE = _descriptor.Descriptor( - name='Envelope', - full_name='common.Envelope', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='payload', full_name='common.Envelope.payload', index=0, - number=1, type=12, cpp_type=9, label=1, - has_default_value=False, default_value=_b(""), - message_type=None, 
enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='signature', full_name='common.Envelope.signature', index=1, - number=2, type=12, cpp_type=9, label=1, - has_default_value=False, default_value=_b(""), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - options=None, - is_extendable=False, - syntax='proto3', - extension_ranges=[], - oneofs=[ - ], - serialized_start=566, - serialized_end=612, -) - - -_BLOCK = _descriptor.Descriptor( - name='Block', - full_name='common.Block', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='header', full_name='common.Block.header', index=0, - number=1, type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='data', full_name='common.Block.data', index=1, - number=2, type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='metadata', full_name='common.Block.metadata', index=2, - number=3, type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - options=None, - is_extendable=False, - syntax='proto3', - extension_ranges=[], - oneofs=[ - ], - serialized_start=614, - serialized_end=732, -) - - -_BLOCKHEADER = _descriptor.Descriptor( - name='BlockHeader', - full_name='common.BlockHeader', - filename=None, 
- file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='number', full_name='common.BlockHeader.number', index=0, - number=1, type=4, cpp_type=4, label=1, - has_default_value=False, default_value=0, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='previous_hash', full_name='common.BlockHeader.previous_hash', index=1, - number=2, type=12, cpp_type=9, label=1, - has_default_value=False, default_value=_b(""), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='data_hash', full_name='common.BlockHeader.data_hash', index=2, - number=3, type=12, cpp_type=9, label=1, - has_default_value=False, default_value=_b(""), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - options=None, - is_extendable=False, - syntax='proto3', - extension_ranges=[], - oneofs=[ - ], - serialized_start=734, - serialized_end=805, -) - - -_BLOCKDATA = _descriptor.Descriptor( - name='BlockData', - full_name='common.BlockData', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='data', full_name='common.BlockData.data', index=0, - number=1, type=12, cpp_type=9, label=3, - has_default_value=False, default_value=[], - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - options=None, - is_extendable=False, - syntax='proto3', - extension_ranges=[], - oneofs=[ - ], - serialized_start=807, - serialized_end=832, -) - - -_BLOCKMETADATA = _descriptor.Descriptor( - name='BlockMetadata', - full_name='common.BlockMetadata', - filename=None, - 
file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='metadata', full_name='common.BlockMetadata.metadata', index=0, - number=1, type=12, cpp_type=9, label=3, - has_default_value=False, default_value=[], - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - options=None, - is_extendable=False, - syntax='proto3', - extension_ranges=[], - oneofs=[ - ], - serialized_start=834, - serialized_end=867, -) - -_METADATA.fields_by_name['signatures'].message_type = _METADATASIGNATURE -_CHANNELHEADER.fields_by_name['timestamp'].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP -_PAYLOAD.fields_by_name['header'].message_type = _HEADER -_BLOCK.fields_by_name['header'].message_type = _BLOCKHEADER -_BLOCK.fields_by_name['data'].message_type = _BLOCKDATA -_BLOCK.fields_by_name['metadata'].message_type = _BLOCKMETADATA -DESCRIPTOR.message_types_by_name['LastConfig'] = _LASTCONFIG -DESCRIPTOR.message_types_by_name['Metadata'] = _METADATA -DESCRIPTOR.message_types_by_name['MetadataSignature'] = _METADATASIGNATURE -DESCRIPTOR.message_types_by_name['Header'] = _HEADER -DESCRIPTOR.message_types_by_name['ChannelHeader'] = _CHANNELHEADER -DESCRIPTOR.message_types_by_name['SignatureHeader'] = _SIGNATUREHEADER -DESCRIPTOR.message_types_by_name['Payload'] = _PAYLOAD -DESCRIPTOR.message_types_by_name['Envelope'] = _ENVELOPE -DESCRIPTOR.message_types_by_name['Block'] = _BLOCK -DESCRIPTOR.message_types_by_name['BlockHeader'] = _BLOCKHEADER -DESCRIPTOR.message_types_by_name['BlockData'] = _BLOCKDATA -DESCRIPTOR.message_types_by_name['BlockMetadata'] = _BLOCKMETADATA -DESCRIPTOR.enum_types_by_name['Status'] = _STATUS -DESCRIPTOR.enum_types_by_name['HeaderType'] = _HEADERTYPE -DESCRIPTOR.enum_types_by_name['BlockMetadataIndex'] = _BLOCKMETADATAINDEX - -LastConfig = 
_reflection.GeneratedProtocolMessageType('LastConfig', (_message.Message,), dict( - DESCRIPTOR = _LASTCONFIG, - __module__ = 'common.common_pb2' - # @@protoc_insertion_point(class_scope:common.LastConfig) - )) -_sym_db.RegisterMessage(LastConfig) - -Metadata = _reflection.GeneratedProtocolMessageType('Metadata', (_message.Message,), dict( - DESCRIPTOR = _METADATA, - __module__ = 'common.common_pb2' - # @@protoc_insertion_point(class_scope:common.Metadata) - )) -_sym_db.RegisterMessage(Metadata) - -MetadataSignature = _reflection.GeneratedProtocolMessageType('MetadataSignature', (_message.Message,), dict( - DESCRIPTOR = _METADATASIGNATURE, - __module__ = 'common.common_pb2' - # @@protoc_insertion_point(class_scope:common.MetadataSignature) - )) -_sym_db.RegisterMessage(MetadataSignature) - -Header = _reflection.GeneratedProtocolMessageType('Header', (_message.Message,), dict( - DESCRIPTOR = _HEADER, - __module__ = 'common.common_pb2' - # @@protoc_insertion_point(class_scope:common.Header) - )) -_sym_db.RegisterMessage(Header) - -ChannelHeader = _reflection.GeneratedProtocolMessageType('ChannelHeader', (_message.Message,), dict( - DESCRIPTOR = _CHANNELHEADER, - __module__ = 'common.common_pb2' - # @@protoc_insertion_point(class_scope:common.ChannelHeader) - )) -_sym_db.RegisterMessage(ChannelHeader) - -SignatureHeader = _reflection.GeneratedProtocolMessageType('SignatureHeader', (_message.Message,), dict( - DESCRIPTOR = _SIGNATUREHEADER, - __module__ = 'common.common_pb2' - # @@protoc_insertion_point(class_scope:common.SignatureHeader) - )) -_sym_db.RegisterMessage(SignatureHeader) - -Payload = _reflection.GeneratedProtocolMessageType('Payload', (_message.Message,), dict( - DESCRIPTOR = _PAYLOAD, - __module__ = 'common.common_pb2' - # @@protoc_insertion_point(class_scope:common.Payload) - )) -_sym_db.RegisterMessage(Payload) - -Envelope = _reflection.GeneratedProtocolMessageType('Envelope', (_message.Message,), dict( - DESCRIPTOR = _ENVELOPE, - __module__ = 
'common.common_pb2' - # @@protoc_insertion_point(class_scope:common.Envelope) - )) -_sym_db.RegisterMessage(Envelope) - -Block = _reflection.GeneratedProtocolMessageType('Block', (_message.Message,), dict( - DESCRIPTOR = _BLOCK, - __module__ = 'common.common_pb2' - # @@protoc_insertion_point(class_scope:common.Block) - )) -_sym_db.RegisterMessage(Block) - -BlockHeader = _reflection.GeneratedProtocolMessageType('BlockHeader', (_message.Message,), dict( - DESCRIPTOR = _BLOCKHEADER, - __module__ = 'common.common_pb2' - # @@protoc_insertion_point(class_scope:common.BlockHeader) - )) -_sym_db.RegisterMessage(BlockHeader) - -BlockData = _reflection.GeneratedProtocolMessageType('BlockData', (_message.Message,), dict( - DESCRIPTOR = _BLOCKDATA, - __module__ = 'common.common_pb2' - # @@protoc_insertion_point(class_scope:common.BlockData) - )) -_sym_db.RegisterMessage(BlockData) - -BlockMetadata = _reflection.GeneratedProtocolMessageType('BlockMetadata', (_message.Message,), dict( - DESCRIPTOR = _BLOCKMETADATA, - __module__ = 'common.common_pb2' - # @@protoc_insertion_point(class_scope:common.BlockMetadata) - )) -_sym_db.RegisterMessage(BlockMetadata) - - -DESCRIPTOR.has_options = True -DESCRIPTOR._options = _descriptor._ParseOptions(descriptor_pb2.FileOptions(), _b('\n$org.hyperledger.fabric.protos.commonZ+github.com/hyperledger/fabric/protos/common')) -try: - # THESE ELEMENTS WILL BE DEPRECATED. - # Please use the generated *_pb2_grpc.py files instead. 
- import grpc - from grpc.framework.common import cardinality - from grpc.framework.interfaces.face import utilities as face_utilities - from grpc.beta import implementations as beta_implementations - from grpc.beta import interfaces as beta_interfaces -except ImportError: - pass -# @@protoc_insertion_point(module_scope) diff --git a/app/platform/fabric/e2e-test/feature/common/common_pb2_grpc.py b/app/platform/fabric/e2e-test/feature/common/common_pb2_grpc.py deleted file mode 100644 index d5557c123..000000000 --- a/app/platform/fabric/e2e-test/feature/common/common_pb2_grpc.py +++ /dev/null @@ -1,5 +0,0 @@ -# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT! -import grpc -from grpc.framework.common import cardinality -from grpc.framework.interfaces.face import utilities as face_utilities - diff --git a/app/platform/fabric/e2e-test/feature/common/configtx_pb2.py b/app/platform/fabric/e2e-test/feature/common/configtx_pb2.py deleted file mode 100644 index 00be0dde6..000000000 --- a/app/platform/fabric/e2e-test/feature/common/configtx_pb2.py +++ /dev/null @@ -1,879 +0,0 @@ -# Generated by the protocol buffer compiler. DO NOT EDIT! 
-# source: common/configtx.proto - -import sys -_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1')) -from google.protobuf import descriptor as _descriptor -from google.protobuf import message as _message -from google.protobuf import reflection as _reflection -from google.protobuf import symbol_database as _symbol_database -from google.protobuf import descriptor_pb2 -# @@protoc_insertion_point(imports) - -_sym_db = _symbol_database.Default() - - -from common import common_pb2 as common_dot_common__pb2 -from common import policies_pb2 as common_dot_policies__pb2 - - -DESCRIPTOR = _descriptor.FileDescriptor( - name='common/configtx.proto', - package='common', - syntax='proto3', - serialized_pb=_b('\n\x15\x63ommon/configtx.proto\x12\x06\x63ommon\x1a\x13\x63ommon/common.proto\x1a\x15\x63ommon/policies.proto\"W\n\x0e\x43onfigEnvelope\x12\x1e\n\x06\x63onfig\x18\x01 \x01(\x0b\x32\x0e.common.Config\x12%\n\x0blast_update\x18\x02 \x01(\x0b\x32\x10.common.Envelope\"\x9d\x03\n\x11\x43onfigGroupSchema\x12\x35\n\x06groups\x18\x01 \x03(\x0b\x32%.common.ConfigGroupSchema.GroupsEntry\x12\x35\n\x06values\x18\x02 \x03(\x0b\x32%.common.ConfigGroupSchema.ValuesEntry\x12\x39\n\x08policies\x18\x03 \x03(\x0b\x32\'.common.ConfigGroupSchema.PoliciesEntry\x1aH\n\x0bGroupsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12(\n\x05value\x18\x02 \x01(\x0b\x32\x19.common.ConfigGroupSchema:\x02\x38\x01\x1aH\n\x0bValuesEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12(\n\x05value\x18\x02 \x01(\x0b\x32\x19.common.ConfigValueSchema:\x02\x38\x01\x1aK\n\rPoliciesEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12)\n\x05value\x18\x02 \x01(\x0b\x32\x1a.common.ConfigPolicySchema:\x02\x38\x01\"\x13\n\x11\x43onfigValueSchema\"\x14\n\x12\x43onfigPolicySchema\"F\n\x06\x43onfig\x12\x10\n\x08sequence\x18\x01 \x01(\x04\x12*\n\rchannel_group\x18\x02 \x01(\x0b\x32\x13.common.ConfigGroup\"Z\n\x14\x43onfigUpdateEnvelope\x12\x15\n\rconfig_update\x18\x01 \x01(\x0c\x12+\n\nsignatures\x18\x02 
\x03(\x0b\x32\x17.common.ConfigSignature\"q\n\x0c\x43onfigUpdate\x12\x12\n\nchannel_id\x18\x01 \x01(\t\x12%\n\x08read_set\x18\x02 \x01(\x0b\x32\x13.common.ConfigGroup\x12&\n\twrite_set\x18\x03 \x01(\x0b\x32\x13.common.ConfigGroup\"\x98\x03\n\x0b\x43onfigGroup\x12\x0f\n\x07version\x18\x01 \x01(\x04\x12/\n\x06groups\x18\x02 \x03(\x0b\x32\x1f.common.ConfigGroup.GroupsEntry\x12/\n\x06values\x18\x03 \x03(\x0b\x32\x1f.common.ConfigGroup.ValuesEntry\x12\x33\n\x08policies\x18\x04 \x03(\x0b\x32!.common.ConfigGroup.PoliciesEntry\x12\x12\n\nmod_policy\x18\x05 \x01(\t\x1a\x42\n\x0bGroupsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\"\n\x05value\x18\x02 \x01(\x0b\x32\x13.common.ConfigGroup:\x02\x38\x01\x1a\x42\n\x0bValuesEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\"\n\x05value\x18\x02 \x01(\x0b\x32\x13.common.ConfigValue:\x02\x38\x01\x1a\x45\n\rPoliciesEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12#\n\x05value\x18\x02 \x01(\x0b\x32\x14.common.ConfigPolicy:\x02\x38\x01\"A\n\x0b\x43onfigValue\x12\x0f\n\x07version\x18\x01 \x01(\x04\x12\r\n\x05value\x18\x02 \x01(\x0c\x12\x12\n\nmod_policy\x18\x03 \x01(\t\"S\n\x0c\x43onfigPolicy\x12\x0f\n\x07version\x18\x01 \x01(\x04\x12\x1e\n\x06policy\x18\x02 \x01(\x0b\x32\x0e.common.Policy\x12\x12\n\nmod_policy\x18\x03 \x01(\t\">\n\x0f\x43onfigSignature\x12\x18\n\x10signature_header\x18\x01 \x01(\x0c\x12\x11\n\tsignature\x18\x02 \x01(\x0c\x42S\n$org.hyperledger.fabric.protos.commonZ+github.com/hyperledger/fabric/protos/commonb\x06proto3') - , - dependencies=[common_dot_common__pb2.DESCRIPTOR,common_dot_policies__pb2.DESCRIPTOR,]) -_sym_db.RegisterFileDescriptor(DESCRIPTOR) - - - - -_CONFIGENVELOPE = _descriptor.Descriptor( - name='ConfigEnvelope', - full_name='common.ConfigEnvelope', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='config', full_name='common.ConfigEnvelope.config', index=0, - number=1, type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, - 
message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='last_update', full_name='common.ConfigEnvelope.last_update', index=1, - number=2, type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - options=None, - is_extendable=False, - syntax='proto3', - extension_ranges=[], - oneofs=[ - ], - serialized_start=77, - serialized_end=164, -) - - -_CONFIGGROUPSCHEMA_GROUPSENTRY = _descriptor.Descriptor( - name='GroupsEntry', - full_name='common.ConfigGroupSchema.GroupsEntry', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='key', full_name='common.ConfigGroupSchema.GroupsEntry.key', index=0, - number=1, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=_b("").decode('utf-8'), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='value', full_name='common.ConfigGroupSchema.GroupsEntry.value', index=1, - number=2, type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - options=_descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001')), - is_extendable=False, - syntax='proto3', - extension_ranges=[], - oneofs=[ - ], - serialized_start=357, - serialized_end=429, -) - -_CONFIGGROUPSCHEMA_VALUESENTRY = _descriptor.Descriptor( - name='ValuesEntry', - full_name='common.ConfigGroupSchema.ValuesEntry', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - 
_descriptor.FieldDescriptor( - name='key', full_name='common.ConfigGroupSchema.ValuesEntry.key', index=0, - number=1, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=_b("").decode('utf-8'), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='value', full_name='common.ConfigGroupSchema.ValuesEntry.value', index=1, - number=2, type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - options=_descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001')), - is_extendable=False, - syntax='proto3', - extension_ranges=[], - oneofs=[ - ], - serialized_start=431, - serialized_end=503, -) - -_CONFIGGROUPSCHEMA_POLICIESENTRY = _descriptor.Descriptor( - name='PoliciesEntry', - full_name='common.ConfigGroupSchema.PoliciesEntry', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='key', full_name='common.ConfigGroupSchema.PoliciesEntry.key', index=0, - number=1, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=_b("").decode('utf-8'), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='value', full_name='common.ConfigGroupSchema.PoliciesEntry.value', index=1, - number=2, type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - options=_descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001')), - is_extendable=False, - syntax='proto3', - 
extension_ranges=[], - oneofs=[ - ], - serialized_start=505, - serialized_end=580, -) - -_CONFIGGROUPSCHEMA = _descriptor.Descriptor( - name='ConfigGroupSchema', - full_name='common.ConfigGroupSchema', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='groups', full_name='common.ConfigGroupSchema.groups', index=0, - number=1, type=11, cpp_type=10, label=3, - has_default_value=False, default_value=[], - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='values', full_name='common.ConfigGroupSchema.values', index=1, - number=2, type=11, cpp_type=10, label=3, - has_default_value=False, default_value=[], - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='policies', full_name='common.ConfigGroupSchema.policies', index=2, - number=3, type=11, cpp_type=10, label=3, - has_default_value=False, default_value=[], - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - ], - extensions=[ - ], - nested_types=[_CONFIGGROUPSCHEMA_GROUPSENTRY, _CONFIGGROUPSCHEMA_VALUESENTRY, _CONFIGGROUPSCHEMA_POLICIESENTRY, ], - enum_types=[ - ], - options=None, - is_extendable=False, - syntax='proto3', - extension_ranges=[], - oneofs=[ - ], - serialized_start=167, - serialized_end=580, -) - - -_CONFIGVALUESCHEMA = _descriptor.Descriptor( - name='ConfigValueSchema', - full_name='common.ConfigValueSchema', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - options=None, - is_extendable=False, - syntax='proto3', - extension_ranges=[], - oneofs=[ - ], - serialized_start=582, - serialized_end=601, -) - - -_CONFIGPOLICYSCHEMA = _descriptor.Descriptor( - name='ConfigPolicySchema', - 
full_name='common.ConfigPolicySchema', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - options=None, - is_extendable=False, - syntax='proto3', - extension_ranges=[], - oneofs=[ - ], - serialized_start=603, - serialized_end=623, -) - - -_CONFIG = _descriptor.Descriptor( - name='Config', - full_name='common.Config', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='sequence', full_name='common.Config.sequence', index=0, - number=1, type=4, cpp_type=4, label=1, - has_default_value=False, default_value=0, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='channel_group', full_name='common.Config.channel_group', index=1, - number=2, type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - options=None, - is_extendable=False, - syntax='proto3', - extension_ranges=[], - oneofs=[ - ], - serialized_start=625, - serialized_end=695, -) - - -_CONFIGUPDATEENVELOPE = _descriptor.Descriptor( - name='ConfigUpdateEnvelope', - full_name='common.ConfigUpdateEnvelope', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='config_update', full_name='common.ConfigUpdateEnvelope.config_update', index=0, - number=1, type=12, cpp_type=9, label=1, - has_default_value=False, default_value=_b(""), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='signatures', full_name='common.ConfigUpdateEnvelope.signatures', index=1, - number=2, type=11, cpp_type=10, label=3, - 
has_default_value=False, default_value=[], - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - options=None, - is_extendable=False, - syntax='proto3', - extension_ranges=[], - oneofs=[ - ], - serialized_start=697, - serialized_end=787, -) - - -_CONFIGUPDATE = _descriptor.Descriptor( - name='ConfigUpdate', - full_name='common.ConfigUpdate', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='channel_id', full_name='common.ConfigUpdate.channel_id', index=0, - number=1, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=_b("").decode('utf-8'), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='read_set', full_name='common.ConfigUpdate.read_set', index=1, - number=2, type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='write_set', full_name='common.ConfigUpdate.write_set', index=2, - number=3, type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - options=None, - is_extendable=False, - syntax='proto3', - extension_ranges=[], - oneofs=[ - ], - serialized_start=789, - serialized_end=902, -) - - -_CONFIGGROUP_GROUPSENTRY = _descriptor.Descriptor( - name='GroupsEntry', - full_name='common.ConfigGroup.GroupsEntry', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='key', full_name='common.ConfigGroup.GroupsEntry.key', 
index=0, - number=1, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=_b("").decode('utf-8'), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='value', full_name='common.ConfigGroup.GroupsEntry.value', index=1, - number=2, type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - options=_descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001')), - is_extendable=False, - syntax='proto3', - extension_ranges=[], - oneofs=[ - ], - serialized_start=1108, - serialized_end=1174, -) - -_CONFIGGROUP_VALUESENTRY = _descriptor.Descriptor( - name='ValuesEntry', - full_name='common.ConfigGroup.ValuesEntry', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='key', full_name='common.ConfigGroup.ValuesEntry.key', index=0, - number=1, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=_b("").decode('utf-8'), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='value', full_name='common.ConfigGroup.ValuesEntry.value', index=1, - number=2, type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - options=_descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001')), - is_extendable=False, - syntax='proto3', - extension_ranges=[], - oneofs=[ - ], - serialized_start=1176, - serialized_end=1242, -) - -_CONFIGGROUP_POLICIESENTRY = 
_descriptor.Descriptor( - name='PoliciesEntry', - full_name='common.ConfigGroup.PoliciesEntry', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='key', full_name='common.ConfigGroup.PoliciesEntry.key', index=0, - number=1, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=_b("").decode('utf-8'), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='value', full_name='common.ConfigGroup.PoliciesEntry.value', index=1, - number=2, type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - options=_descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001')), - is_extendable=False, - syntax='proto3', - extension_ranges=[], - oneofs=[ - ], - serialized_start=1244, - serialized_end=1313, -) - -_CONFIGGROUP = _descriptor.Descriptor( - name='ConfigGroup', - full_name='common.ConfigGroup', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='version', full_name='common.ConfigGroup.version', index=0, - number=1, type=4, cpp_type=4, label=1, - has_default_value=False, default_value=0, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='groups', full_name='common.ConfigGroup.groups', index=1, - number=2, type=11, cpp_type=10, label=3, - has_default_value=False, default_value=[], - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='values', full_name='common.ConfigGroup.values', index=2, - number=3, type=11, cpp_type=10, 
label=3, - has_default_value=False, default_value=[], - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='policies', full_name='common.ConfigGroup.policies', index=3, - number=4, type=11, cpp_type=10, label=3, - has_default_value=False, default_value=[], - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='mod_policy', full_name='common.ConfigGroup.mod_policy', index=4, - number=5, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=_b("").decode('utf-8'), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - ], - extensions=[ - ], - nested_types=[_CONFIGGROUP_GROUPSENTRY, _CONFIGGROUP_VALUESENTRY, _CONFIGGROUP_POLICIESENTRY, ], - enum_types=[ - ], - options=None, - is_extendable=False, - syntax='proto3', - extension_ranges=[], - oneofs=[ - ], - serialized_start=905, - serialized_end=1313, -) - - -_CONFIGVALUE = _descriptor.Descriptor( - name='ConfigValue', - full_name='common.ConfigValue', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='version', full_name='common.ConfigValue.version', index=0, - number=1, type=4, cpp_type=4, label=1, - has_default_value=False, default_value=0, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='value', full_name='common.ConfigValue.value', index=1, - number=2, type=12, cpp_type=9, label=1, - has_default_value=False, default_value=_b(""), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='mod_policy', full_name='common.ConfigValue.mod_policy', index=2, - number=3, 
type=9, cpp_type=9, label=1, - has_default_value=False, default_value=_b("").decode('utf-8'), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - options=None, - is_extendable=False, - syntax='proto3', - extension_ranges=[], - oneofs=[ - ], - serialized_start=1315, - serialized_end=1380, -) - - -_CONFIGPOLICY = _descriptor.Descriptor( - name='ConfigPolicy', - full_name='common.ConfigPolicy', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='version', full_name='common.ConfigPolicy.version', index=0, - number=1, type=4, cpp_type=4, label=1, - has_default_value=False, default_value=0, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='policy', full_name='common.ConfigPolicy.policy', index=1, - number=2, type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='mod_policy', full_name='common.ConfigPolicy.mod_policy', index=2, - number=3, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=_b("").decode('utf-8'), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - options=None, - is_extendable=False, - syntax='proto3', - extension_ranges=[], - oneofs=[ - ], - serialized_start=1382, - serialized_end=1465, -) - - -_CONFIGSIGNATURE = _descriptor.Descriptor( - name='ConfigSignature', - full_name='common.ConfigSignature', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='signature_header', 
full_name='common.ConfigSignature.signature_header', index=0, - number=1, type=12, cpp_type=9, label=1, - has_default_value=False, default_value=_b(""), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='signature', full_name='common.ConfigSignature.signature', index=1, - number=2, type=12, cpp_type=9, label=1, - has_default_value=False, default_value=_b(""), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - options=None, - is_extendable=False, - syntax='proto3', - extension_ranges=[], - oneofs=[ - ], - serialized_start=1467, - serialized_end=1529, -) - -_CONFIGENVELOPE.fields_by_name['config'].message_type = _CONFIG -_CONFIGENVELOPE.fields_by_name['last_update'].message_type = common_dot_common__pb2._ENVELOPE -_CONFIGGROUPSCHEMA_GROUPSENTRY.fields_by_name['value'].message_type = _CONFIGGROUPSCHEMA -_CONFIGGROUPSCHEMA_GROUPSENTRY.containing_type = _CONFIGGROUPSCHEMA -_CONFIGGROUPSCHEMA_VALUESENTRY.fields_by_name['value'].message_type = _CONFIGVALUESCHEMA -_CONFIGGROUPSCHEMA_VALUESENTRY.containing_type = _CONFIGGROUPSCHEMA -_CONFIGGROUPSCHEMA_POLICIESENTRY.fields_by_name['value'].message_type = _CONFIGPOLICYSCHEMA -_CONFIGGROUPSCHEMA_POLICIESENTRY.containing_type = _CONFIGGROUPSCHEMA -_CONFIGGROUPSCHEMA.fields_by_name['groups'].message_type = _CONFIGGROUPSCHEMA_GROUPSENTRY -_CONFIGGROUPSCHEMA.fields_by_name['values'].message_type = _CONFIGGROUPSCHEMA_VALUESENTRY -_CONFIGGROUPSCHEMA.fields_by_name['policies'].message_type = _CONFIGGROUPSCHEMA_POLICIESENTRY -_CONFIG.fields_by_name['channel_group'].message_type = _CONFIGGROUP -_CONFIGUPDATEENVELOPE.fields_by_name['signatures'].message_type = _CONFIGSIGNATURE -_CONFIGUPDATE.fields_by_name['read_set'].message_type = _CONFIGGROUP 
-_CONFIGUPDATE.fields_by_name['write_set'].message_type = _CONFIGGROUP -_CONFIGGROUP_GROUPSENTRY.fields_by_name['value'].message_type = _CONFIGGROUP -_CONFIGGROUP_GROUPSENTRY.containing_type = _CONFIGGROUP -_CONFIGGROUP_VALUESENTRY.fields_by_name['value'].message_type = _CONFIGVALUE -_CONFIGGROUP_VALUESENTRY.containing_type = _CONFIGGROUP -_CONFIGGROUP_POLICIESENTRY.fields_by_name['value'].message_type = _CONFIGPOLICY -_CONFIGGROUP_POLICIESENTRY.containing_type = _CONFIGGROUP -_CONFIGGROUP.fields_by_name['groups'].message_type = _CONFIGGROUP_GROUPSENTRY -_CONFIGGROUP.fields_by_name['values'].message_type = _CONFIGGROUP_VALUESENTRY -_CONFIGGROUP.fields_by_name['policies'].message_type = _CONFIGGROUP_POLICIESENTRY -_CONFIGPOLICY.fields_by_name['policy'].message_type = common_dot_policies__pb2._POLICY -DESCRIPTOR.message_types_by_name['ConfigEnvelope'] = _CONFIGENVELOPE -DESCRIPTOR.message_types_by_name['ConfigGroupSchema'] = _CONFIGGROUPSCHEMA -DESCRIPTOR.message_types_by_name['ConfigValueSchema'] = _CONFIGVALUESCHEMA -DESCRIPTOR.message_types_by_name['ConfigPolicySchema'] = _CONFIGPOLICYSCHEMA -DESCRIPTOR.message_types_by_name['Config'] = _CONFIG -DESCRIPTOR.message_types_by_name['ConfigUpdateEnvelope'] = _CONFIGUPDATEENVELOPE -DESCRIPTOR.message_types_by_name['ConfigUpdate'] = _CONFIGUPDATE -DESCRIPTOR.message_types_by_name['ConfigGroup'] = _CONFIGGROUP -DESCRIPTOR.message_types_by_name['ConfigValue'] = _CONFIGVALUE -DESCRIPTOR.message_types_by_name['ConfigPolicy'] = _CONFIGPOLICY -DESCRIPTOR.message_types_by_name['ConfigSignature'] = _CONFIGSIGNATURE - -ConfigEnvelope = _reflection.GeneratedProtocolMessageType('ConfigEnvelope', (_message.Message,), dict( - DESCRIPTOR = _CONFIGENVELOPE, - __module__ = 'common.configtx_pb2' - # @@protoc_insertion_point(class_scope:common.ConfigEnvelope) - )) -_sym_db.RegisterMessage(ConfigEnvelope) - -ConfigGroupSchema = _reflection.GeneratedProtocolMessageType('ConfigGroupSchema', (_message.Message,), dict( - - GroupsEntry = 
_reflection.GeneratedProtocolMessageType('GroupsEntry', (_message.Message,), dict( - DESCRIPTOR = _CONFIGGROUPSCHEMA_GROUPSENTRY, - __module__ = 'common.configtx_pb2' - # @@protoc_insertion_point(class_scope:common.ConfigGroupSchema.GroupsEntry) - )) - , - - ValuesEntry = _reflection.GeneratedProtocolMessageType('ValuesEntry', (_message.Message,), dict( - DESCRIPTOR = _CONFIGGROUPSCHEMA_VALUESENTRY, - __module__ = 'common.configtx_pb2' - # @@protoc_insertion_point(class_scope:common.ConfigGroupSchema.ValuesEntry) - )) - , - - PoliciesEntry = _reflection.GeneratedProtocolMessageType('PoliciesEntry', (_message.Message,), dict( - DESCRIPTOR = _CONFIGGROUPSCHEMA_POLICIESENTRY, - __module__ = 'common.configtx_pb2' - # @@protoc_insertion_point(class_scope:common.ConfigGroupSchema.PoliciesEntry) - )) - , - DESCRIPTOR = _CONFIGGROUPSCHEMA, - __module__ = 'common.configtx_pb2' - # @@protoc_insertion_point(class_scope:common.ConfigGroupSchema) - )) -_sym_db.RegisterMessage(ConfigGroupSchema) -_sym_db.RegisterMessage(ConfigGroupSchema.GroupsEntry) -_sym_db.RegisterMessage(ConfigGroupSchema.ValuesEntry) -_sym_db.RegisterMessage(ConfigGroupSchema.PoliciesEntry) - -ConfigValueSchema = _reflection.GeneratedProtocolMessageType('ConfigValueSchema', (_message.Message,), dict( - DESCRIPTOR = _CONFIGVALUESCHEMA, - __module__ = 'common.configtx_pb2' - # @@protoc_insertion_point(class_scope:common.ConfigValueSchema) - )) -_sym_db.RegisterMessage(ConfigValueSchema) - -ConfigPolicySchema = _reflection.GeneratedProtocolMessageType('ConfigPolicySchema', (_message.Message,), dict( - DESCRIPTOR = _CONFIGPOLICYSCHEMA, - __module__ = 'common.configtx_pb2' - # @@protoc_insertion_point(class_scope:common.ConfigPolicySchema) - )) -_sym_db.RegisterMessage(ConfigPolicySchema) - -Config = _reflection.GeneratedProtocolMessageType('Config', (_message.Message,), dict( - DESCRIPTOR = _CONFIG, - __module__ = 'common.configtx_pb2' - # @@protoc_insertion_point(class_scope:common.Config) - )) 
-_sym_db.RegisterMessage(Config) - -ConfigUpdateEnvelope = _reflection.GeneratedProtocolMessageType('ConfigUpdateEnvelope', (_message.Message,), dict( - DESCRIPTOR = _CONFIGUPDATEENVELOPE, - __module__ = 'common.configtx_pb2' - # @@protoc_insertion_point(class_scope:common.ConfigUpdateEnvelope) - )) -_sym_db.RegisterMessage(ConfigUpdateEnvelope) - -ConfigUpdate = _reflection.GeneratedProtocolMessageType('ConfigUpdate', (_message.Message,), dict( - DESCRIPTOR = _CONFIGUPDATE, - __module__ = 'common.configtx_pb2' - # @@protoc_insertion_point(class_scope:common.ConfigUpdate) - )) -_sym_db.RegisterMessage(ConfigUpdate) - -ConfigGroup = _reflection.GeneratedProtocolMessageType('ConfigGroup', (_message.Message,), dict( - - GroupsEntry = _reflection.GeneratedProtocolMessageType('GroupsEntry', (_message.Message,), dict( - DESCRIPTOR = _CONFIGGROUP_GROUPSENTRY, - __module__ = 'common.configtx_pb2' - # @@protoc_insertion_point(class_scope:common.ConfigGroup.GroupsEntry) - )) - , - - ValuesEntry = _reflection.GeneratedProtocolMessageType('ValuesEntry', (_message.Message,), dict( - DESCRIPTOR = _CONFIGGROUP_VALUESENTRY, - __module__ = 'common.configtx_pb2' - # @@protoc_insertion_point(class_scope:common.ConfigGroup.ValuesEntry) - )) - , - - PoliciesEntry = _reflection.GeneratedProtocolMessageType('PoliciesEntry', (_message.Message,), dict( - DESCRIPTOR = _CONFIGGROUP_POLICIESENTRY, - __module__ = 'common.configtx_pb2' - # @@protoc_insertion_point(class_scope:common.ConfigGroup.PoliciesEntry) - )) - , - DESCRIPTOR = _CONFIGGROUP, - __module__ = 'common.configtx_pb2' - # @@protoc_insertion_point(class_scope:common.ConfigGroup) - )) -_sym_db.RegisterMessage(ConfigGroup) -_sym_db.RegisterMessage(ConfigGroup.GroupsEntry) -_sym_db.RegisterMessage(ConfigGroup.ValuesEntry) -_sym_db.RegisterMessage(ConfigGroup.PoliciesEntry) - -ConfigValue = _reflection.GeneratedProtocolMessageType('ConfigValue', (_message.Message,), dict( - DESCRIPTOR = _CONFIGVALUE, - __module__ = 
'common.configtx_pb2' - # @@protoc_insertion_point(class_scope:common.ConfigValue) - )) -_sym_db.RegisterMessage(ConfigValue) - -ConfigPolicy = _reflection.GeneratedProtocolMessageType('ConfigPolicy', (_message.Message,), dict( - DESCRIPTOR = _CONFIGPOLICY, - __module__ = 'common.configtx_pb2' - # @@protoc_insertion_point(class_scope:common.ConfigPolicy) - )) -_sym_db.RegisterMessage(ConfigPolicy) - -ConfigSignature = _reflection.GeneratedProtocolMessageType('ConfigSignature', (_message.Message,), dict( - DESCRIPTOR = _CONFIGSIGNATURE, - __module__ = 'common.configtx_pb2' - # @@protoc_insertion_point(class_scope:common.ConfigSignature) - )) -_sym_db.RegisterMessage(ConfigSignature) - - -DESCRIPTOR.has_options = True -DESCRIPTOR._options = _descriptor._ParseOptions(descriptor_pb2.FileOptions(), _b('\n$org.hyperledger.fabric.protos.commonZ+github.com/hyperledger/fabric/protos/common')) -_CONFIGGROUPSCHEMA_GROUPSENTRY.has_options = True -_CONFIGGROUPSCHEMA_GROUPSENTRY._options = _descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001')) -_CONFIGGROUPSCHEMA_VALUESENTRY.has_options = True -_CONFIGGROUPSCHEMA_VALUESENTRY._options = _descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001')) -_CONFIGGROUPSCHEMA_POLICIESENTRY.has_options = True -_CONFIGGROUPSCHEMA_POLICIESENTRY._options = _descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001')) -_CONFIGGROUP_GROUPSENTRY.has_options = True -_CONFIGGROUP_GROUPSENTRY._options = _descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001')) -_CONFIGGROUP_VALUESENTRY.has_options = True -_CONFIGGROUP_VALUESENTRY._options = _descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001')) -_CONFIGGROUP_POLICIESENTRY.has_options = True -_CONFIGGROUP_POLICIESENTRY._options = _descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001')) -try: - # THESE ELEMENTS WILL BE DEPRECATED. - # Please use the generated *_pb2_grpc.py files instead. 
- import grpc - from grpc.framework.common import cardinality - from grpc.framework.interfaces.face import utilities as face_utilities - from grpc.beta import implementations as beta_implementations - from grpc.beta import interfaces as beta_interfaces -except ImportError: - pass -# @@protoc_insertion_point(module_scope) diff --git a/app/platform/fabric/e2e-test/feature/common/configtx_pb2_grpc.py b/app/platform/fabric/e2e-test/feature/common/configtx_pb2_grpc.py deleted file mode 100644 index d5557c123..000000000 --- a/app/platform/fabric/e2e-test/feature/common/configtx_pb2_grpc.py +++ /dev/null @@ -1,5 +0,0 @@ -# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT! -import grpc -from grpc.framework.common import cardinality -from grpc.framework.interfaces.face import utilities as face_utilities - diff --git a/app/platform/fabric/e2e-test/feature/common/configuration_pb2.py b/app/platform/fabric/e2e-test/feature/common/configuration_pb2.py deleted file mode 100644 index afcadc6f4..000000000 --- a/app/platform/fabric/e2e-test/feature/common/configuration_pb2.py +++ /dev/null @@ -1,319 +0,0 @@ -# Generated by the protocol buffer compiler. DO NOT EDIT! 
-# source: common/configuration.proto - -import sys -_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1')) -from google.protobuf import descriptor as _descriptor -from google.protobuf import message as _message -from google.protobuf import reflection as _reflection -from google.protobuf import symbol_database as _symbol_database -from google.protobuf import descriptor_pb2 -# @@protoc_insertion_point(imports) - -_sym_db = _symbol_database.Default() - - - - -DESCRIPTOR = _descriptor.FileDescriptor( - name='common/configuration.proto', - package='common', - syntax='proto3', - serialized_pb=_b('\n\x1a\x63ommon/configuration.proto\x12\x06\x63ommon\" \n\x10HashingAlgorithm\x12\x0c\n\x04name\x18\x01 \x01(\t\"*\n\x19\x42lockDataHashingStructure\x12\r\n\x05width\x18\x01 \x01(\r\"%\n\x10OrdererAddresses\x12\x11\n\taddresses\x18\x01 \x03(\t\"\x1a\n\nConsortium\x12\x0c\n\x04name\x18\x01 \x01(\t\"\x95\x01\n\x0c\x43\x61pabilities\x12<\n\x0c\x63\x61pabilities\x18\x01 \x03(\x0b\x32&.common.Capabilities.CapabilitiesEntry\x1aG\n\x11\x43\x61pabilitiesEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12!\n\x05value\x18\x02 \x01(\x0b\x32\x12.common.Capability:\x02\x38\x01\"\x0c\n\nCapabilityBS\n$org.hyperledger.fabric.protos.commonZ+github.com/hyperledger/fabric/protos/commonb\x06proto3') -) - - - - -_HASHINGALGORITHM = _descriptor.Descriptor( - name='HashingAlgorithm', - full_name='common.HashingAlgorithm', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='name', full_name='common.HashingAlgorithm.name', index=0, - number=1, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=_b("").decode('utf-8'), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - options=None, - is_extendable=False, - syntax='proto3', - extension_ranges=[], - oneofs=[ - ], - serialized_start=38, 
- serialized_end=70, -) - - -_BLOCKDATAHASHINGSTRUCTURE = _descriptor.Descriptor( - name='BlockDataHashingStructure', - full_name='common.BlockDataHashingStructure', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='width', full_name='common.BlockDataHashingStructure.width', index=0, - number=1, type=13, cpp_type=3, label=1, - has_default_value=False, default_value=0, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - options=None, - is_extendable=False, - syntax='proto3', - extension_ranges=[], - oneofs=[ - ], - serialized_start=72, - serialized_end=114, -) - - -_ORDERERADDRESSES = _descriptor.Descriptor( - name='OrdererAddresses', - full_name='common.OrdererAddresses', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='addresses', full_name='common.OrdererAddresses.addresses', index=0, - number=1, type=9, cpp_type=9, label=3, - has_default_value=False, default_value=[], - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - options=None, - is_extendable=False, - syntax='proto3', - extension_ranges=[], - oneofs=[ - ], - serialized_start=116, - serialized_end=153, -) - - -_CONSORTIUM = _descriptor.Descriptor( - name='Consortium', - full_name='common.Consortium', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='name', full_name='common.Consortium.name', index=0, - number=1, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=_b("").decode('utf-8'), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - ], - extensions=[ - ], - nested_types=[], - 
enum_types=[ - ], - options=None, - is_extendable=False, - syntax='proto3', - extension_ranges=[], - oneofs=[ - ], - serialized_start=155, - serialized_end=181, -) - - -_CAPABILITIES_CAPABILITIESENTRY = _descriptor.Descriptor( - name='CapabilitiesEntry', - full_name='common.Capabilities.CapabilitiesEntry', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='key', full_name='common.Capabilities.CapabilitiesEntry.key', index=0, - number=1, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=_b("").decode('utf-8'), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='value', full_name='common.Capabilities.CapabilitiesEntry.value', index=1, - number=2, type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - options=_descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001')), - is_extendable=False, - syntax='proto3', - extension_ranges=[], - oneofs=[ - ], - serialized_start=262, - serialized_end=333, -) - -_CAPABILITIES = _descriptor.Descriptor( - name='Capabilities', - full_name='common.Capabilities', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='capabilities', full_name='common.Capabilities.capabilities', index=0, - number=1, type=11, cpp_type=10, label=3, - has_default_value=False, default_value=[], - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - ], - extensions=[ - ], - nested_types=[_CAPABILITIES_CAPABILITIESENTRY, ], - enum_types=[ - ], - options=None, - is_extendable=False, - syntax='proto3', - extension_ranges=[], - oneofs=[ - ], - 
serialized_start=184, - serialized_end=333, -) - - -_CAPABILITY = _descriptor.Descriptor( - name='Capability', - full_name='common.Capability', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - options=None, - is_extendable=False, - syntax='proto3', - extension_ranges=[], - oneofs=[ - ], - serialized_start=335, - serialized_end=347, -) - -_CAPABILITIES_CAPABILITIESENTRY.fields_by_name['value'].message_type = _CAPABILITY -_CAPABILITIES_CAPABILITIESENTRY.containing_type = _CAPABILITIES -_CAPABILITIES.fields_by_name['capabilities'].message_type = _CAPABILITIES_CAPABILITIESENTRY -DESCRIPTOR.message_types_by_name['HashingAlgorithm'] = _HASHINGALGORITHM -DESCRIPTOR.message_types_by_name['BlockDataHashingStructure'] = _BLOCKDATAHASHINGSTRUCTURE -DESCRIPTOR.message_types_by_name['OrdererAddresses'] = _ORDERERADDRESSES -DESCRIPTOR.message_types_by_name['Consortium'] = _CONSORTIUM -DESCRIPTOR.message_types_by_name['Capabilities'] = _CAPABILITIES -DESCRIPTOR.message_types_by_name['Capability'] = _CAPABILITY -_sym_db.RegisterFileDescriptor(DESCRIPTOR) - -HashingAlgorithm = _reflection.GeneratedProtocolMessageType('HashingAlgorithm', (_message.Message,), dict( - DESCRIPTOR = _HASHINGALGORITHM, - __module__ = 'common.configuration_pb2' - # @@protoc_insertion_point(class_scope:common.HashingAlgorithm) - )) -_sym_db.RegisterMessage(HashingAlgorithm) - -BlockDataHashingStructure = _reflection.GeneratedProtocolMessageType('BlockDataHashingStructure', (_message.Message,), dict( - DESCRIPTOR = _BLOCKDATAHASHINGSTRUCTURE, - __module__ = 'common.configuration_pb2' - # @@protoc_insertion_point(class_scope:common.BlockDataHashingStructure) - )) -_sym_db.RegisterMessage(BlockDataHashingStructure) - -OrdererAddresses = _reflection.GeneratedProtocolMessageType('OrdererAddresses', (_message.Message,), dict( - DESCRIPTOR = _ORDERERADDRESSES, - __module__ = 'common.configuration_pb2' - # 
@@protoc_insertion_point(class_scope:common.OrdererAddresses) - )) -_sym_db.RegisterMessage(OrdererAddresses) - -Consortium = _reflection.GeneratedProtocolMessageType('Consortium', (_message.Message,), dict( - DESCRIPTOR = _CONSORTIUM, - __module__ = 'common.configuration_pb2' - # @@protoc_insertion_point(class_scope:common.Consortium) - )) -_sym_db.RegisterMessage(Consortium) - -Capabilities = _reflection.GeneratedProtocolMessageType('Capabilities', (_message.Message,), dict( - - CapabilitiesEntry = _reflection.GeneratedProtocolMessageType('CapabilitiesEntry', (_message.Message,), dict( - DESCRIPTOR = _CAPABILITIES_CAPABILITIESENTRY, - __module__ = 'common.configuration_pb2' - # @@protoc_insertion_point(class_scope:common.Capabilities.CapabilitiesEntry) - )) - , - DESCRIPTOR = _CAPABILITIES, - __module__ = 'common.configuration_pb2' - # @@protoc_insertion_point(class_scope:common.Capabilities) - )) -_sym_db.RegisterMessage(Capabilities) -_sym_db.RegisterMessage(Capabilities.CapabilitiesEntry) - -Capability = _reflection.GeneratedProtocolMessageType('Capability', (_message.Message,), dict( - DESCRIPTOR = _CAPABILITY, - __module__ = 'common.configuration_pb2' - # @@protoc_insertion_point(class_scope:common.Capability) - )) -_sym_db.RegisterMessage(Capability) - - -DESCRIPTOR.has_options = True -DESCRIPTOR._options = _descriptor._ParseOptions(descriptor_pb2.FileOptions(), _b('\n$org.hyperledger.fabric.protos.commonZ+github.com/hyperledger/fabric/protos/common')) -_CAPABILITIES_CAPABILITIESENTRY.has_options = True -_CAPABILITIES_CAPABILITIESENTRY._options = _descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001')) -try: - # THESE ELEMENTS WILL BE DEPRECATED. - # Please use the generated *_pb2_grpc.py files instead. 
- import grpc - from grpc.beta import implementations as beta_implementations - from grpc.beta import interfaces as beta_interfaces - from grpc.framework.common import cardinality - from grpc.framework.interfaces.face import utilities as face_utilities -except ImportError: - pass -# @@protoc_insertion_point(module_scope) diff --git a/app/platform/fabric/e2e-test/feature/common/configuration_pb2_grpc.py b/app/platform/fabric/e2e-test/feature/common/configuration_pb2_grpc.py deleted file mode 100644 index a89435267..000000000 --- a/app/platform/fabric/e2e-test/feature/common/configuration_pb2_grpc.py +++ /dev/null @@ -1,3 +0,0 @@ -# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT! -import grpc - diff --git a/app/platform/fabric/e2e-test/feature/common/ledger_pb2.py b/app/platform/fabric/e2e-test/feature/common/ledger_pb2.py deleted file mode 100644 index e9c45bebd..000000000 --- a/app/platform/fabric/e2e-test/feature/common/ledger_pb2.py +++ /dev/null @@ -1,95 +0,0 @@ -# Generated by the protocol buffer compiler. DO NOT EDIT! 
-# source: common/ledger.proto - -import sys -_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1')) -from google.protobuf import descriptor as _descriptor -from google.protobuf import message as _message -from google.protobuf import reflection as _reflection -from google.protobuf import symbol_database as _symbol_database -from google.protobuf import descriptor_pb2 -# @@protoc_insertion_point(imports) - -_sym_db = _symbol_database.Default() - - - - -DESCRIPTOR = _descriptor.FileDescriptor( - name='common/ledger.proto', - package='common', - syntax='proto3', - serialized_pb=_b('\n\x13\x63ommon/ledger.proto\x12\x06\x63ommon\"U\n\x0e\x42lockchainInfo\x12\x0e\n\x06height\x18\x01 \x01(\x04\x12\x18\n\x10\x63urrentBlockHash\x18\x02 \x01(\x0c\x12\x19\n\x11previousBlockHash\x18\x03 \x01(\x0c\x42S\n$org.hyperledger.fabric.protos.commonZ+github.com/hyperledger/fabric/protos/commonb\x06proto3') -) -_sym_db.RegisterFileDescriptor(DESCRIPTOR) - - - - -_BLOCKCHAININFO = _descriptor.Descriptor( - name='BlockchainInfo', - full_name='common.BlockchainInfo', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='height', full_name='common.BlockchainInfo.height', index=0, - number=1, type=4, cpp_type=4, label=1, - has_default_value=False, default_value=0, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='currentBlockHash', full_name='common.BlockchainInfo.currentBlockHash', index=1, - number=2, type=12, cpp_type=9, label=1, - has_default_value=False, default_value=_b(""), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='previousBlockHash', full_name='common.BlockchainInfo.previousBlockHash', index=2, - number=3, type=12, cpp_type=9, label=1, - has_default_value=False, default_value=_b(""), - 
message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - options=None, - is_extendable=False, - syntax='proto3', - extension_ranges=[], - oneofs=[ - ], - serialized_start=31, - serialized_end=116, -) - -DESCRIPTOR.message_types_by_name['BlockchainInfo'] = _BLOCKCHAININFO - -BlockchainInfo = _reflection.GeneratedProtocolMessageType('BlockchainInfo', (_message.Message,), dict( - DESCRIPTOR = _BLOCKCHAININFO, - __module__ = 'common.ledger_pb2' - # @@protoc_insertion_point(class_scope:common.BlockchainInfo) - )) -_sym_db.RegisterMessage(BlockchainInfo) - - -DESCRIPTOR.has_options = True -DESCRIPTOR._options = _descriptor._ParseOptions(descriptor_pb2.FileOptions(), _b('\n$org.hyperledger.fabric.protos.commonZ+github.com/hyperledger/fabric/protos/common')) -try: - # THESE ELEMENTS WILL BE DEPRECATED. - # Please use the generated *_pb2_grpc.py files instead. - import grpc - from grpc.framework.common import cardinality - from grpc.framework.interfaces.face import utilities as face_utilities - from grpc.beta import implementations as beta_implementations - from grpc.beta import interfaces as beta_interfaces -except ImportError: - pass -# @@protoc_insertion_point(module_scope) diff --git a/app/platform/fabric/e2e-test/feature/common/ledger_pb2_grpc.py b/app/platform/fabric/e2e-test/feature/common/ledger_pb2_grpc.py deleted file mode 100644 index d5557c123..000000000 --- a/app/platform/fabric/e2e-test/feature/common/ledger_pb2_grpc.py +++ /dev/null @@ -1,5 +0,0 @@ -# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT! 
-import grpc -from grpc.framework.common import cardinality -from grpc.framework.interfaces.face import utilities as face_utilities - diff --git a/app/platform/fabric/e2e-test/feature/common/policies_pb2.py b/app/platform/fabric/e2e-test/feature/common/policies_pb2.py deleted file mode 100644 index 37aa2e5cc..000000000 --- a/app/platform/fabric/e2e-test/feature/common/policies_pb2.py +++ /dev/null @@ -1,355 +0,0 @@ -# Generated by the protocol buffer compiler. DO NOT EDIT! -# source: common/policies.proto - -import sys -_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1')) -from google.protobuf import descriptor as _descriptor -from google.protobuf import message as _message -from google.protobuf import reflection as _reflection -from google.protobuf import symbol_database as _symbol_database -from google.protobuf import descriptor_pb2 -# @@protoc_insertion_point(imports) - -_sym_db = _symbol_database.Default() - - -from msp import msp_principal_pb2 as msp_dot_msp__principal__pb2 - - -DESCRIPTOR = _descriptor.FileDescriptor( - name='common/policies.proto', - package='common', - syntax='proto3', - serialized_pb=_b('\n\x15\x63ommon/policies.proto\x12\x06\x63ommon\x1a\x17msp/msp_principal.proto\"k\n\x06Policy\x12\x0c\n\x04type\x18\x01 \x01(\x05\x12\r\n\x05value\x18\x02 \x01(\x0c\"D\n\nPolicyType\x12\x0b\n\x07UNKNOWN\x10\x00\x12\r\n\tSIGNATURE\x10\x01\x12\x07\n\x03MSP\x10\x02\x12\x11\n\rIMPLICIT_META\x10\x03\"{\n\x17SignaturePolicyEnvelope\x12\x0f\n\x07version\x18\x01 \x01(\x05\x12%\n\x04rule\x18\x02 \x01(\x0b\x32\x17.common.SignaturePolicy\x12(\n\nidentities\x18\x03 \x03(\x0b\x32\x14.common.MSPPrincipal\"\x9f\x01\n\x0fSignaturePolicy\x12\x13\n\tsigned_by\x18\x01 \x01(\x05H\x00\x12\x32\n\x08n_out_of\x18\x02 \x01(\x0b\x32\x1e.common.SignaturePolicy.NOutOfH\x00\x1a;\n\x06NOutOf\x12\t\n\x01n\x18\x01 \x01(\x05\x12&\n\x05rules\x18\x02 \x03(\x0b\x32\x17.common.SignaturePolicyB\x06\n\x04Type\"\x7f\n\x12ImplicitMetaPolicy\x12\x12\n\nsub_policy\x18\x01 
\x01(\t\x12-\n\x04rule\x18\x02 \x01(\x0e\x32\x1f.common.ImplicitMetaPolicy.Rule\"&\n\x04Rule\x12\x07\n\x03\x41NY\x10\x00\x12\x07\n\x03\x41LL\x10\x01\x12\x0c\n\x08MAJORITY\x10\x02\x42S\n$org.hyperledger.fabric.protos.commonZ+github.com/hyperledger/fabric/protos/commonb\x06proto3') - , - dependencies=[msp_dot_msp__principal__pb2.DESCRIPTOR,]) -_sym_db.RegisterFileDescriptor(DESCRIPTOR) - - - -_POLICY_POLICYTYPE = _descriptor.EnumDescriptor( - name='PolicyType', - full_name='common.Policy.PolicyType', - filename=None, - file=DESCRIPTOR, - values=[ - _descriptor.EnumValueDescriptor( - name='UNKNOWN', index=0, number=0, - options=None, - type=None), - _descriptor.EnumValueDescriptor( - name='SIGNATURE', index=1, number=1, - options=None, - type=None), - _descriptor.EnumValueDescriptor( - name='MSP', index=2, number=2, - options=None, - type=None), - _descriptor.EnumValueDescriptor( - name='IMPLICIT_META', index=3, number=3, - options=None, - type=None), - ], - containing_type=None, - options=None, - serialized_start=97, - serialized_end=165, -) -_sym_db.RegisterEnumDescriptor(_POLICY_POLICYTYPE) - -_IMPLICITMETAPOLICY_RULE = _descriptor.EnumDescriptor( - name='Rule', - full_name='common.ImplicitMetaPolicy.Rule', - filename=None, - file=DESCRIPTOR, - values=[ - _descriptor.EnumValueDescriptor( - name='ANY', index=0, number=0, - options=None, - type=None), - _descriptor.EnumValueDescriptor( - name='ALL', index=1, number=1, - options=None, - type=None), - _descriptor.EnumValueDescriptor( - name='MAJORITY', index=2, number=2, - options=None, - type=None), - ], - containing_type=None, - options=None, - serialized_start=543, - serialized_end=581, -) -_sym_db.RegisterEnumDescriptor(_IMPLICITMETAPOLICY_RULE) - - -_POLICY = _descriptor.Descriptor( - name='Policy', - full_name='common.Policy', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='type', full_name='common.Policy.type', index=0, - number=1, type=5, cpp_type=1, 
label=1, - has_default_value=False, default_value=0, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='value', full_name='common.Policy.value', index=1, - number=2, type=12, cpp_type=9, label=1, - has_default_value=False, default_value=_b(""), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - _POLICY_POLICYTYPE, - ], - options=None, - is_extendable=False, - syntax='proto3', - extension_ranges=[], - oneofs=[ - ], - serialized_start=58, - serialized_end=165, -) - - -_SIGNATUREPOLICYENVELOPE = _descriptor.Descriptor( - name='SignaturePolicyEnvelope', - full_name='common.SignaturePolicyEnvelope', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='version', full_name='common.SignaturePolicyEnvelope.version', index=0, - number=1, type=5, cpp_type=1, label=1, - has_default_value=False, default_value=0, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='rule', full_name='common.SignaturePolicyEnvelope.rule', index=1, - number=2, type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='identities', full_name='common.SignaturePolicyEnvelope.identities', index=2, - number=3, type=11, cpp_type=10, label=3, - has_default_value=False, default_value=[], - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - options=None, - is_extendable=False, - syntax='proto3', - 
extension_ranges=[], - oneofs=[ - ], - serialized_start=167, - serialized_end=290, -) - - -_SIGNATUREPOLICY_NOUTOF = _descriptor.Descriptor( - name='NOutOf', - full_name='common.SignaturePolicy.NOutOf', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='n', full_name='common.SignaturePolicy.NOutOf.n', index=0, - number=1, type=5, cpp_type=1, label=1, - has_default_value=False, default_value=0, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='rules', full_name='common.SignaturePolicy.NOutOf.rules', index=1, - number=2, type=11, cpp_type=10, label=3, - has_default_value=False, default_value=[], - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - options=None, - is_extendable=False, - syntax='proto3', - extension_ranges=[], - oneofs=[ - ], - serialized_start=385, - serialized_end=444, -) - -_SIGNATUREPOLICY = _descriptor.Descriptor( - name='SignaturePolicy', - full_name='common.SignaturePolicy', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='signed_by', full_name='common.SignaturePolicy.signed_by', index=0, - number=1, type=5, cpp_type=1, label=1, - has_default_value=False, default_value=0, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='n_out_of', full_name='common.SignaturePolicy.n_out_of', index=1, - number=2, type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - ], - extensions=[ - ], - nested_types=[_SIGNATUREPOLICY_NOUTOF, ], - enum_types=[ - ], - 
options=None, - is_extendable=False, - syntax='proto3', - extension_ranges=[], - oneofs=[ - _descriptor.OneofDescriptor( - name='Type', full_name='common.SignaturePolicy.Type', - index=0, containing_type=None, fields=[]), - ], - serialized_start=293, - serialized_end=452, -) - - -_IMPLICITMETAPOLICY = _descriptor.Descriptor( - name='ImplicitMetaPolicy', - full_name='common.ImplicitMetaPolicy', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='sub_policy', full_name='common.ImplicitMetaPolicy.sub_policy', index=0, - number=1, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=_b("").decode('utf-8'), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='rule', full_name='common.ImplicitMetaPolicy.rule', index=1, - number=2, type=14, cpp_type=8, label=1, - has_default_value=False, default_value=0, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - _IMPLICITMETAPOLICY_RULE, - ], - options=None, - is_extendable=False, - syntax='proto3', - extension_ranges=[], - oneofs=[ - ], - serialized_start=454, - serialized_end=581, -) - -_POLICY_POLICYTYPE.containing_type = _POLICY -_SIGNATUREPOLICYENVELOPE.fields_by_name['rule'].message_type = _SIGNATUREPOLICY -_SIGNATUREPOLICYENVELOPE.fields_by_name['identities'].message_type = msp_dot_msp__principal__pb2._MSPPRINCIPAL -_SIGNATUREPOLICY_NOUTOF.fields_by_name['rules'].message_type = _SIGNATUREPOLICY -_SIGNATUREPOLICY_NOUTOF.containing_type = _SIGNATUREPOLICY -_SIGNATUREPOLICY.fields_by_name['n_out_of'].message_type = _SIGNATUREPOLICY_NOUTOF -_SIGNATUREPOLICY.oneofs_by_name['Type'].fields.append( - _SIGNATUREPOLICY.fields_by_name['signed_by']) -_SIGNATUREPOLICY.fields_by_name['signed_by'].containing_oneof = 
_SIGNATUREPOLICY.oneofs_by_name['Type'] -_SIGNATUREPOLICY.oneofs_by_name['Type'].fields.append( - _SIGNATUREPOLICY.fields_by_name['n_out_of']) -_SIGNATUREPOLICY.fields_by_name['n_out_of'].containing_oneof = _SIGNATUREPOLICY.oneofs_by_name['Type'] -_IMPLICITMETAPOLICY.fields_by_name['rule'].enum_type = _IMPLICITMETAPOLICY_RULE -_IMPLICITMETAPOLICY_RULE.containing_type = _IMPLICITMETAPOLICY -DESCRIPTOR.message_types_by_name['Policy'] = _POLICY -DESCRIPTOR.message_types_by_name['SignaturePolicyEnvelope'] = _SIGNATUREPOLICYENVELOPE -DESCRIPTOR.message_types_by_name['SignaturePolicy'] = _SIGNATUREPOLICY -DESCRIPTOR.message_types_by_name['ImplicitMetaPolicy'] = _IMPLICITMETAPOLICY - -Policy = _reflection.GeneratedProtocolMessageType('Policy', (_message.Message,), dict( - DESCRIPTOR = _POLICY, - __module__ = 'common.policies_pb2' - # @@protoc_insertion_point(class_scope:common.Policy) - )) -_sym_db.RegisterMessage(Policy) - -SignaturePolicyEnvelope = _reflection.GeneratedProtocolMessageType('SignaturePolicyEnvelope', (_message.Message,), dict( - DESCRIPTOR = _SIGNATUREPOLICYENVELOPE, - __module__ = 'common.policies_pb2' - # @@protoc_insertion_point(class_scope:common.SignaturePolicyEnvelope) - )) -_sym_db.RegisterMessage(SignaturePolicyEnvelope) - -SignaturePolicy = _reflection.GeneratedProtocolMessageType('SignaturePolicy', (_message.Message,), dict( - - NOutOf = _reflection.GeneratedProtocolMessageType('NOutOf', (_message.Message,), dict( - DESCRIPTOR = _SIGNATUREPOLICY_NOUTOF, - __module__ = 'common.policies_pb2' - # @@protoc_insertion_point(class_scope:common.SignaturePolicy.NOutOf) - )) - , - DESCRIPTOR = _SIGNATUREPOLICY, - __module__ = 'common.policies_pb2' - # @@protoc_insertion_point(class_scope:common.SignaturePolicy) - )) -_sym_db.RegisterMessage(SignaturePolicy) -_sym_db.RegisterMessage(SignaturePolicy.NOutOf) - -ImplicitMetaPolicy = _reflection.GeneratedProtocolMessageType('ImplicitMetaPolicy', (_message.Message,), dict( - DESCRIPTOR = _IMPLICITMETAPOLICY, - 
__module__ = 'common.policies_pb2' - # @@protoc_insertion_point(class_scope:common.ImplicitMetaPolicy) - )) -_sym_db.RegisterMessage(ImplicitMetaPolicy) - - -DESCRIPTOR.has_options = True -DESCRIPTOR._options = _descriptor._ParseOptions(descriptor_pb2.FileOptions(), _b('\n$org.hyperledger.fabric.protos.commonZ+github.com/hyperledger/fabric/protos/common')) -try: - # THESE ELEMENTS WILL BE DEPRECATED. - # Please use the generated *_pb2_grpc.py files instead. - import grpc - from grpc.beta import implementations as beta_implementations - from grpc.beta import interfaces as beta_interfaces - from grpc.framework.common import cardinality - from grpc.framework.interfaces.face import utilities as face_utilities -except ImportError: - pass -# @@protoc_insertion_point(module_scope) diff --git a/app/platform/fabric/e2e-test/feature/common/policies_pb2_grpc.py b/app/platform/fabric/e2e-test/feature/common/policies_pb2_grpc.py deleted file mode 100644 index a89435267..000000000 --- a/app/platform/fabric/e2e-test/feature/common/policies_pb2_grpc.py +++ /dev/null @@ -1,3 +0,0 @@ -# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT! -import grpc - diff --git a/app/platform/fabric/e2e-test/feature/configs/configtx.yaml b/app/platform/fabric/e2e-test/feature/configs/configtx.yaml deleted file mode 100644 index 24f15bd6a..000000000 --- a/app/platform/fabric/e2e-test/feature/configs/configtx.yaml +++ /dev/null @@ -1,510 +0,0 @@ -# Copyright IBM Corp. All Rights Reserved. -# -# SPDX-License-Identifier: Apache-2.0 -# ---- -################################################################################ -# -# SECTION: Capabilities -# -# - This section defines the capabilities of fabric network. This is a new -# concept as of v1.1.0 and should not be utilized in mixed networks with -# v1.0.x peers and orderers. Capabilities define features which must be -# present in a fabric binary for that binary to safely participate in the -# fabric network. 
For instance, if a new MSP type is added, newer binaries -# might recognize and validate the signatures from this type, while older -# binaries without this support would be unable to validate those -# transactions. This could lead to different versions of the fabric binaries -# having different world states. Instead, defining a capability for a channel -# informs those binaries without this capability that they must cease -# processing transactions until they have been upgraded. For v1.0.x if any -# capabilities are defined (including a map with all capabilities turned off) -# then the v1.0.x peer will deliberately crash. -# -################################################################################ -Capabilities: - # Channel capabilities apply to both the orderers and the peers and must be - # supported by both. Set the value of the capability to true to require it. - Global: &ChannelCapabilities - ### I think these recent capabilities wont work unless we update protobufs - ###V1_4_3: true - ###V1_4_2: true - ###V1_3: true - # (V1_2 not defined in fabric) - # Note: older ones are superceded, e.g. V1_3 capabilities include V1_1. - # Thus, those like V1_1 need not be set if V1_3 or later is set true. - # But keep them, to allow disabling V1_3 when bringing up a new test - # network, if desired, possibly in anticipation of an upgrade test. - V1_1: true - - # Orderer capabilities apply only to the orderers, and may be safely - # manipulated without concern for upgrading peers. Set the value of the - # capability to true to require it. - Orderer: &OrdererCapabilities - ### I think these recent capabilities wont work unless we update protobufs - ###V1_4_2: true - # (V1_3 not defined in fabric) - # (V1_2 not defined in fabric) - V1_1: true - - # Application capabilities apply only to the peer network, and may be safely - # manipulated without concern for upgrading orderers. Set the value of the - # capability to true to require it. 
- Application: &ApplicationCapabilities - ### I think these recent capabilities wont work unless we update protobufs - ###V1_4_2: true - ###V1_3: true - V1_2: true - # Note: older ones are superceded, e.g. V1_2 capabilities include V1_1. - # Thus, older ones like V1_1 need not be set if we set V1_2 or later. - # But keep them all; that would allow tester to override and disable - # newer ones like Capabilities.Global.V1_3 and - # Capabilities.Application.V1_3 when bringing up a new test network, - # if desired, possibly in anticipation of an upgrade test. - V1_1: true - -################################################################################ -# -# Section: Organizations -# -# - This section defines the different organizational identities which will -# be referenced later in the configuration. -# -################################################################################ -Organizations: - - - &ExampleCom - Name: ExampleCom - ID: example.com - AdminPrincipal: Role.ADMIN - MSPDir: ./ordererOrganizations/example.com/msp - Policies: - Readers: - Type: Signature - Rule: OR('example.com.member') - Writers: - Type: Signature - Rule: OR('example.com.member') - Admins: - Type: Signature - Rule: OR('example.com.admin') - Endorsement: - Type: Signature - Rule: OR('example.com.member') - - - &Org1ExampleCom - Name: Org1ExampleCom - ID: org1.example.com - MSPDir: ./peerOrganizations/org1.example.com/msp - AdminPrincipal: Role.ADMIN - AnchorPeers: - - Host: peer0.org1.example.com - Port: 7051 - Policies: - Readers: - Type: Signature - Rule: OR('org1.example.com.member') - Writers: - Type: Signature - Rule: OR('org1.example.com.member') - Admins: - Type: Signature - Rule: OR('org1.example.com.admin') - Endorsement: - Type: Signature - Rule: OR('org1.example.com.member') - - - &Org2ExampleCom - Name: Org2ExampleCom - ID: org2.example.com - MSPDir: ./peerOrganizations/org2.example.com/msp - AdminPrincipal: Role.ADMIN - AnchorPeers: - - Host: peer0.org2.example.com - 
Port: 7051 - Policies: - Readers: - Type: Signature - Rule: OR('org2.example.com.member') - Writers: - Type: Signature - Rule: OR('org2.example.com.member') - Admins: - Type: Signature - Rule: OR('org2.example.com.admin') - Endorsement: - Type: Signature - Rule: OR('org2.example.com.member') - -################################################################################ -# -# SECTION: Orderer -# -# - This section defines the values to encode into a config transaction or -# genesis block for orderer related parameters. -# -################################################################################ -Orderer: &OrdererDefaults - - # Orderer Type: The orderer implementation to start. - # Available types are "solo" and "kafka". - OrdererType: solo - - Addresses: - - orderer0.example.com:7050 - - # Batch Timeout: The amount of time to wait before creating a batch. - BatchTimeout: 2s - - # Batch Size: Controls the number of messages batched into a block. - BatchSize: - - # Max Message Count: The maximum number of messages to permit in a - # batch. - MaxMessageCount: 10 - - # Absolute Max Bytes: The absolute maximum number of bytes allowed for - # the serialized messages in a batch. If the "kafka" OrdererType is - # selected, set 'message.max.bytes' and 'replica.fetch.max.bytes' on the - # Kafka brokers to a value that is larger than this one. - AbsoluteMaxBytes: 98 MB - - # Preferred Max Bytes: The preferred maximum number of bytes allowed for - # the serialized messages in a batch. A message larger than the - # preferred max bytes will result in a batch larger than preferred max - # bytes. - PreferredMaxBytes: 512 KB - - # Max Channels is the maximum number of channels to allow on the ordering - # network. When set to 0, this implies no maximum number of channels. - MaxChannels: 0 - - Kafka: - # Brokers: A list of Kafka brokers to which the orderer connects. Edit - # this list to identify the brokers of the ordering service. - # NOTE: Use IP:port notation. 
- Brokers: - - kafka0:9092 - - kafka1:9092 - - kafka2:9092 - - kafka3:9092 - - # Organizations is the list of orgs which are defined as participants on - # the orderer side of the network. - Organizations: - - # Policies defines the set of policies at this level of the config tree - # For Orderer policies, their canonical path is - # /Channel/Orderer/ - Policies: - Readers: - Type: ImplicitMeta - Rule: ANY Readers - Writers: - Type: ImplicitMeta - Rule: ANY Writers - Admins: - Type: ImplicitMeta - Rule: MAJORITY Admins - # BlockValidation specifies what signatures must be included in the block - # from the orderer for the peer to validate it. - BlockValidation: - Type: ImplicitMeta - Rule: ANY Writers - - # Capabilities describes the orderer level capabilities, see the - # dedicated Capabilities section elsewhere in this file for a full - # description - Capabilities: - <<: *OrdererCapabilities - -################################################################################ -# -# CHANNEL -# -# This section defines the values to encode into a config transaction or -# genesis block for channel related parameters. 
-# -################################################################################ -Channel: &ChannelDefaults - # Policies defines the set of policies at this level of the config tree - # For Channel policies, their canonical path is - # /Channel/ - Policies: - # Who may invoke the 'Deliver' API - Readers: - Type: ImplicitMeta - Rule: ANY Readers - # Who may invoke the 'Broadcast' API - Writers: - Type: ImplicitMeta - Rule: ANY Writers - # By default, who may modify elements at this config level - Admins: - Type: ImplicitMeta - Rule: MAJORITY Admins - - - # Capabilities describes the channel level capabilities, see the - # dedicated Capabilities section elsewhere in this file for a full - # description - Capabilities: - <<: *ChannelCapabilities - -################################################################################ -# -# SECTION: Application -# -# - This section defines the values to encode into a config transaction or -# genesis block for application related parameters. -# -################################################################################ -Application: &ApplicationDefaults - ACLs: &ACLsDefault - # This section provides defaults for policies for various resources - # in the system. These "resources" could be functions on system chaincodes - # (e.g., "GetBlockByNumber" on the "qscc" system chaincode) or other resources - # (e.g.,who can receive Block events). This section does NOT specify the resource's - # definition or API, but just the ACL policy for it. 
- # - # User's can override these defaults with their own policy mapping by defining the - # mapping under ACLs in their channel definition - - #---New Lifecycle System Chaincode (_lifecycle) function to policy mapping for access control--# - - # ACL policy for _lifecycle's "CommitChaincodeDefinition" function - _lifecycle/CommitChaincodeDefinition: /Channel/Application/Writers - - # ACL policy for _lifecycle's "QueryChaincodeDefinition" function - _lifecycle/QueryChaincodeDefinition: /Channel/Application/Readers - - # ACL policy for _lifecycle's "QueryNamespaceDefinitions" function - _lifecycle/QueryNamespaceDefinitions: /Channel/Application/Readers - - #---Lifecycle System Chaincode (lscc) function to policy mapping for access control---# - - # ACL policy for lscc's "getid" function - lscc/ChaincodeExists: /Channel/Application/Readers - - # ACL policy for lscc's "getdepspec" function - lscc/GetDeploymentSpec: /Channel/Application/Readers - - # ACL policy for lscc's "getccdata" function - lscc/GetChaincodeData: /Channel/Application/Readers - - # ACL Policy for lscc's "getchaincodes" function - lscc/GetInstantiatedChaincodes: /Channel/Application/Readers - - #---Query System Chaincode (qscc) function to policy mapping for access control---# - - # ACL policy for qscc's "GetChainInfo" function - qscc/GetChainInfo: /Channel/Application/Readers - - # ACL policy for qscc's "GetBlockByNumber" function - qscc/GetBlockByNumber: /Channel/Application/Readers - - # ACL policy for qscc's "GetBlockByHash" function - qscc/GetBlockByHash: /Channel/Application/Readers - - # ACL policy for qscc's "GetTransactionByID" function - qscc/GetTransactionByID: /Channel/Application/Readers - - # ACL policy for qscc's "GetBlockByTxID" function - qscc/GetBlockByTxID: /Channel/Application/Readers - - #---Configuration System Chaincode (cscc) function to policy mapping for access control---# - - # ACL policy for cscc's "GetConfigBlock" function - cscc/GetConfigBlock: 
/Channel/Application/Readers - - # ACL policy for cscc's "GetConfigTree" function - cscc/GetConfigTree: /Channel/Application/Readers - - # ACL policy for cscc's "SimulateConfigTreeUpdate" function - cscc/SimulateConfigTreeUpdate: /Channel/Application/Readers - - #---Miscellanesous peer function to policy mapping for access control---# - - # ACL policy for invoking chaincodes on peer - peer/Propose: /Channel/Application/Writers - - # ACL policy for chaincode to chaincode invocation - peer/ChaincodeToChaincode: /Channel/Application/Readers - - #---Events resource to policy mapping for access control###---# - - # ACL policy for sending block events - event/Block: /Channel/Application/Readers - - # ACL policy for sending filtered block events - event/FilteredBlock: /Channel/Application/Readers - - # Organizations is the list of orgs which are defined as participants on - # the application side of the network. - Organizations: - - # Policies defines the set of policies at this level of the config tree - # For Application policies, their canonical path is - # /Channel/Application/ - Policies: &ApplicationDefaultPolicies - Readers: - Type: ImplicitMeta - Rule: "ANY Readers" - Writers: - Type: ImplicitMeta - Rule: "ANY Writers" - Admins: - Type: ImplicitMeta - Rule: "MAJORITY Admins" - - # Capabilities describes the application level capabilities, see the - # dedicated Capabilities section elsewhere in this file for a full - # description - Capabilities: - <<: *ApplicationCapabilities - -################################################################################ -# -# Profiles -# -# - Different configuration profiles may be encoded here to be specified -# as parameters to the configtxgen tool. The profiles which specify consortiums -# are to be used for generating the orderer genesis block. 
With the correct -# consortium members defined in the orderer genesis block, channel creation -# requests may be generated with only the org member names and a consortium name -# -################################################################################ -Profiles: - - # SampleInsecureSolo defines a configuration which uses the Solo orderer, - # contains no MSP definitions, and allows all transactions and channel - # creation requests for the consortium SampleConsortium. - SampleInsecureSolo: - <<: *ChannelDefaults - Orderer: - <<: *OrdererDefaults - Organizations: - - *ExampleCom - Capabilities: - <<: *OrdererCapabilities - Application: - <<: *ApplicationDefaults - Organizations: - - *ExampleCom - Capabilities: - <<: *ApplicationCapabilities - Consortiums: - SampleConsortium: - Organizations: - - *Org1ExampleCom - - *Org2ExampleCom - - # SampleInsecureKafka defines a configuration that differs from the - # SampleInsecureSolo one only in that is uses the Kafka-based orderer. - SampleInsecureKafka: - <<: *ChannelDefaults - Orderer: - <<: *OrdererDefaults - OrdererType: kafka - Addresses: - - orderer0.example.com:7050 - - orderer1.example.com:7050 - - orderer2.example.com:7050 - Organizations: - - *ExampleCom - Capabilities: - <<: *OrdererCapabilities - Application: - <<: *ApplicationDefaults - Organizations: - - *ExampleCom - Capabilities: - <<: *ApplicationCapabilities - Consortiums: - SampleConsortium: - Organizations: - - *ExampleCom - - *Org1ExampleCom - - *Org2ExampleCom - - # SampleSingleMSPSolo defines a configuration which uses the Solo orderer, - # and contains a single MSP definition (the MSP sampleconfig). 
- # The Consortium SampleConsortium has only a single member, SampleOrg - SampleSingleMSPSolo: - <<: *ChannelDefaults - Orderer: - <<: *OrdererDefaults - Organizations: - - *ExampleCom - Capabilities: - <<: *OrdererCapabilities - Application: - <<: *ApplicationDefaults - Organizations: - - *ExampleCom - Capabilities: - <<: *ApplicationCapabilities - Consortiums: - SampleConsortium: - Organizations: - - *ExampleCom - - *Org1ExampleCom - - *Org2ExampleCom - - # SampleEmptyInsecureChannel defines a channel with no members - # and therefore no access control - SampleEmptyInsecureChannel: - <<: *ChannelDefaults - Consortium: SampleConsortium - Application: - Organizations: - - *ExampleCom - Capabilities: - <<: *ApplicationCapabilities - Policies: - Readers: - Type: ImplicitMeta - Rule: ANY Readers - Writers: - Type: ImplicitMeta - Rule: ANY Writers - Admins: - Type: ImplicitMeta - Rule: MAJORITY Admins - LifecycleEndorsement: - Type: ImplicitMeta - Rule: "MAJORITY Endorsement" - Endorsement: - Type: ImplicitMeta - Rule: "MAJORITY Endorsement" - - # SysTestChannel defines a channel for use with the System Test Orgs - SysTestChannel: - <<: *ChannelDefaults - Capabilities: - <<: *ChannelCapabilities - Consortium: SampleConsortium - Application: - <<: *ApplicationDefaults - Organizations: - - *Org1ExampleCom - - *Org2ExampleCom - Capabilities: - <<: *ApplicationCapabilities - - # SampleSingleMSPChannel defines a channel with only the sample org as a - # member. 
It is designed to be used in conjunction with SampleSingleMSPSolo - # and SampleSingleMSPKafka orderer profiles - SampleSingleMSPChannel: - <<: *ChannelDefaults - Capabilities: - <<: *ChannelCapabilities - Consortium: SampleConsortium - Application: - <<: *ApplicationDefaults - Organizations: - - *Org1ExampleCom - - *Org2ExampleCom - Capabilities: - <<: *ApplicationCapabilities - diff --git a/app/platform/fabric/e2e-test/feature/configs/crypto.yaml b/app/platform/fabric/e2e-test/feature/configs/crypto.yaml deleted file mode 100644 index e283dac10..000000000 --- a/app/platform/fabric/e2e-test/feature/configs/crypto.yaml +++ /dev/null @@ -1,46 +0,0 @@ -# -# Copyright IBM Corp All Rights Reserved -# -# SPDX-License-Identifier: Apache-2.0 -# - -OrdererOrgs: - - Name: ExampleCom - Domain: example.com - #EnableNodeOUs: true - CA: - SANS: - - localhost - Specs: - - Hostname: orderer0 - - Hostname: orderer1 - - Hostname: orderer2 - -PeerOrgs: - - Name: Org1ExampleCom - Domain: org1.example.com - EnableNodeOUs: false - #EnableNodeOUs: true - CA: - SANS: - - localhost - Template: - Count: 2 - SANS: - - localhost - Users: - Count: 1 - - - Name: Org2ExampleCom - Domain: org2.example.com - EnableNodeOUs: false - #EnableNodeOUs: true - CA: - SANS: - - localhost - Template: - Count: 2 - SANS: - - localhost - Users: - Count: 1 diff --git a/app/platform/fabric/e2e-test/feature/configs/fabric-ca-server-config.yaml b/app/platform/fabric/e2e-test/feature/configs/fabric-ca-server-config.yaml deleted file mode 100644 index d596ff7b9..000000000 --- a/app/platform/fabric/e2e-test/feature/configs/fabric-ca-server-config.yaml +++ /dev/null @@ -1,351 +0,0 @@ -############################################################################# -# This is a configuration file for the fabric-ca-server command. 
-# -# COMMAND LINE ARGUMENTS AND ENVIRONMENT VARIABLES -# ------------------------------------------------ -# Each configuration element can be overridden via command line -# arguments or environment variables. The precedence for determining -# the value of each element is as follows: -# 1) command line argument -# Examples: -# a) --port 443 -# To set the listening port -# b) --ca-keyfile ../mykey.pem -# To set the "keyfile" element in the "ca" section below; -# note the '-' separator character. -# 2) environment variable -# Examples: -# a) FABRIC_CA_SERVER_PORT=443 -# To set the listening port -# b) FABRIC_CA_SERVER_CA_KEYFILE="../mykey.pem" -# To set the "keyfile" element in the "ca" section below; -# note the '_' separator character. -# 3) configuration file -# 4) default value (if there is one) -# All default values are shown beside each element below. -# -# FILE NAME ELEMENTS -# ------------------ -# The value of all fields whose name ends with "file" or "files" are -# name or names of other files. -# For example, see "tls.certfile" and "tls.clientauth.certfiles". -# The value of each of these fields can be a simple filename, a -# relative path, or an absolute path. If the value is not an -# absolute path, it is interpretted as being relative to the location -# of this configuration file. -# -############################################################################# - -# Server's listening port (default: 7054) -port: 7054 - -# Enables debug logging (default: false) -debug: false - -# Size limit of an acceptable CRL in bytes (default: 512000) -crlsizelimit: 512000 - -############################################################################# -# TLS section for the server's listening port -# -# The following types are supported for client authentication: NoClientCert, -# RequestClientCert, RequireAnyClientCert, VerifyClientCertIfGiven, -# and RequireAndVerifyClientCert. 
-# -# Certfiles is a list of root certificate authorities that the server uses -# when verifying client certificates. -############################################################################# -tls: - # Enable TLS (default: false) - enabled: true - # TLS for the server's listening port - certfile: tls-cert.pem - keyfile: - clientauth: - type: noclientcert - certfiles: - -############################################################################# -# The CA section contains information related to the Certificate Authority -# including the name of the CA, which should be unique for all members -# of a blockchain network. It also includes the key and certificate files -# used when issuing enrollment certificates (ECerts) and transaction -# certificates (TCerts). -# The chainfile (if it exists) contains the certificate chain which -# should be trusted for this CA, where the 1st in the chain is always the -# root CA certificate. -############################################################################# -ca: - # Name of this CA - name: - # Key file (default: ca-key.pem) - keyfile: ca-key.pem - # Certificate file (default: ca-cert.pem) - certfile: ca-cert.pem - # Chain file (default: chain-cert.pem) - chainfile: ca-chain.pem - -############################################################################# -# The gencrl REST endpoint is used to generate a CRL that contains revoked -# certificates. This section contains configuration options that are used -# during gencrl request processing. -############################################################################# -crl: - # Specifies expiration for the generated CRL. The number of hours - # specified by this property is added to the UTC time, the resulting time - # is used to set the 'Next Update' date of the CRL. 
- expiry: 24h - -############################################################################# -# The registry section controls how the fabric-ca-server does two things: -# 1) authenticates enrollment requests which contain a username and password -# (also known as an enrollment ID and secret). -# 2) once authenticated, retrieves the identity's attribute names and -# values which the fabric-ca-server optionally puts into TCerts -# which it issues for transacting on the Hyperledger Fabric blockchain. -# These attributes are useful for making access control decisions in -# chaincode. -# There are two main configuration options: -# 1) The fabric-ca-server is the registry. -# This is true if "ldap.enabled" in the ldap section below is false. -# 2) An LDAP server is the registry, in which case the fabric-ca-server -# calls the LDAP server to perform these tasks. -# This is true if "ldap.enabled" in the ldap section below is true, -# which means this "registry" section is ignored. -############################################################################# -registry: - # Maximum number of times a password/secret can be reused for enrollment - # (default: -1, which means there is no limit) - maxenrollments: -1 - - # Contains identity information which is used when LDAP is disabled - identities: - #- name: {orgName}-admin - # pass: {orgName}-adminpw - #- name: admin - - name: Admin - pass: adminpw - type: client - affiliation: "" - maxenrollments: -1 - attrs: - hf.Registrar.Roles: "client,user,peer,validator,auditor" - hf.Registrar.DelegateRoles: "client,user,validator,auditor" - hf.Revoker: true - hf.IntermediateCA: false - hf.GenCRL: true - hf.Registrar.Attributes: "*" - -############################################################################# -# Database section -# Supported types are: "sqlite3", "postgres", and "mysql". -# The datasource value depends on the type. -# If the type is "sqlite3", the datasource value is a file name to use -# as the database store. 
Since "sqlite3" is an embedded database, it -# may not be used if you want to run the fabric-ca-server in a cluster. -# To run the fabric-ca-server in a cluster, you must choose "postgres" -# or "mysql". -############################################################################# -db: - type: sqlite3 - datasource: fabric-ca-server.db - tls: - enabled: true - certfiles: - - db-server-cert.pem - client: - certfile: db-client-cert.pem - keyfile: db-client-key.pem - -############################################################################# -# LDAP section -# If LDAP is enabled, the fabric-ca-server calls LDAP to: -# 1) authenticate enrollment ID and secret (i.e. username and password) -# for enrollment requests; -# 2) To retrieve identity attributes -############################################################################# -ldap: - # Enables or disables the LDAP client (default: false) - # If this is set to true, the "registry" section is ignored. - enabled: false - # The URL of the LDAP server - url: ldap://:@:/ - tls: - certfiles: - - ldap-server-cert.pem - client: - certfile: ldap-client-cert.pem - keyfile: ldap-client-key.pem - -############################################################################# -# Affiliation section -############################################################################# -affiliations: - example.com: - org1.example.com: - org2.example.com: - org1: - - example.com - org2: - - example.com - -############################################################################# -# Signing section -# -# The "default" subsection is used to sign enrollment certificates; -# the default expiration ("expiry" field) is "8760h", which is 1 year in hours. -# -# The "ca" profile subsection is used to sign intermediate CA certificates; -# the default expiration ("expiry" field) is "43800h" which is 5 years in hours. -# Note that "isca" is true, meaning that it issues a CA certificate. 
-# A maxpathlen of 0 means that the intermediate CA cannot issue other -# intermediate CA certificates, though it can still issue end entity certificates. -# (See RFC 5280, section 4.2.1.9) -# -# The "tls" profile subsection is used to sign TLS certificate requests; -# the default expiration ("expiry" field) is "8760h", which is 1 year in hours. -############################################################################# -signing: - default: - usage: - - digital signature - expiry: 8760h - profiles: - ca: - usage: - - cert sign - expiry: 43800h - caconstraint: - isca: true - maxpathlen: 0 - tls: - usage: - - signing - - key encipherment - - server auth - - client auth - - key agreement - expiry: 8760h - -########################################################################### -# Certificate Signing Request (CSR) section. -# This controls the creation of the root CA certificate. -# The expiration for the root CA certificate is configured with the -# "ca.expiry" field below, whose default value is "131400h" which is -# 15 years in hours. -# The pathlength field is used to limit CA certificate hierarchy as described -# in section 4.2.1.9 of RFC 5280. -# Examples: -# 1) No pathlength value means no limit is requested. -# 2) pathlength == 1 means a limit of 1 is requested which is the default for -# a root CA. This means the root CA can issue intermediate CA certificates, -# but these intermediate CAs may not in turn issue other CA certificates -# though they can still issue end entity certificates. -# 3) pathlength == 0 means a limit of 0 is requested; -# this is the default for an intermediate CA, which means it can not issue -# CA certificates though it can still issue end entity certificates. 
-########################################################################### -csr: - #cn: fabric-ca-server - cn: {orgName} - names: - - C: US - ST: "North Carolina" - L: - O: Hyperledger - OU: Fabric - hosts: - - 48d614b43b5d - - localhost - - ca.{orgName} - ca: - expiry: 131400h - pathlength: 1 - -############################################################################# -# BCCSP (BlockChain Crypto Service Provider) section is used to select which -# crypto library implementation to use -############################################################################# -bccsp: - default: SW - sw: - hash: SHA2 - security: 256 - filekeystore: - # The directory used for the software file-based keystore - keystore: msp/keystore - -############################################################################# -# Multi CA section -# -# Each Fabric CA server contains one CA by default. This section is used -# to configure multiple CAs in a single server. -# -# 1) --cacount -# Automatically generate non-default CAs. The names of these -# additional CAs are "ca1", "ca2", ... "caN", where "N" is -# This is particularly useful in a development environment to quickly set up -# multiple CAs. -# -# 2) --cafiles -# For each CA config file in the list, generate a separate signing CA. Each CA -# config file in this list MAY contain all of the same elements as are found in -# the server config file except port, debug, and tls sections. 
-# -# Examples: -# fabric-ca-server start -b admin:adminpw --cacount 2 -# -# fabric-ca-server start -b admin:adminpw --cafiles ca/ca1/fabric-ca-server-config.yaml -# --cafiles ca/ca2/fabric-ca-server-config.yaml -# -############################################################################# - -cacount: - -cafiles: - -############################################################################# -# Intermediate CA section -# -# The relationship between servers and CAs is as follows: -# 1) A single server process may contain or function as one or more CAs. -# This is configured by the "Multi CA section" above. -# 2) Each CA is either a root CA or an intermediate CA. -# 3) Each intermediate CA has a parent CA which is either a root CA or another intermediate CA. -# -# This section pertains to configuration of #2 and #3. -# If the "intermediate.parentserver.url" property is set, -# then this is an intermediate CA with the specified parent -# CA. -# -# parentserver section -# url - The URL of the parent server -# caname - Name of the CA to enroll within the server -# -# enrollment section used to enroll intermediate CA with parent CA -# profile - Name of the signing profile to use in issuing the certificate -# label - Label to use in HSM operations -# -# tls section for secure socket connection -# certfiles - PEM-encoded list of trusted root certificate files -# client: -# certfile - PEM-encoded certificate file for when client authentication -# is enabled on server -# keyfile - PEM-encoded key file for when client authentication -# is enabled on server -############################################################################# -intermediate: - parentserver: - url: - caname: - - enrollment: - hosts: - profile: - label: - - tls: - certfiles: - client: - certfile: - keyfile: diff --git a/app/platform/fabric/e2e-test/feature/configs/network-config.json b/app/platform/fabric/e2e-test/feature/configs/network-config.json deleted file mode 100644 index a123ceeb8..000000000 
--- a/app/platform/fabric/e2e-test/feature/configs/network-config.json +++ /dev/null @@ -1,338 +0,0 @@ -{ - "name": "%(networkId)s", - "version": "1.x", - "networkID": "%(networkId)s", - "tls": %(tls)s, - "environment": "dev", - "headers": {"Content-Type": "application/json"}, - "client": { - "organization": "Org1ExampleCom" - }, - "organizations": { - "Org1ExampleCom": { - "mspid": "org1.example.com", - "peers": [ - "peer0.org1.example.com", - "peer1.org1.example.com" - ], - "certificateAuthorities": [ - "ca.org1.example.com" - ] - }, - "Org2ExampleCom": { - "mspid": "org2.example.com", - "peers": [ - "peer0.org2.example.com", - "peer1.org2.example.com" - ], - "certificateAuthorities": [ - "ca.org2.example.com" - ] - } - }, - "orderers": { - "orderer0.example.com": { - "url": "%(grpcType)s://localhost:7050", - "grpcOptions": { - "grpc.http2.keepalive_time": 360, - "grpc.keepalive_time_ms": 360000, - "grpc.http2.keepalive_timeout": 180, - "grpc.keepalive_timeout_ms": 180000 - }, - "tlsCACerts": { - "pem": "%(cacerts)s" - } - } - }, - "peers": { - "peer0.org1.example.com": { - "url": "%(grpcType)s://localhost:7051", - "eventUrl": "%(grpcType)s://localhost:7053", - "grpcOptions": { - "grpc.http2.keepalive_time": 360, - "grpc.keepalive_time_ms": 360000, - "grpc.http2.keepalive_timeout": 180, - "grpc.keepalive_timeout_ms": 180000 - }, - "tlsCACerts": { - "pem": "%(cacerts)s" - }, - "x-mspid": "Org1ExampleCom" - }, - "peer1.org1.example.com": { - "url": "%(grpcType)s://localhost:8051", - "eventUrl": "%(grpcType)s://localhost:8053", - "grpcOptions": { - "grpc.http2.keepalive_time": 360, - "grpc.keepalive_time_ms": 360000, - "grpc.http2.keepalive_timeout": 180, - "grpc.keepalive_timeout_ms": 180000 - }, - "tlsCACerts": { - "pem": "%(cacerts)s" - }, - "x-mspid": "Org1ExampleCom" - }, - "peer0.org2.example.com": { - "url": "%(grpcType)s://localhost:9051", - "eventUrl": "%(grpcType)s://localhost:9053", - "grpcOptions": { - "grpc.http2.keepalive_time": 360, - 
"grpc.keepalive_time_ms": 360000, - "grpc.http2.keepalive_timeout": 180, - "grpc.keepalive_timeout_ms": 180000 - }, - "tlsCACerts": { - "pem": "%(cacerts)s" - }, - "x-mspid": "Org2ExampleCom" - }, - "peer1.org2.example.com": { - "url": "%(grpcType)s://localhost:10051", - "eventUrl": "%(grpcType)s://localhost:10053", - "grpcOptions": { - "grpc.http2.keepalive_time": 360, - "grpc.keepalive_time_ms": 360000, - "grpc.http2.keepalive_timeout": 180, - "grpc.keepalive_timeout_ms": 180000 - }, - "tlsCACerts": { - "pem": "%(cacerts)s" - }, - "x-mspid": "Org2ExampleCom" - } - }, - "channels": { - "behavesystest": { - "orderers": [ - "orderer0.example.com" - ], - "peers": { - "peer0.org1.example.com": { - "x-chaincode": {} - }, - "peer1.org1.example.com": { - "x-chaincode": {} - } - }, - "chaincodes": [], - "x-members": [ - "Org1ExampleCom", - "Org2ExampleCom" - ] - } - }, - "certificateAuthorities": { - "ca.example.com": { - "url": "%(proto)s://localhost:7054", - "httpOptions": { - "verify": true - }, - "tlsCACerts": { - "pem": "%(cacerts)s" - }, - "registrar": [ - { - "enrollId": "Admin", - "enrollSecret": "adminpw" - } - ], - "caName": "ca.example.com", - "x-mspid": "ExampleCom" - }, - "ca.org1.example.com": { - "url": "%(proto)s://localhost:8054", - "httpOptions": { - "verify": true - }, - "tlsCACerts": { - "pem": "%(cacerts)s" - }, - "registrar": [ - { - "enrollId": "Admin", - "enrollSecret": "adminpw" - } - ], - "caName": "ca.org1.example.com", - "x-mspid": "Org1ExampleCom" - }, - "ca.org2.example.com": { - "url": "%(proto)s://localhost:9054", - "httpOptions": { - "verify": true - }, - "tlsCACerts": { - "pem": "%(cacerts)s" - }, - "registrar": [ - { - "enrollId": "Admin", - "enrollSecret": "adminpw" - } - ], - "caName": "ca.org2.example.com", - "x-mspid": "Org2ExampleCom" - } - }, - "network-config": { - "orderer": { - "url": "%(grpcType)s://localhost:7050", - "server-hostname": "orderer0.example.com", - "tls_cacerts": 
"%(config)s/ordererOrganizations/example.com/orderers/orderer0.example.com/tls/ca.crt", - "ca": "%(proto)s://localhost:7054" - }, - "Org1ExampleCom": { - "name": "Org1ExampleCom", - "mspid": "org1.example.com", - "ca": "%(proto)s://localhost:8054", - "peers": { - "peer0.org1.example.com": { - "requests": "%(grpcType)s://localhost:7051", - "events": "%(grpcType)s://localhost:7053", - "server-hostname": "peer0.org1.example.com", - "tls_cacerts": "%(config)s/peerOrganizations/org1.example.com/peers/peer0.org1.example.com/tls/ca.crt" - }, - "peer1.org1.example.com": { - "requests": "%(grpcType)s://localhost:8051", - "events": "%(grpcType)s://localhost:8053", - "server-hostname": "peer1.org1.example.com", - "tls_cacerts": "%(config)s/peerOrganizations/org1.example.com/peers/peer1.org1.example.com/tls/ca.crt" - } - }, - "admin": { - "key": "%(config)s/peerOrganizations/org1.example.com/users/Admin@org1.example.com/msp/keystore", - "cert": "%(config)s/peerOrganizations/org1.example.com/users/Admin@org1.example.com/msp/signcerts" - }, - "user1": { - "key": "%(config)s/peerOrganizations/org1.example.com/users/User1@org1.example.com/msp/keystore", - "cert": "%(config)s/peerOrganizations/org1.example.com/users/User1@org1.example.com/msp/signcerts" - } - }, - "Org2ExampleCom": { - "name": "Org2ExampleCom", - "mspid": "org2.example.com", - "ca": "%(proto)s://localhost:9054", - "peers": { - "peer0.org2.example.com": { - "requests": "%(grpcType)s://localhost:9051", - "events": "%(grpcType)s://localhost:9053", - "server-hostname": "peer0.org2.example.com", - "tls_cacerts": "%(config)s/peerOrganizations/org2.example.com/peers/peer0.org2.example.com/tls/ca.crt" - }, - "peer1.org2.example.com": { - "requests": "%(grpcType)s://localhost:10051", - "events": "%(grpcType)s://localhost:10053", - "server-hostname": "peer1.org2.example.com", - "tls_cacerts": "%(config)s/peerOrganizations/org2.example.com/peers/peer1.org2.example.com/tls/ca.crt" - } - }, - "admin": { - "key": 
"%(config)s/peerOrganizations/org2.example.com/users/Admin@org2.example.com/msp/keystore", - "cert": "%(config)s/peerOrganizations/org2.example.com/users/Admin@org2.example.com/msp/signcerts" - }, - "user1": { - "key": "%(config)s/peerOrganizations/org2.example.com/users/User1@org2.example.com/msp/keystore", - "cert": "%(config)s/peerOrganizations/org2.example.com/users/User1@org2.example.com/msp/signcerts" - } - } - }, - "common-connection-profile": { - "version":"1.0", - "channels":{ - "behavesystest":{ - "orderers":[ - "orderer0.example.com" - ], - "peers":{ - "peer0.org1.example.com":{ - "endorsingPeer":true, - "chaincodeQuery":true, - "ledgerQuery":true, - "eventSource":true - }, - "peer1.org1.example.com":{ - "endorsingPeer":false, - "chaincodeQuery":false, - "ledgerQuery":true, - "eventSource":false - }, - "peer0.org2.example.com":{ - "endorsingPeer":true, - "chaincodeQuery":true, - "ledgerQuery":true, - "eventSource":true - }, - "peer1.org2.example.com":{ - "endorsingPeer":false, - "chaincodeQuery":false, - "ledgerQuery":true, - "eventSource":false - } - } - } - }, - "orderers":{ - "orderer0.example.com":{ - "url":"%(grpcType)s://localhost:7050", - "grpcOptions":{ - "ssl-target-name-override":"orderer0.example.com" - }, - "tlsCACerts":{ - "path":"%(config)s/ordererOrganizations/example.com/orderers/orderer0.example.com/tls/ca.crt" - } - } - }, - "organizations":{ - "Org1ExampleCom": { - "name": "Org1ExampleCom", - "mspid": "org1.example.com", - "ca": "https://localhost:7054", - "peers": [ - "peer0.org1.example.com", - "peer1.org1.example.com" - ], - "adminPrivateKeyPEM": "%(config)s/peerOrganizations/org1.example.com/users/Admin@org1.example.com/msp/keystore", - "signedCertPEM": "%(config)s/peerOrganizations/org1.example.com/users/Admin@org1.example.com/msp/signcerts" - } - }, - "peers":{ - "peer0.org1.example.com":{ - "url":"%(grpcType)s://localhost:7051", - "grpcOptions":{ - "ssl-target-name-override":"peer0.org1.example.com" - }, - "tlsCACerts":{ - 
"path":"%(config)s/peerOrganizations/org1.example.com/peers/peer0.org1.example.com/tls/ca.crt" - } - }, - "peer1.org1.example.com":{ - "url":"%(grpcType)s://localhost:8051", - "grpcOptions":{ - "ssl-target-name-override":"peer1.org1.example.com" - }, - "tlsCACerts":{ - "path":"%(config)s/peerOrganizations/org1.example.com/peers/peer1.org1.example.com/tls/ca.crt" - } - }, - "peer0.org2.example.com":{ - "url":"%(grpcType)s://localhost:9051", - "grpcOptions":{ - "ssl-target-name-override":"peer0.org2.example.com" - }, - "tlsCACerts":{ - "path":"%(config)s/peerOrganizations/org2.example.com/peers/peer0.org2.example.com/tls/ca.crt" - } - }, - "peer1.org2.example.com":{ - "url":"%(grpcType)s://localhost:10051", - "grpcOptions":{ - "ssl-target-name-override":"peer1.org2.example.com" - }, - "tlsCACerts":{ - "path":"%(config)s/peerOrganizations/org2.example.com/peers/peer1.org2.example.com/tls/ca.crt" - } - } - } - } -} diff --git a/app/platform/fabric/e2e-test/feature/docker-compose/docker-compose-base.yml b/app/platform/fabric/e2e-test/feature/docker-compose/docker-compose-base.yml deleted file mode 100644 index af3c95297..000000000 --- a/app/platform/fabric/e2e-test/feature/docker-compose/docker-compose-base.yml +++ /dev/null @@ -1,111 +0,0 @@ -# Copyright IBM Corp. All Rights Reserved. 
-# -# SPDX-License-Identifier: Apache-2.0 -# - -version: '2' - -services: - - zookeeper: - image: hyperledger/fabric-zookeeper - ports: - - 2181 - - 2888 - - 3888 - - kafka: - image: hyperledger/fabric-kafka - environment: - - KAFKA_LOG_RETENTION_MS=-1 - - KAFKA_MESSAGE_MAX_BYTES=103809024 - - KAFKA_REPLICA_FETCH_MAX_BYTES=103809024 - - KAFKA_UNCLEAN_LEADER_ELECTION_ENABLE=false - - KAFKA_DEFAULT_REPLICATION_FACTOR=${KAFKA_DEFAULT_REPLICATION_FACTOR} - - KAFKA_MIN_INSYNC_REPLICAS=2 - ports: - - 9092 - - ca: - image: hyperledger/fabric-ca - environment: - - FABRIC_CA_HOME=/var/hyperledger/fabric-ca-server - - FABRIC_CA_SERVER_HOME=/var/hyperledger/fabric-ca-server - - FABRIC_CA_SERVER_TLS_ENABLED=${FABRIC_CA_SERVER_TLS_ENABLED} - - FABRIC_CA_SERVER_DEBUG=true - ports: - - 7054 - volumes: - - ../configs/${CORE_PEER_NETWORKID}:/var/hyperledger/fabric-ca-server-config - - orderer: - image: hyperledger/fabric-orderer - environment: - - CORE_VM_DOCKER_HOSTCONFIG_NETWORKMODE=${CORE_PEER_NETWORKID}_behave - - ORDERER_HOME=/var/hyperledger/orderer - - ORDERER_GENERAL_LOGLEVEL=debug - - ORDERER_GENERAL_LOCALMSPDIR=/var/hyperledger/msp - - ORDERER_GENERAL_LOCALMSPID=example.com - - ORDERER_GENERAL_LISTENADDRESS=0.0.0.0 - - ORDERER_GENERAL_LISTENPORT=7050 - - ORDERER_GENERAL_LEDGERTYPE=ram - - ORDERER_GENERAL_GENESISMETHOD=file - - ORDERER_GENERAL_GENESISFILE=/var/hyperledger/configs/orderer.block - - CONFIGTX_ORDERER_ORDERERTYPE=solo - - CONFIGTX_ORDERER_BATCHSIZE_MAXMESSAGECOUNT=${CONFIGTX_ORDERER_BATCHSIZE_MAXMESSAGECOUNT} - - CONFIGTX_ORDERER_BATCHTIMEOUT=${CONFIGTX_ORDERER_BATCHTIMEOUT} - - CONFIGTX_ORDERER_ADDRESSES=[127.0.0.1:7050] - # TLS settings - - ORDERER_GENERAL_TLS_ENABLED=${ORDERER_GENERAL_TLS_ENABLED} - - ORDERER_GENERAL_TLS_PRIVATEKEY=${ORDERER_GENERAL_TLS_PRIVATEKEY} - - ORDERER_GENERAL_TLS_CERTIFICATE=${ORDERER_GENERAL_TLS_CERTIFICATE} - - ORDERER_GENERAL_TLS_ROOTCAS=[/var/hyperledger/tls/ca.crt] - - 
ORDERER_TLS_CLIENTAUTHREQUIRED=${ORDERER_TLS_CLIENTAUTHREQUIRED} - - ORDERER_TLS_CLIENTROOTCAS_FILES=/var/hyperledger/users/Admin@example.com/tls/ca.crt - - ORDERER_TLS_CLIENTCERT_FILE=/var/hyperledger/users/Admin@example.com/tls/client.crt - - ORDERER_TLS_CLIENTKEY_FILE=/var/hyperledger/users/Admin@example.com/tls/client.key - volumes: - - ../configs/${CORE_PEER_NETWORKID}:/var/hyperledger/configs - - ../configs/${CORE_PEER_NETWORKID}/ordererOrganizations/example.com/users:/var/hyperledger/users - - ../configs/${CORE_PEER_NETWORKID}/ordererOrganizations/example.com/ca:/var/hyperledger/ca - working_dir: /opt/gopath/src/github.com/hyperledger/fabric/orderer - command: orderer - ports: - - '7050' - - couchdb: - image: hyperledger/fabric-couchdb - - peer: - image: hyperledger/fabric-peer - environment: - - CORE_VM_ENDPOINT=unix:///host/var/run/docker.sock - - CORE_PEER_NETWORKID=${CORE_PEER_NETWORKID} - - CORE_VM_DOCKER_HOSTCONFIG_NETWORKMODE=${CORE_PEER_NETWORKID}_behave - - CORE_PEER_ADDRESSAUTODETECT=true - - CORE_PEER_GOSSIP_ORGLEADER=false - - CORE_PEER_GOSSIP_USELEADERELECTION=true - - CORE_PEER_PROFILE_ENABLED=true - - CORE_PEER_MSPCONFIGPATH=/var/hyperledger/msp - - FABRIC_CA_HOME=/var/hyperledger - - FABRIC_CA_CLIENT_HOME=/var/hyperledger - - FABRIC_CA_CLIENT_TLS_CERTFILES=/var/hyperledger/tls/ca.crt - #- FABRIC_LOGGING_SPEC=peer=DEBUG:endorser=DEBUG:nodeCmd=DEBUG:committer=DEBUG - #- FABRIC_LOGGING_SPEC=peer=DEBUG - - FABRIC_LOGGING_SPEC=${FABRIC_LOGGING_SPEC} - # TLS settings - - CORE_PEER_TLS_ENABLED=${CORE_PEER_TLS_ENABLED} - - CORE_PEER_TLS_CLIENTAUTHREQUIRED=${CORE_PEER_TLS_CLIENTAUTHREQUIRED} - - CORE_PEER_TLS_CERT_FILE=${CORE_PEER_TLS_CERT_FILE} - - CORE_PEER_TLS_KEY_FILE=${CORE_PEER_TLS_KEY_FILE} - - CORE_PEER_TLS_ROOTCERT_FILE=/var/hyperledger/tls/ca.crt - volumes: - - /var/run/:/host/var/run/ - - ../../../common:/opt/gopath/src/github.com/hyperledger/fabric/common - - ../../../vendor:/opt/gopath/src/github.com/hyperledger/fabric/vendor - - 
../configs:/var/hyperledger/configs - command: peer node start - ports: - - '7051' - - '7053' - diff --git a/app/platform/fabric/e2e-test/feature/docker-compose/docker-compose-cli.yml b/app/platform/fabric/e2e-test/feature/docker-compose/docker-compose-cli.yml deleted file mode 100644 index aae53191e..000000000 --- a/app/platform/fabric/e2e-test/feature/docker-compose/docker-compose-cli.yml +++ /dev/null @@ -1,53 +0,0 @@ -# Copyright IBM Corp. All Rights Reserved. -# -# SPDX-License-Identifier: Apache-2.0 -# - -version: '2' - -networks: - behave: - -services: - cli: - container_name: cli - image: hyperledger/fabric-tools - tty: true - environment: - - GOPATH=/opt/gopath - - CORE_VM_ENDPOINT=unix:///host/var/run/docker.sock - - FABRIC_LOGGING_SPEC=DEBUG - # TLS settings - - CORE_PEER_TLS_ENABLED=${CORE_PEER_TLS_ENABLED} - - CORE_PEER_TLS_CERT_FILE=/var/hyperledger/tls/server.crt - - CORE_PEER_TLS_KEY_FILE=/var/hyperledger/tls/server.key - - CORE_PEER_TLS_ROOTCERT_FILE=/var/hyperledger/tls/ca.crt - - CORE_PEER_TLS_CLIENTAUTHREQUIRED=${CORE_PEER_TLS_CLIENTAUTHREQUIRED} - - CORE_PEER_TLS_CLIENTROOTCAS_FILES=/var/hyperledger/users/Admin@org1.example.com/tls/ca.crt - - CORE_PEER_TLS_CLIENTCERT_FILE=/var/hyperledger/users/Admin@org1.example.com/tls/client.crt - - CORE_PEER_TLS_CLIENTKEY_FILE=/var/hyperledger/users/Admin@org1.example.com/tls/client.key - working_dir: /opt/gopath/src/github.com/hyperledger/fabric/peer - command: /bin/bash -c 'sleep 6000000000000000000' - volumes: - - /var/run/:/host/var/run/ - - ../../fabric/examples/chaincode:/opt/gopath/src/github.com/hyperledger/fabric/examples/chaincode - - ../../fabric-samples/chaincode:/opt/gopath/src/github.com/hyperledger/fabric-samples/chaincode - - ../../fabric-sdk-java/src/test/fixture/sdkintegration:/opt/gopath/src/github.com/hyperledger/fabric-sdk-java/chaincode - - ../../chaincodes:/opt/gopath/src/github.com/hyperledger/fabric-test/chaincodes - - 
../../fabric/common:/opt/gopath/src/github.com/hyperledger/fabric/common - - ../../fabric/vendor:/opt/gopath/src/github.com/hyperledger/fabric/vendor - - ../configs:/var/hyperledger/configs - - ../configs/${CORE_PEER_NETWORKID}/peerOrganizations/org1.example.com/peers/peer0.org1.example.com/tls:/var/hyperledger/tls - - ../configs/${CORE_PEER_NETWORKID}/peerOrganizations/org1.example.com/users:/var/hyperledger/users - ports: - - '9092' - depends_on: - - orderer0.example.com - - peer0.org1.example.com - - peer1.org1.example.com - - peer0.org2.example.com - - peer1.org2.example.com - networks: - behave: - aliases: - - ${CORE_PEER_NETWORKID} diff --git a/app/platform/fabric/e2e-test/feature/docker-compose/docker-compose-couchdb.yml b/app/platform/fabric/e2e-test/feature/docker-compose/docker-compose-couchdb.yml deleted file mode 100644 index 1dace10ac..000000000 --- a/app/platform/fabric/e2e-test/feature/docker-compose/docker-compose-couchdb.yml +++ /dev/null @@ -1,99 +0,0 @@ -# Copyright IBM Corp. All Rights Reserved. -# -# SPDX-License-Identifier: Apache-2.0 -# - -version: '2' - -networks: - behave: - - -services: - couchdb01: - extends: - file: docker-compose-base.yml - service: couchdb - container_name: couchdb01 - # Comment/Uncomment the port mapping if you want to hide/expose the CouchDB service, - # for example map it to utilize Fauxton User Interface in dev environments. - ports: - - "5984:5984" - networks: - behave: - aliases: - - ${CORE_PEER_NETWORKID} - - peer0.org1.example.com: - environment: - - CORE_LEDGER_STATE_STATEDATABASE=CouchDB - - CORE_LEDGER_STATE_COUCHDBCONFIG_COUCHDBADDRESS=couchdb01:5984 - depends_on: - - orderer0.example.com - - couchdb01 - - couchdb02: - extends: - file: docker-compose-base.yml - service: couchdb - container_name: couchdb02 - # Comment/Uncomment the port mapping if you want to hide/expose the CouchDB service, - # for example map it to utilize Fauxton User Interface in dev environments. 
- ports: - - "6984:5984" - networks: - behave: - aliases: - - ${CORE_PEER_NETWORKID} - - peer0.org2.example.com: - environment: - - CORE_LEDGER_STATE_STATEDATABASE=CouchDB - - CORE_LEDGER_STATE_COUCHDBCONFIG_COUCHDBADDRESS=couchdb02:5984 - depends_on: - - orderer0.example.com - - couchdb02 - - couchdb11: - extends: - file: docker-compose-base.yml - service: couchdb - container_name: couchdb11 - # Comment/Uncomment the port mapping if you want to hide/expose the CouchDB service, - # for example map it to utilize Fauxton User Interface in dev environments. - ports: - - "7984:5984" - networks: - behave: - aliases: - - ${CORE_PEER_NETWORKID} - - peer1.org1.example.com: - environment: - - CORE_LEDGER_STATE_STATEDATABASE=CouchDB - - CORE_LEDGER_STATE_COUCHDBCONFIG_COUCHDBADDRESS=couchdb11:5984 - depends_on: - - orderer0.example.com - - couchdb11 - - couchdb12: - extends: - file: docker-compose-base.yml - service: couchdb - container_name: couchdb12 - # Comment/Uncomment the port mapping if you want to hide/expose the CouchDB service, - # for example map it to utilize Fauxton User Interface in dev environments. - ports: - - "8984:5984" - networks: - behave: - aliases: - - ${CORE_PEER_NETWORKID} - - peer1.org2.example.com: - environment: - - CORE_LEDGER_STATE_STATEDATABASE=CouchDB - - CORE_LEDGER_STATE_COUCHDBCONFIG_COUCHDBADDRESS=couchdb12:5984 - depends_on: - - orderer0.example.com - - couchdb12 diff --git a/app/platform/fabric/e2e-test/feature/docker-compose/docker-compose-kafka-sd.yml b/app/platform/fabric/e2e-test/feature/docker-compose/docker-compose-kafka-sd.yml deleted file mode 100644 index 98bea8718..000000000 --- a/app/platform/fabric/e2e-test/feature/docker-compose/docker-compose-kafka-sd.yml +++ /dev/null @@ -1,443 +0,0 @@ -# Copyright IBM Corp. All Rights Reserved. 
-# -# SPDX-License-Identifier: Apache-2.0 -# - -version: '2' - -networks: - behave: - -services: - - ca.example.com: - extends: - file: docker-compose-base.yml - service: ca - container_name: ca.example.com - environment: - - FABRIC_CA_SERVER_CA_NAME=ca.example.com - - BOOTSTRAP_USER_PASS=Admin:adminpw - - FABRIC_CA_SERVER_CSR_CN=ca.example.com - #- FABRIC_CA_SERVER_HOME=/var/hyperledger/fabric-ca-server - #- FABRIC_CA_SERVER_CSR_HOSTS=ca.example.com - - FABRIC_CA_SERVER_CA_CERTFILE=/var/hyperledger/fabric-ca-server-config/ca.example.com-cert.pem - - FABRIC_CA_SERVER_TLS_CERTFILE=/var/hyperledger/fabric-ca-server-config/ca.example.com-cert.pem - #- FABRIC_CA_SERVER_TLS_CERTFILE=/var/hyperledger/fabric-ca-server-config/tls/server.crt - #- FABRIC_CA_SERVER_CA_CERTFILE=/var/hyperledger/fabric-ca-server-config/tls/server.crt - #- FABRIC_CA_SERVER_TLS_KEYFILE=/var/hyperledger/fabric-ca-server-config/tls/server.key - - FABRIC_CA_SERVER_CA_KEYFILE=${FABRIC_CA_SERVER_EXAMPLE_TLS_KEYFILE} - - FABRIC_CA_SERVER_TLS_KEYFILE=${FABRIC_CA_SERVER_EXAMPLE_TLS_KEYFILE} - #command: sh -c 'fabric-ca-server start --ca.certfile /var/hyperledger/fabric-ca-server-config/ca.example.com-cert.pem --tls.certfile /var/hyperledger/fabric-ca-server-config/ca.example.com-cert.pem --ca.keyfile $${FABRIC_CA_SERVER_CA_KEYFILE} --tls.keyfile $${FABRIC_CA_SERVER_CA_KEYFILE} -b $${BOOTSTRAP_USER_PASS} -d' - #command: sh -c 'fabric-ca-server start --ca.certfile /var/hyperledger/fabric-ca-server-config/ca.example.com-cert.pem --tls.certfile /var/hyperledger/fabric-ca-server-config/tls/server.crt --ca.keyfile $${FABRIC_CA_SERVER_CA_KEYFILE} --tls.keyfile /var/hyperledger/fabric-ca-server-config/tls/server.key -b $${BOOTSTRAP_USER_PASS} -d' - #command: sh -c 'fabric-ca-server start -b $${BOOTSTRAP_USER_PASS} --ca.certfile /var/hyperledger/fabric-ca-server-config/ca.example.com-cert.pem --ca.keyfile $${FABRIC_CA_SERVER_CA_KEYFILE} --tls.enabled --csr.hosts ca.example.com,localhost -d' - command: sh -c 
'fabric-ca-server start --ca.certfile /var/hyperledger/fabric-ca-server-config/ca.example.com-cert.pem --ca.keyfile $${FABRIC_CA_SERVER_CA_KEYFILE} --tls.enabled --csr.hosts $${FABRIC_CA_SERVER_CA_NAME},localhost,0.0.0.0 -b $${BOOTSTRAP_USER_PASS}' - #command: sh -c 'fabric-ca-server start -d -b $${BOOTSTRAP_USER_PASS} --port 7054' - volumes: - - ../configs/${CORE_PEER_NETWORKID}/ordererOrganizations/example.com/ca:/var/hyperledger/fabric-ca-server-config - - ../configs/${CORE_PEER_NETWORKID}/ordererOrganizations/example.com:/var/hyperledger/fabric-ca-server - networks: - behave: - aliases: - - ${CORE_PEER_NETWORKID} - - ca.org1.example.com: - extends: - file: docker-compose-base.yml - service: ca - container_name: ca.org1.example.com - environment: - - FABRIC_CA_SERVER_CA_NAME=ca.org1.example.com - - BOOTSTRAP_USER_PASS=Admin:adminpw - - FABRIC_CA_SERVER_CSR_CN=ca.org1.example.com - #- FABRIC_CA_SERVER_CSR_HOSTS=ca.org1.example.com - - FABRIC_CA_SERVER_CA_CERTFILE=/var/hyperledger/fabric-ca-server-config/ca.org1.example.com-cert.pem - - FABRIC_CA_SERVER_TLS_CERTFILE=/var/hyperledger/fabric-ca-server-config/ca.org1.example.com-cert.pem - #- FABRIC_CA_SERVER_TLS_CERTFILE=/var/hyperledger/fabric-ca-server-config/tls/server.crt - - FABRIC_CA_SERVER_CA_KEYFILE=${FABRIC_CA_SERVER_ORG1_TLS_KEYFILE} - - FABRIC_CA_SERVER_TLS_KEYFILE=${FABRIC_CA_SERVER_ORG1_TLS_KEYFILE} - #- FABRIC_CA_SERVER_TLS_CERTFILE=/var/hyperledger/fabric-ca-server-config/tls/server.crt - #- FABRIC_CA_SERVER_TLS_KEYFILE=/var/hyperledger/fabric-ca-server-config/tls/server.key - #command: sh -c 'fabric-ca-server start --ca.certfile /var/hyperledger/fabric-ca-server-config/ca.org1.example.com-cert.pem --tls.certfile /var/hyperledger/fabric-ca-server-config/ca.org1.example.com-cert.pem --ca.keyfile $${FABRIC_CA_SERVER_CA_KEYFILE} --tls.keyfile $${FABRIC_CA_SERVER_CA_KEYFILE} -b $${BOOTSTRAP_USER_PASS} -d' - #command: sh -c 'fabric-ca-server start --ca.certfile 
/var/hyperledger/fabric-ca-server-config/ca.org1.example.com-cert.pem --tls.certfile /var/hyperledger/fabric-ca-server-config/tls/server.crt --ca.keyfile $${FABRIC_CA_SERVER_CA_KEYFILE} --tls.keyfile /var/hyperledger/fabric-ca-server-config/tls/server.key -b $${BOOTSTRAP_USER_PASS} -d' - #command: sh -c 'fabric-ca-server start -b $${BOOTSTRAP_USER_PASS} --ca.certfile /var/hyperledger/fabric-ca-server-config/ca.org1.example.com-cert.pem --ca.keyfile $${FABRIC_CA_SERVER_CA_KEYFILE} --tls.enabled --csr.hosts ca.org1.example.com,localhost -d' - command: sh -c 'fabric-ca-server start --ca.certfile /var/hyperledger/fabric-ca-server-config/ca.org1.example.com-cert.pem --ca.keyfile $${FABRIC_CA_SERVER_CA_KEYFILE} --tls.enabled --csr.hosts $${FABRIC_CA_SERVER_CA_NAME},localhost,0.0.0.0 -b $${BOOTSTRAP_USER_PASS}' - #command: sh -c 'fabric-ca-server start -d -b $${BOOTSTRAP_USER_PASS} --port 8054' - volumes: - - ../configs/${CORE_PEER_NETWORKID}/peerOrganizations/org1.example.com/ca:/var/hyperledger/fabric-ca-server-config - - ../configs/${CORE_PEER_NETWORKID}/peerOrganizations/org1.example.com:/var/hyperledger/fabric-ca-server - networks: - behave: - aliases: - - ${CORE_PEER_NETWORKID} - - ca.org2.example.com: - extends: - file: docker-compose-base.yml - service: ca - container_name: ca.org2.example.com - environment: - - FABRIC_CA_SERVER_CA_NAME=ca.org2.example.com - - BOOTSTRAP_USER_PASS=Admin:adminpw - - FABRIC_CA_SERVER_CSR_CN=ca.org2.example.com - - FABRIC_CA_SERVER_CSR_HOSTS=ca.org2.example.com - - FABRIC_CA_SERVER_CA_CERTFILE=/var/hyperledger/fabric-ca-server-config/ca.org2.example.com-cert.pem - - FABRIC_CA_SERVER_TLS_CERTFILE=/var/hyperledger/fabric-ca-server-config/ca.org2.example.com-cert.pem - - FABRIC_CA_SERVER_CA_KEYFILE=${FABRIC_CA_SERVER_ORG2_TLS_KEYFILE} - - FABRIC_CA_SERVER_TLS_KEYFILE=${FABRIC_CA_SERVER_ORG2_TLS_KEYFILE} - #command: sh -c 'fabric-ca-server start --ca.certfile /var/hyperledger/fabric-ca-server-config/ca.org2.example.com-cert.pem 
--tls.certfile /var/hyperledger/fabric-ca-server-config/ca.org2.example.com-cert.pem --ca.keyfile $${FABRIC_CA_SERVER_CA_KEYFILE} --tls.keyfile $${FABRIC_CA_SERVER_CA_KEYFILE} -b $${BOOTSTRAP_USER_PASS} -d' - #command: sh -c 'fabric-ca-server start --ca.certfile /var/hyperledger/fabric-ca-server-config/ca.org2.example.com-cert.pem --tls.certfile /var/hyperledger/fabric-ca-server-config/tls/server.crt --ca.keyfile $${FABRIC_CA_SERVER_CA_KEYFILE} --tls.keyfile /var/hyperledger/fabric-ca-server-config/tls/server.key -b $${BOOTSTRAP_USER_PASS} -d' - #command: sh -c 'fabric-ca-server start -b $${BOOTSTRAP_USER_PASS} --ca.certfile /var/hyperledger/fabric-ca-server-config/ca.org2.example.com-cert.pem --ca.keyfile $${FABRIC_CA_SERVER_CA_KEYFILE} --tls.enabled --csr.hosts ca.org2.example.com,localhost -d' - command: sh -c 'fabric-ca-server start --ca.certfile /var/hyperledger/fabric-ca-server-config/ca.org2.example.com-cert.pem --ca.keyfile $${FABRIC_CA_SERVER_CA_KEYFILE} --tls.enabled --csr.hosts $${FABRIC_CA_SERVER_CA_NAME},localhost,0.0.0.0 -b $${BOOTSTRAP_USER_PASS}' - #command: sh -c 'fabric-ca-server start -d -b $${BOOTSTRAP_USER_PASS} --port 9054' - volumes: - - ../configs/${CORE_PEER_NETWORKID}/peerOrganizations/org2.example.com/ca:/var/hyperledger/fabric-ca-server-config - - ../configs/${CORE_PEER_NETWORKID}/peerOrganizations/org2.example.com:/var/hyperledger/fabric-ca-server - networks: - behave: - aliases: - - ${CORE_PEER_NETWORKID} - - zookeeper0: - extends: - file: docker-compose-base.yml - service: zookeeper - container_name: zookeeper0 - environment: - - ZOO_MY_ID=1 - - ZOO_SERVERS=server.1=zookeeper0:2888:3888 server.2=zookeeper1:2888:3888 server.3=zookeeper2:2888:3888 - networks: - behave: - aliases: - - ${CORE_PEER_NETWORKID} - - zookeeper1: - extends: - file: docker-compose-base.yml - service: zookeeper - container_name: zookeeper1 - environment: - - ZOO_MY_ID=2 - - ZOO_SERVERS=server.1=zookeeper0:2888:3888 server.2=zookeeper1:2888:3888 
server.3=zookeeper2:2888:3888 - networks: - behave: - aliases: - - ${CORE_PEER_NETWORKID} - - zookeeper2: - extends: - file: docker-compose-base.yml - service: zookeeper - container_name: zookeeper2 - environment: - - ZOO_MY_ID=3 - - ZOO_SERVERS=server.1=zookeeper0:2888:3888 server.2=zookeeper1:2888:3888 server.3=zookeeper2:2888:3888 - networks: - behave: - aliases: - - ${CORE_PEER_NETWORKID} - - kafka0: - extends: - file: docker-compose-base.yml - service: kafka - container_name: kafka0 - environment: - - KAFKA_BROKER_ID=0 - - KAFKA_ZOOKEEPER_CONNECT=zookeeper0:2181,zookeeper1:2181,zookeeper2:2181 - - KAFKA_MESSAGE_MAX_BYTES=${KAFKA_MESSAGE_MAX_BYTES} - - KAFKA_REPLICA_FETCH_MAX_BYTES=${KAFKA_REPLICA_FETCH_MAX_BYTES} - - KAFKA_REPLICA_FETCH_RESPONSE_MAX_BYTES=${KAFKA_REPLICA_FETCH_RESPONSE_MAX_BYTES} - depends_on: - - zookeeper0 - - zookeeper1 - - zookeeper2 - networks: - behave: - aliases: - - ${CORE_PEER_NETWORKID} - - kafka1: - extends: - file: docker-compose-base.yml - service: kafka - container_name: kafka1 - environment: - - KAFKA_BROKER_ID=1 - - KAFKA_ZOOKEEPER_CONNECT=zookeeper0:2181,zookeeper1:2181,zookeeper2:2181 - - KAFKA_MESSAGE_MAX_BYTES=${KAFKA_MESSAGE_MAX_BYTES} - - KAFKA_REPLICA_FETCH_MAX_BYTES=${KAFKA_REPLICA_FETCH_MAX_BYTES} - - KAFKA_REPLICA_FETCH_RESPONSE_MAX_BYTES=${KAFKA_REPLICA_FETCH_RESPONSE_MAX_BYTES} - depends_on: - - zookeeper0 - - zookeeper1 - - zookeeper2 - networks: - behave: - aliases: - - ${CORE_PEER_NETWORKID} - - kafka2: - extends: - file: docker-compose-base.yml - service: kafka - container_name: kafka2 - environment: - - KAFKA_BROKER_ID=2 - - KAFKA_ZOOKEEPER_CONNECT=zookeeper0:2181,zookeeper1:2181,zookeeper2:2181 - - KAFKA_MESSAGE_MAX_BYTES=${KAFKA_MESSAGE_MAX_BYTES} - - KAFKA_REPLICA_FETCH_MAX_BYTES=${KAFKA_REPLICA_FETCH_MAX_BYTES} - - KAFKA_REPLICA_FETCH_RESPONSE_MAX_BYTES=${KAFKA_REPLICA_FETCH_RESPONSE_MAX_BYTES} - depends_on: - - zookeeper0 - - zookeeper1 - - zookeeper2 - networks: - behave: - aliases: - - 
${CORE_PEER_NETWORKID} - - kafka3: - extends: - file: docker-compose-base.yml - service: kafka - container_name: kafka3 - environment: - - KAFKA_BROKER_ID=3 - - KAFKA_ZOOKEEPER_CONNECT=zookeeper0:2181,zookeeper1:2181,zookeeper2:2181 - - KAFKA_MESSAGE_MAX_BYTES=${KAFKA_MESSAGE_MAX_BYTES} - - KAFKA_REPLICA_FETCH_MAX_BYTES=${KAFKA_REPLICA_FETCH_MAX_BYTES} - - KAFKA_REPLICA_FETCH_RESPONSE_MAX_BYTES=${KAFKA_REPLICA_FETCH_RESPONSE_MAX_BYTES} - depends_on: - - zookeeper0 - - zookeeper1 - - zookeeper2 - networks: - behave: - aliases: - - ${CORE_PEER_NETWORKID} - - orderer0.example.com: - extends: - file: docker-compose-base.yml - service: orderer - container_name: orderer0.example.com - environment: - - ORDERER_HOST=orderer0.example.com - - CONFIGTX_ORDERER_ORDERERTYPE=kafka - - CONFIGTX_ORDERER_KAFKA_BROKERS=[kafka0:9092,kafka1:9092,kafka2:9092,kafka3:9092] - - ORDERER_KAFKA_RETRY_SHORTINTERVAL=1s - - ORDERER_KAFKA_RETRY_SHORTTOTAL=30s - - ORDERER_KAFKA_VERBOSE=true - - ORDERER_GENERAL_GENESISPROFILE=SampleInsecureKafka - - ORDERER_ABSOLUTEMAXBYTES=${ORDERER_ABSOLUTEMAXBYTES} - - ORDERER_PREFERREDMAXBYTES=${ORDERER_PREFERREDMAXBYTES} - volumes: - - ../configs/${CORE_PEER_NETWORKID}/ordererOrganizations/example.com/orderers/orderer0.example.com/msp:/var/hyperledger/msp - - ../configs/${CORE_PEER_NETWORKID}/ordererOrganizations/example.com/orderers/orderer0.example.com/tls:/var/hyperledger/tls - depends_on: - - kafka0 - - kafka1 - - kafka2 - - kafka3 - networks: - behave: - aliases: - - ${CORE_PEER_NETWORKID} - ports: - - 7050:7050 - - orderer1.example.com: - extends: - file: docker-compose-base.yml - service: orderer - container_name: orderer1.example.com - environment: - - ORDERER_HOST=orderer1.example.com - - CONFIGTX_ORDERER_ORDERERTYPE=kafka - - CONFIGTX_ORDERER_KAFKA_BROKERS=[kafka0:9092,kafka1:9092,kafka2:9092,kafka3:9092] - - ORDERER_KAFKA_RETRY_SHORTINTERVAL=1s - - ORDERER_KAFKA_RETRY_SHORTTOTAL=30s - - ORDERER_KAFKA_VERBOSE=true - - 
ORDERER_GENERAL_GENESISPROFILE=SampleInsecureKafka - - ORDERER_ABSOLUTEMAXBYTES=${ORDERER_ABSOLUTEMAXBYTES} - - ORDERER_PREFERREDMAXBYTES=${ORDERER_PREFERREDMAXBYTES} - volumes: - - ../configs/${CORE_PEER_NETWORKID}/ordererOrganizations/example.com/orderers/orderer1.example.com/msp:/var/hyperledger/msp - - ../configs/${CORE_PEER_NETWORKID}/ordererOrganizations/example.com/orderers/orderer1.example.com/tls:/var/hyperledger/tls - depends_on: - - kafka0 - - kafka1 - - kafka2 - - kafka3 - networks: - behave: - aliases: - - ${CORE_PEER_NETWORKID} - ports: - - 8050:7050 - - orderer2.example.com: - extends: - file: docker-compose-base.yml - service: orderer - container_name: orderer2.example.com - environment: - - ORDERER_HOST=orderer2.example.com - - CONFIGTX_ORDERER_ORDERERTYPE=kafka - - CONFIGTX_ORDERER_KAFKA_BROKERS=[kafka0:9092,kafka1:9092,kafka2:9092,kafka3:9092] - - ORDERER_KAFKA_RETRY_SHORTINTERVAL=1s - - ORDERER_KAFKA_RETRY_SHORTTOTAL=30s - - ORDERER_KAFKA_VERBOSE=true - - ORDERER_GENERAL_GENESISPROFILE=SampleInsecureKafka - - ORDERER_ABSOLUTEMAXBYTES=${ORDERER_ABSOLUTEMAXBYTES} - - ORDERER_PREFERREDMAXBYTES=${ORDERER_PREFERREDMAXBYTES} - volumes: - - ../configs/${CORE_PEER_NETWORKID}/ordererOrganizations/example.com/orderers/orderer2.example.com/msp:/var/hyperledger/msp - - ../configs/${CORE_PEER_NETWORKID}/ordererOrganizations/example.com/orderers/orderer2.example.com/tls:/var/hyperledger/tls - depends_on: - - kafka0 - - kafka1 - - kafka2 - - kafka3 - networks: - behave: - aliases: - - ${CORE_PEER_NETWORKID} - ports: - - 9050:7050 - - peer0.org1.example.com: - extends: - file: docker-compose-base.yml - service: peer - container_name: peer0.org1.example.com - environment: - - CORE_PEER_CHAINCODELISTENADDRESS=peer0.org1.example.com:7052 - - CORE_PEER_ID=peer0.org1.example.com - - CORE_PEER_ADDRESS=peer0.org1.example.com:7051 - - CORE_PEER_GOSSIP_BOOTSTRAP=peer1.org1.example.com:7051 - - CORE_PEER_GOSSIP_EXTERNALENDPOINT=peer0.org1.example.com:7051 - - 
CORE_PEER_GOSSIP_ORGLEADER=${CORE_PEER_GOSSIP_ORGLEADER_PEER0_ORG1} - - CORE_PEER_GOSSIP_USELEADERELECTION=${CORE_PEER_GOSSIP_USELEADERELECTION_PEER0_ORG1} - - CORE_PEER_LOCALMSPID=org1.example.com - - CORE_PEER_TLS_CLIENTROOTCAS_FILES=/var/hyperledger/users/Admin@org1.example.com/tls/ca.crt - - CORE_PEER_TLS_CLIENTCERT_FILE=/var/hyperledger/users/Admin@org1.example.com/tls/client.crt - - CORE_PEER_TLS_CLIENTKEY_FILE=/var/hyperledger/users/Admin@org1.example.com/tls/client.key - volumes: - - ../configs/${CORE_PEER_NETWORKID}/peerOrganizations/org1.example.com/peers/peer0.org1.example.com/msp:/var/hyperledger/msp - - ../configs/${CORE_PEER_NETWORKID}/peerOrganizations/org1.example.com/peers/peer0.org1.example.com/tls:/var/hyperledger/tls - - ../configs/${CORE_PEER_NETWORKID}/peerOrganizations/org1.example.com/users:/var/hyperledger/users - - ../configs/${CORE_PEER_NETWORKID}/peerOrganizations/org1.example.com/ca:/var/hyperledger/ca - depends_on: - - orderer0.example.com - - orderer1.example.com - - orderer2.example.com - networks: - behave: - aliases: - - ${CORE_PEER_NETWORKID} - ports: - - 7051:7051 - - 7053:7053 - - peer0.org2.example.com: - extends: - file: docker-compose-base.yml - service: peer - container_name: peer0.org2.example.com - environment: - - CORE_PEER_CHAINCODELISTENADDRESS=peer0.org2.example.com:7052 - - CORE_PEER_ID=peer0.org2.example.com - - CORE_PEER_ADDRESS=peer0.org2.example.com:7051 - - CORE_PEER_GOSSIP_BOOTSTRAP=peer1.org2.example.com:7051 - - CORE_PEER_GOSSIP_EXTERNALENDPOINT=peer0.org2.example.com:7051 - - CORE_PEER_GOSSIP_ORGLEADER=${CORE_PEER_GOSSIP_ORGLEADER_PEER0_ORG2} - - CORE_PEER_GOSSIP_USELEADERELECTION=${CORE_PEER_GOSSIP_USELEADERELECTION_PEER0_ORG2} - - CORE_PEER_LOCALMSPID=org2.example.com - - CORE_PEER_TLS_CLIENTROOTCAS_FILES=/var/hyperledger/users/Admin@org2.example.com/tls/ca.crt - - CORE_PEER_TLS_CLIENTCERT_FILE=/var/hyperledger/users/Admin@org2.example.com/tls/client.crt - - 
CORE_PEER_TLS_CLIENTKEY_FILE=/var/hyperledger/users/Admin@org2.example.com/tls/client.key - volumes: - - ../configs/${CORE_PEER_NETWORKID}/peerOrganizations/org2.example.com/peers/peer0.org2.example.com/msp:/var/hyperledger/msp - - ../configs/${CORE_PEER_NETWORKID}/peerOrganizations/org2.example.com/peers/peer0.org2.example.com/tls:/var/hyperledger/tls - - ../configs/${CORE_PEER_NETWORKID}/peerOrganizations/org2.example.com/users:/var/hyperledger/users - depends_on: - - orderer0.example.com - - orderer1.example.com - - orderer2.example.com - networks: - behave: - aliases: - - ${CORE_PEER_NETWORKID} - ports: - - 9051:7051 - - 9053:7053 - - peer1.org1.example.com: - extends: - file: docker-compose-base.yml - service: peer - container_name: peer1.org1.example.com - environment: - - CORE_PEER_CHAINCODELISTENADDRESS=peer1.org1.example.com:7052 - - CORE_PEER_ID=peer1.org1.example.com - - CORE_PEER_ADDRESS=peer1.org1.example.com:7051 - - CORE_PEER_GOSSIP_BOOTSTRAP=peer0.org1.example.com:7051 - - CORE_PEER_GOSSIP_EXTERNALENDPOINT=peer1.org1.example.com:7051 - - CORE_PEER_GOSSIP_ORGLEADER=${CORE_PEER_GOSSIP_ORGLEADER_PEER1_ORG1} - - CORE_PEER_GOSSIP_USELEADERELECTION=${CORE_PEER_GOSSIP_USELEADERELECTION_PEER1_ORG1} - - CORE_PEER_LOCALMSPID=org1.example.com - - CORE_PEER_TLS_CLIENTROOTCAS_FILES=/var/hyperledger/users/Admin@org1.example.com/tls/ca.crt - - CORE_PEER_TLS_CLIENTCERT_FILE=/var/hyperledger/users/Admin@org1.example.com/tls/client.crt - - CORE_PEER_TLS_CLIENTKEY_FILE=/var/hyperledger/users/Admin@org1.example.com/tls/client.key - volumes: - - ../configs/${CORE_PEER_NETWORKID}/peerOrganizations/org1.example.com/peers/peer1.org1.example.com/msp:/var/hyperledger/msp - - ../configs/${CORE_PEER_NETWORKID}/peerOrganizations/org1.example.com/peers/peer1.org1.example.com/tls:/var/hyperledger/tls - - ../configs/${CORE_PEER_NETWORKID}/peerOrganizations/org1.example.com/users:/var/hyperledger/users - depends_on: - - orderer0.example.com - - orderer1.example.com - - 
orderer2.example.com - - peer0.org1.example.com - networks: - behave: - aliases: - - ${CORE_PEER_NETWORKID} - ports: - - 8051:7051 - - 8053:7053 - - peer1.org2.example.com: - extends: - file: docker-compose-base.yml - service: peer - container_name: peer1.org2.example.com - environment: - - CORE_PEER_CHAINCODELISTENADDRESS=peer1.org2.example.com:7052 - - CORE_PEER_ID=peer1.org2.example.com - - CORE_PEER_ADDRESS=peer1.org2.example.com:7051 - - CORE_PEER_GOSSIP_BOOTSTRAP=peer0.org2.example.com:7051 - - CORE_PEER_GOSSIP_EXTERNALENDPOINT=peer1.org2.example.com:7051 - - CORE_PEER_GOSSIP_ORGLEADER=${CORE_PEER_GOSSIP_ORGLEADER_PEER1_ORG2} - - CORE_PEER_GOSSIP_USELEADERELECTION=${CORE_PEER_GOSSIP_USELEADERELECTION_PEER1_ORG2} - - CORE_PEER_LOCALMSPID=org2.example.com - - CORE_PEER_TLS_CLIENTROOTCAS_FILES=/var/hyperledger/users/Admin@org2.example.com/tls/ca.crt - - CORE_PEER_TLS_CLIENTCERT_FILE=/var/hyperledger/users/Admin@org2.example.com/tls/client.crt - - CORE_PEER_TLS_CLIENTKEY_FILE=/var/hyperledger/users/Admin@org2.example.com/tls/client.key - volumes: - - ../configs/${CORE_PEER_NETWORKID}/peerOrganizations/org2.example.com/peers/peer1.org2.example.com/msp:/var/hyperledger/msp - - ../configs/${CORE_PEER_NETWORKID}/peerOrganizations/org2.example.com/peers/peer1.org2.example.com/tls:/var/hyperledger/tls - - ../configs/${CORE_PEER_NETWORKID}/peerOrganizations/org2.example.com/users:/var/hyperledger/users - depends_on: - - orderer0.example.com - - orderer1.example.com - - orderer2.example.com - - peer0.org2.example.com - networks: - behave: - aliases: - - ${CORE_PEER_NETWORKID} - ports: - - 10051:7051 - - 10053:7053 diff --git a/app/platform/fabric/e2e-test/feature/docker-compose/docker-compose-kafka.yml b/app/platform/fabric/e2e-test/feature/docker-compose/docker-compose-kafka.yml deleted file mode 100644 index 46ec83183..000000000 --- a/app/platform/fabric/e2e-test/feature/docker-compose/docker-compose-kafka.yml +++ /dev/null @@ -1,441 +0,0 @@ -# Copyright IBM Corp. 
All Rights Reserved. -# -# SPDX-License-Identifier: Apache-2.0 -# - -version: '2' - -networks: - behave: - -services: - - ca.example.com: - extends: - file: docker-compose-base.yml - service: ca - container_name: ca.example.com - environment: - - FABRIC_CA_SERVER_CA_NAME=ca.example.com - - BOOTSTRAP_USER_PASS=Admin:adminpw - - FABRIC_CA_SERVER_CSR_CN=ca.example.com - #- FABRIC_CA_SERVER_HOME=/var/hyperledger/fabric-ca-server - #- FABRIC_CA_SERVER_CSR_HOSTS=ca.example.com - - FABRIC_CA_SERVER_CA_CERTFILE=/var/hyperledger/fabric-ca-server-config/ca.example.com-cert.pem - - FABRIC_CA_SERVER_TLS_CERTFILE=/var/hyperledger/fabric-ca-server-config/ca.example.com-cert.pem - #- FABRIC_CA_SERVER_TLS_CERTFILE=/var/hyperledger/fabric-ca-server-config/tls/server.crt - #- FABRIC_CA_SERVER_CA_CERTFILE=/var/hyperledger/fabric-ca-server-config/tls/server.crt - #- FABRIC_CA_SERVER_TLS_KEYFILE=/var/hyperledger/fabric-ca-server-config/tls/server.key - - FABRIC_CA_SERVER_CA_KEYFILE=${FABRIC_CA_SERVER_EXAMPLE_TLS_KEYFILE} - - FABRIC_CA_SERVER_TLS_KEYFILE=${FABRIC_CA_SERVER_EXAMPLE_TLS_KEYFILE} - #command: sh -c 'fabric-ca-server start --ca.certfile /var/hyperledger/fabric-ca-server-config/ca.example.com-cert.pem --tls.certfile /var/hyperledger/fabric-ca-server-config/ca.example.com-cert.pem --ca.keyfile $${FABRIC_CA_SERVER_CA_KEYFILE} --tls.keyfile $${FABRIC_CA_SERVER_CA_KEYFILE} -b $${BOOTSTRAP_USER_PASS} -d' - #command: sh -c 'fabric-ca-server start --ca.certfile /var/hyperledger/fabric-ca-server-config/ca.example.com-cert.pem --tls.certfile /var/hyperledger/fabric-ca-server-config/tls/server.crt --ca.keyfile $${FABRIC_CA_SERVER_CA_KEYFILE} --tls.keyfile /var/hyperledger/fabric-ca-server-config/tls/server.key -b $${BOOTSTRAP_USER_PASS} -d' - #command: sh -c 'fabric-ca-server start -b $${BOOTSTRAP_USER_PASS} --ca.certfile /var/hyperledger/fabric-ca-server-config/ca.example.com-cert.pem --ca.keyfile $${FABRIC_CA_SERVER_CA_KEYFILE} --tls.enabled --csr.hosts ca.example.com,localhost -d' - 
command: sh -c 'fabric-ca-server start --ca.certfile /var/hyperledger/fabric-ca-server-config/ca.example.com-cert.pem --ca.keyfile $${FABRIC_CA_SERVER_CA_KEYFILE} --tls.enabled --csr.hosts $${FABRIC_CA_SERVER_CA_NAME},localhost,0.0.0.0 -b $${BOOTSTRAP_USER_PASS}' - #command: sh -c 'fabric-ca-server start -d -b $${BOOTSTRAP_USER_PASS} --port 7054' - volumes: - - ../configs/${CORE_PEER_NETWORKID}/ordererOrganizations/example.com/ca:/var/hyperledger/fabric-ca-server-config - - ../configs/${CORE_PEER_NETWORKID}/ordererOrganizations/example.com:/var/hyperledger/fabric-ca-server - networks: - behave: - aliases: - - ${CORE_PEER_NETWORKID} - - ca.org1.example.com: - extends: - file: docker-compose-base.yml - service: ca - container_name: ca.org1.example.com - environment: - - FABRIC_CA_SERVER_CA_NAME=ca.org1.example.com - - BOOTSTRAP_USER_PASS=Admin:adminpw - - FABRIC_CA_SERVER_CSR_CN=ca.org1.example.com - #- FABRIC_CA_SERVER_CSR_HOSTS=ca.org1.example.com - - FABRIC_CA_SERVER_CA_CERTFILE=/var/hyperledger/fabric-ca-server-config/ca.org1.example.com-cert.pem - - FABRIC_CA_SERVER_TLS_CERTFILE=/var/hyperledger/fabric-ca-server-config/ca.org1.example.com-cert.pem - #- FABRIC_CA_SERVER_TLS_CERTFILE=/var/hyperledger/fabric-ca-server-config/tls/server.crt - - FABRIC_CA_SERVER_CA_KEYFILE=${FABRIC_CA_SERVER_ORG1_TLS_KEYFILE} - - FABRIC_CA_SERVER_TLS_KEYFILE=${FABRIC_CA_SERVER_ORG1_TLS_KEYFILE} - #- FABRIC_CA_SERVER_TLS_CERTFILE=/var/hyperledger/fabric-ca-server-config/tls/server.crt - #- FABRIC_CA_SERVER_TLS_KEYFILE=/var/hyperledger/fabric-ca-server-config/tls/server.key - #command: sh -c 'fabric-ca-server start --ca.certfile /var/hyperledger/fabric-ca-server-config/ca.org1.example.com-cert.pem --tls.certfile /var/hyperledger/fabric-ca-server-config/ca.org1.example.com-cert.pem --ca.keyfile $${FABRIC_CA_SERVER_CA_KEYFILE} --tls.keyfile $${FABRIC_CA_SERVER_CA_KEYFILE} -b $${BOOTSTRAP_USER_PASS} -d' - #command: sh -c 'fabric-ca-server start --ca.certfile 
/var/hyperledger/fabric-ca-server-config/ca.org1.example.com-cert.pem --tls.certfile /var/hyperledger/fabric-ca-server-config/tls/server.crt --ca.keyfile $${FABRIC_CA_SERVER_CA_KEYFILE} --tls.keyfile /var/hyperledger/fabric-ca-server-config/tls/server.key -b $${BOOTSTRAP_USER_PASS} -d' - #command: sh -c 'fabric-ca-server start -b $${BOOTSTRAP_USER_PASS} --ca.certfile /var/hyperledger/fabric-ca-server-config/ca.org1.example.com-cert.pem --ca.keyfile $${FABRIC_CA_SERVER_CA_KEYFILE} --tls.enabled --csr.hosts ca.org1.example.com,localhost -d' - command: sh -c 'fabric-ca-server start --ca.certfile /var/hyperledger/fabric-ca-server-config/ca.org1.example.com-cert.pem --ca.keyfile $${FABRIC_CA_SERVER_CA_KEYFILE} --tls.enabled --csr.hosts $${FABRIC_CA_SERVER_CA_NAME},localhost,0.0.0.0 -b $${BOOTSTRAP_USER_PASS}' - #command: sh -c 'fabric-ca-server start -d -b $${BOOTSTRAP_USER_PASS} --port 8054' - volumes: - - ../configs/${CORE_PEER_NETWORKID}/peerOrganizations/org1.example.com/ca:/var/hyperledger/fabric-ca-server-config - - ../configs/${CORE_PEER_NETWORKID}/peerOrganizations/org1.example.com:/var/hyperledger/fabric-ca-server - networks: - behave: - aliases: - - ${CORE_PEER_NETWORKID} - - ca.org2.example.com: - extends: - file: docker-compose-base.yml - service: ca - container_name: ca.org2.example.com - environment: - - FABRIC_CA_SERVER_CA_NAME=ca.org2.example.com - - BOOTSTRAP_USER_PASS=Admin:adminpw - - FABRIC_CA_SERVER_CSR_CN=ca.org2.example.com - - FABRIC_CA_SERVER_CSR_HOSTS=ca.org2.example.com - - FABRIC_CA_SERVER_CA_CERTFILE=/var/hyperledger/fabric-ca-server-config/ca.org2.example.com-cert.pem - - FABRIC_CA_SERVER_TLS_CERTFILE=/var/hyperledger/fabric-ca-server-config/ca.org2.example.com-cert.pem - - FABRIC_CA_SERVER_CA_KEYFILE=${FABRIC_CA_SERVER_ORG2_TLS_KEYFILE} - - FABRIC_CA_SERVER_TLS_KEYFILE=${FABRIC_CA_SERVER_ORG2_TLS_KEYFILE} - #command: sh -c 'fabric-ca-server start --ca.certfile /var/hyperledger/fabric-ca-server-config/ca.org2.example.com-cert.pem 
--tls.certfile /var/hyperledger/fabric-ca-server-config/ca.org2.example.com-cert.pem --ca.keyfile $${FABRIC_CA_SERVER_CA_KEYFILE} --tls.keyfile $${FABRIC_CA_SERVER_CA_KEYFILE} -b $${BOOTSTRAP_USER_PASS} -d' - #command: sh -c 'fabric-ca-server start --ca.certfile /var/hyperledger/fabric-ca-server-config/ca.org2.example.com-cert.pem --tls.certfile /var/hyperledger/fabric-ca-server-config/tls/server.crt --ca.keyfile $${FABRIC_CA_SERVER_CA_KEYFILE} --tls.keyfile /var/hyperledger/fabric-ca-server-config/tls/server.key -b $${BOOTSTRAP_USER_PASS} -d' - #command: sh -c 'fabric-ca-server start -b $${BOOTSTRAP_USER_PASS} --ca.certfile /var/hyperledger/fabric-ca-server-config/ca.org2.example.com-cert.pem --ca.keyfile $${FABRIC_CA_SERVER_CA_KEYFILE} --tls.enabled --csr.hosts ca.org2.example.com,localhost -d' - command: sh -c 'fabric-ca-server start --ca.certfile /var/hyperledger/fabric-ca-server-config/ca.org2.example.com-cert.pem --ca.keyfile $${FABRIC_CA_SERVER_CA_KEYFILE} --tls.enabled --csr.hosts $${FABRIC_CA_SERVER_CA_NAME},localhost,0.0.0.0 -b $${BOOTSTRAP_USER_PASS}' - #command: sh -c 'fabric-ca-server start -d -b $${BOOTSTRAP_USER_PASS} --port 9054' - volumes: - - ../configs/${CORE_PEER_NETWORKID}/peerOrganizations/org2.example.com/ca:/var/hyperledger/fabric-ca-server-config - - ../configs/${CORE_PEER_NETWORKID}/peerOrganizations/org2.example.com:/var/hyperledger/fabric-ca-server - networks: - behave: - aliases: - - ${CORE_PEER_NETWORKID} - - zookeeper0: - extends: - file: docker-compose-base.yml - service: zookeeper - container_name: zookeeper0 - environment: - - ZOO_MY_ID=1 - - ZOO_SERVERS=server.1=zookeeper0:2888:3888 server.2=zookeeper1:2888:3888 server.3=zookeeper2:2888:3888 - networks: - behave: - aliases: - - ${CORE_PEER_NETWORKID} - - zookeeper1: - extends: - file: docker-compose-base.yml - service: zookeeper - container_name: zookeeper1 - environment: - - ZOO_MY_ID=2 - - ZOO_SERVERS=server.1=zookeeper0:2888:3888 server.2=zookeeper1:2888:3888 
server.3=zookeeper2:2888:3888 - networks: - behave: - aliases: - - ${CORE_PEER_NETWORKID} - - zookeeper2: - extends: - file: docker-compose-base.yml - service: zookeeper - container_name: zookeeper2 - environment: - - ZOO_MY_ID=3 - - ZOO_SERVERS=server.1=zookeeper0:2888:3888 server.2=zookeeper1:2888:3888 server.3=zookeeper2:2888:3888 - networks: - behave: - aliases: - - ${CORE_PEER_NETWORKID} - - kafka0: - extends: - file: docker-compose-base.yml - service: kafka - container_name: kafka0 - environment: - - KAFKA_BROKER_ID=0 - - KAFKA_ZOOKEEPER_CONNECT=zookeeper0:2181,zookeeper1:2181,zookeeper2:2181 - - KAFKA_MESSAGE_MAX_BYTES=${KAFKA_MESSAGE_MAX_BYTES} - - KAFKA_REPLICA_FETCH_MAX_BYTES=${KAFKA_REPLICA_FETCH_MAX_BYTES} - - KAFKA_REPLICA_FETCH_RESPONSE_MAX_BYTES=${KAFKA_REPLICA_FETCH_RESPONSE_MAX_BYTES} - depends_on: - - zookeeper0 - - zookeeper1 - - zookeeper2 - networks: - behave: - aliases: - - ${CORE_PEER_NETWORKID} - - kafka1: - extends: - file: docker-compose-base.yml - service: kafka - container_name: kafka1 - environment: - - KAFKA_BROKER_ID=1 - - KAFKA_ZOOKEEPER_CONNECT=zookeeper0:2181,zookeeper1:2181,zookeeper2:2181 - - KAFKA_MESSAGE_MAX_BYTES=${KAFKA_MESSAGE_MAX_BYTES} - - KAFKA_REPLICA_FETCH_MAX_BYTES=${KAFKA_REPLICA_FETCH_MAX_BYTES} - - KAFKA_REPLICA_FETCH_RESPONSE_MAX_BYTES=${KAFKA_REPLICA_FETCH_RESPONSE_MAX_BYTES} - depends_on: - - zookeeper0 - - zookeeper1 - - zookeeper2 - networks: - behave: - aliases: - - ${CORE_PEER_NETWORKID} - - kafka2: - extends: - file: docker-compose-base.yml - service: kafka - container_name: kafka2 - environment: - - KAFKA_BROKER_ID=2 - - KAFKA_ZOOKEEPER_CONNECT=zookeeper0:2181,zookeeper1:2181,zookeeper2:2181 - - KAFKA_MESSAGE_MAX_BYTES=${KAFKA_MESSAGE_MAX_BYTES} - - KAFKA_REPLICA_FETCH_MAX_BYTES=${KAFKA_REPLICA_FETCH_MAX_BYTES} - - KAFKA_REPLICA_FETCH_RESPONSE_MAX_BYTES=${KAFKA_REPLICA_FETCH_RESPONSE_MAX_BYTES} - depends_on: - - zookeeper0 - - zookeeper1 - - zookeeper2 - networks: - behave: - aliases: - - 
${CORE_PEER_NETWORKID} - - kafka3: - extends: - file: docker-compose-base.yml - service: kafka - container_name: kafka3 - environment: - - KAFKA_BROKER_ID=3 - - KAFKA_ZOOKEEPER_CONNECT=zookeeper0:2181,zookeeper1:2181,zookeeper2:2181 - - KAFKA_MESSAGE_MAX_BYTES=${KAFKA_MESSAGE_MAX_BYTES} - - KAFKA_REPLICA_FETCH_MAX_BYTES=${KAFKA_REPLICA_FETCH_MAX_BYTES} - - KAFKA_REPLICA_FETCH_RESPONSE_MAX_BYTES=${KAFKA_REPLICA_FETCH_RESPONSE_MAX_BYTES} - depends_on: - - zookeeper0 - - zookeeper1 - - zookeeper2 - networks: - behave: - aliases: - - ${CORE_PEER_NETWORKID} - - orderer0.example.com: - extends: - file: docker-compose-base.yml - service: orderer - container_name: orderer0.example.com - environment: - - ORDERER_HOST=orderer0.example.com - - CONFIGTX_ORDERER_ORDERERTYPE=kafka - - CONFIGTX_ORDERER_KAFKA_BROKERS=[kafka0:9092,kafka1:9092,kafka2:9092,kafka3:9092] - - ORDERER_KAFKA_RETRY_SHORTINTERVAL=1s - - ORDERER_KAFKA_RETRY_SHORTTOTAL=30s - - ORDERER_KAFKA_VERBOSE=true - - ORDERER_GENERAL_GENESISPROFILE=SampleInsecureKafka - - ORDERER_ABSOLUTEMAXBYTES=${ORDERER_ABSOLUTEMAXBYTES} - - ORDERER_PREFERREDMAXBYTES=${ORDERER_PREFERREDMAXBYTES} - volumes: - - ../configs/${CORE_PEER_NETWORKID}/ordererOrganizations/example.com/orderers/orderer0.example.com/msp:/var/hyperledger/msp - - ../configs/${CORE_PEER_NETWORKID}/ordererOrganizations/example.com/orderers/orderer0.example.com/tls:/var/hyperledger/tls - depends_on: - - kafka0 - - kafka1 - - kafka2 - - kafka3 - networks: - behave: - aliases: - - ${CORE_PEER_NETWORKID} - ports: - - 7050:7050 - - orderer1.example.com: - extends: - file: docker-compose-base.yml - service: orderer - container_name: orderer1.example.com - environment: - - ORDERER_HOST=orderer1.example.com - - CONFIGTX_ORDERER_ORDERERTYPE=kafka - - CONFIGTX_ORDERER_KAFKA_BROKERS=[kafka0:9092,kafka1:9092,kafka2:9092,kafka3:9092] - - ORDERER_KAFKA_RETRY_SHORTINTERVAL=1s - - ORDERER_KAFKA_RETRY_SHORTTOTAL=30s - - ORDERER_KAFKA_VERBOSE=true - - 
ORDERER_GENERAL_GENESISPROFILE=SampleInsecureKafka - - ORDERER_ABSOLUTEMAXBYTES=${ORDERER_ABSOLUTEMAXBYTES} - - ORDERER_PREFERREDMAXBYTES=${ORDERER_PREFERREDMAXBYTES} - volumes: - - ../configs/${CORE_PEER_NETWORKID}/ordererOrganizations/example.com/orderers/orderer1.example.com/msp:/var/hyperledger/msp - - ../configs/${CORE_PEER_NETWORKID}/ordererOrganizations/example.com/orderers/orderer1.example.com/tls:/var/hyperledger/tls - depends_on: - - kafka0 - - kafka1 - - kafka2 - - kafka3 - networks: - behave: - aliases: - - ${CORE_PEER_NETWORKID} - ports: - - 8050:7050 - - orderer2.example.com: - extends: - file: docker-compose-base.yml - service: orderer - container_name: orderer2.example.com - environment: - - ORDERER_HOST=orderer2.example.com - - CONFIGTX_ORDERER_ORDERERTYPE=kafka - - CONFIGTX_ORDERER_KAFKA_BROKERS=[kafka0:9092,kafka1:9092,kafka2:9092,kafka3:9092] - - ORDERER_KAFKA_RETRY_SHORTINTERVAL=1s - - ORDERER_KAFKA_RETRY_SHORTTOTAL=30s - - ORDERER_KAFKA_VERBOSE=true - - ORDERER_GENERAL_GENESISPROFILE=SampleInsecureKafka - - ORDERER_ABSOLUTEMAXBYTES=${ORDERER_ABSOLUTEMAXBYTES} - - ORDERER_PREFERREDMAXBYTES=${ORDERER_PREFERREDMAXBYTES} - volumes: - - ../configs/${CORE_PEER_NETWORKID}/ordererOrganizations/example.com/orderers/orderer2.example.com/msp:/var/hyperledger/msp - - ../configs/${CORE_PEER_NETWORKID}/ordererOrganizations/example.com/orderers/orderer2.example.com/tls:/var/hyperledger/tls - depends_on: - - kafka0 - - kafka1 - - kafka2 - - kafka3 - networks: - behave: - aliases: - - ${CORE_PEER_NETWORKID} - ports: - - 9050:7050 - - peer0.org1.example.com: - extends: - file: docker-compose-base.yml - service: peer - container_name: peer0.org1.example.com - environment: - - CORE_PEER_CHAINCODELISTENADDRESS=peer0.org1.example.com:7052 - - CORE_PEER_ID=peer0.org1.example.com - - CORE_PEER_ADDRESS=peer0.org1.example.com:7051 - - CORE_PEER_GOSSIP_BOOTSTRAP=peer1.org1.example.com:7051 - - CORE_PEER_GOSSIP_EXTERNALENDPOINT=peer0.org1.example.com:7051 - - 
CORE_PEER_GOSSIP_ORGLEADER=${CORE_PEER_GOSSIP_ORGLEADER_PEER0_ORG1} - - CORE_PEER_GOSSIP_USELEADERELECTION=${CORE_PEER_GOSSIP_USELEADERELECTION_PEER0_ORG1} - - CORE_PEER_LOCALMSPID=org1.example.com - - CORE_PEER_TLS_CLIENTROOTCAS_FILES=/var/hyperledger/users/Admin@org1.example.com/tls/ca.crt - - CORE_PEER_TLS_CLIENTCERT_FILE=/var/hyperledger/users/Admin@org1.example.com/tls/client.crt - - CORE_PEER_TLS_CLIENTKEY_FILE=/var/hyperledger/users/Admin@org1.example.com/tls/client.key - volumes: - - ../configs/${CORE_PEER_NETWORKID}/peerOrganizations/org1.example.com/peers/peer0.org1.example.com/msp:/var/hyperledger/msp - - ../configs/${CORE_PEER_NETWORKID}/peerOrganizations/org1.example.com/peers/peer0.org1.example.com/tls:/var/hyperledger/tls - - ../configs/${CORE_PEER_NETWORKID}/peerOrganizations/org1.example.com/users:/var/hyperledger/users - - ../configs/${CORE_PEER_NETWORKID}/peerOrganizations/org1.example.com/ca:/var/hyperledger/ca - depends_on: - - orderer0.example.com - - orderer1.example.com - - orderer2.example.com - networks: - behave: - aliases: - - ${CORE_PEER_NETWORKID} - ports: - - 7051:7051 - - 7053:7053 - - peer0.org2.example.com: - extends: - file: docker-compose-base.yml - service: peer - container_name: peer0.org2.example.com - environment: - - CORE_PEER_CHAINCODELISTENADDRESS=peer0.org2.example.com:7052 - - CORE_PEER_ID=peer0.org2.example.com - - CORE_PEER_ADDRESS=peer0.org2.example.com:7051 - - CORE_PEER_GOSSIP_BOOTSTRAP=peer1.org2.example.com:7051 - - CORE_PEER_GOSSIP_EXTERNALENDPOINT=peer0.org2.example.com:7051 - - CORE_PEER_GOSSIP_ORGLEADER=${CORE_PEER_GOSSIP_ORGLEADER_PEER0_ORG2} - - CORE_PEER_GOSSIP_USELEADERELECTION=${CORE_PEER_GOSSIP_USELEADERELECTION_PEER0_ORG2} - - CORE_PEER_LOCALMSPID=org2.example.com - - CORE_PEER_TLS_CLIENTROOTCAS_FILES=/var/hyperledger/users/Admin@org2.example.com/tls/ca.crt - - CORE_PEER_TLS_CLIENTCERT_FILE=/var/hyperledger/users/Admin@org2.example.com/tls/client.crt - - 
CORE_PEER_TLS_CLIENTKEY_FILE=/var/hyperledger/users/Admin@org2.example.com/tls/client.key - volumes: - - ../configs/${CORE_PEER_NETWORKID}/peerOrganizations/org2.example.com/peers/peer0.org2.example.com/msp:/var/hyperledger/msp - - ../configs/${CORE_PEER_NETWORKID}/peerOrganizations/org2.example.com/peers/peer0.org2.example.com/tls:/var/hyperledger/tls - - ../configs/${CORE_PEER_NETWORKID}/peerOrganizations/org2.example.com/users:/var/hyperledger/users - depends_on: - - orderer0.example.com - - orderer1.example.com - - orderer2.example.com - networks: - behave: - aliases: - - ${CORE_PEER_NETWORKID} - ports: - - 9051:7051 - - 9053:7053 - - peer1.org1.example.com: - extends: - file: docker-compose-base.yml - service: peer - container_name: peer1.org1.example.com - environment: - - CORE_PEER_CHAINCODELISTENADDRESS=peer1.org1.example.com:7052 - - CORE_PEER_ID=peer1.org1.example.com - - CORE_PEER_ADDRESS=peer1.org1.example.com:7051 - - CORE_PEER_GOSSIP_BOOTSTRAP=peer0.org1.example.com:7051 - - CORE_PEER_GOSSIP_ORGLEADER=${CORE_PEER_GOSSIP_ORGLEADER_PEER1_ORG1} - - CORE_PEER_GOSSIP_USELEADERELECTION=${CORE_PEER_GOSSIP_USELEADERELECTION_PEER1_ORG1} - - CORE_PEER_LOCALMSPID=org1.example.com - - CORE_PEER_TLS_CLIENTROOTCAS_FILES=/var/hyperledger/users/Admin@org1.example.com/tls/ca.crt - - CORE_PEER_TLS_CLIENTCERT_FILE=/var/hyperledger/users/Admin@org1.example.com/tls/client.crt - - CORE_PEER_TLS_CLIENTKEY_FILE=/var/hyperledger/users/Admin@org1.example.com/tls/client.key - volumes: - - ../configs/${CORE_PEER_NETWORKID}/peerOrganizations/org1.example.com/peers/peer1.org1.example.com/msp:/var/hyperledger/msp - - ../configs/${CORE_PEER_NETWORKID}/peerOrganizations/org1.example.com/peers/peer1.org1.example.com/tls:/var/hyperledger/tls - - ../configs/${CORE_PEER_NETWORKID}/peerOrganizations/org1.example.com/users:/var/hyperledger/users - depends_on: - - orderer0.example.com - - orderer1.example.com - - orderer2.example.com - - peer0.org1.example.com - networks: - behave: - 
aliases: - - ${CORE_PEER_NETWORKID} - ports: - - 8051:7051 - - 8053:7053 - - peer1.org2.example.com: - extends: - file: docker-compose-base.yml - service: peer - container_name: peer1.org2.example.com - environment: - - CORE_PEER_CHAINCODELISTENADDRESS=peer1.org2.example.com:7052 - - CORE_PEER_ID=peer1.org2.example.com - - CORE_PEER_ADDRESS=peer1.org2.example.com:7051 - - CORE_PEER_GOSSIP_BOOTSTRAP=peer0.org2.example.com:7051 - - CORE_PEER_GOSSIP_ORGLEADER=${CORE_PEER_GOSSIP_ORGLEADER_PEER1_ORG2} - - CORE_PEER_GOSSIP_USELEADERELECTION=${CORE_PEER_GOSSIP_USELEADERELECTION_PEER1_ORG2} - - CORE_PEER_LOCALMSPID=org2.example.com - - CORE_PEER_TLS_CLIENTROOTCAS_FILES=/var/hyperledger/users/Admin@org2.example.com/tls/ca.crt - - CORE_PEER_TLS_CLIENTCERT_FILE=/var/hyperledger/users/Admin@org2.example.com/tls/client.crt - - CORE_PEER_TLS_CLIENTKEY_FILE=/var/hyperledger/users/Admin@org2.example.com/tls/client.key - volumes: - - ../configs/${CORE_PEER_NETWORKID}/peerOrganizations/org2.example.com/peers/peer1.org2.example.com/msp:/var/hyperledger/msp - - ../configs/${CORE_PEER_NETWORKID}/peerOrganizations/org2.example.com/peers/peer1.org2.example.com/tls:/var/hyperledger/tls - - ../configs/${CORE_PEER_NETWORKID}/peerOrganizations/org2.example.com/users:/var/hyperledger/users - depends_on: - - orderer0.example.com - - orderer1.example.com - - orderer2.example.com - - peer0.org2.example.com - networks: - behave: - aliases: - - ${CORE_PEER_NETWORKID} - ports: - - 10051:7051 - - 10053:7053 diff --git a/app/platform/fabric/e2e-test/feature/docker-compose/docker-compose-peer-org3.yml b/app/platform/fabric/e2e-test/feature/docker-compose/docker-compose-peer-org3.yml deleted file mode 100644 index 100a07898..000000000 --- a/app/platform/fabric/e2e-test/feature/docker-compose/docker-compose-peer-org3.yml +++ /dev/null @@ -1,69 +0,0 @@ -# Copyright IBM Corp. All Rights Reserved. 
-# -# SPDX-License-Identifier: Apache-2.0 -# - -version: '2' - -networks: - behave: - -services: - peer0.org3.example.com: - extends: - file: docker-compose-base.yml - service: peer - container_name: peer0.org3.example.com - environment: - - CORE_PEER_CHAINCODELISTENADDRESS=peer0.org3.example.com:7052 - - CORE_PEER_ID=peer0.org3.example.com - - CORE_PEER_ADDRESS=peer0.org3.example.com:7051 - - CORE_PEER_GOSSIP_BOOTSTRAP=peer1.org3.example.com:7051 - - CORE_PEER_GOSSIP_EXTERNALENDPOINT=peer0.org3.example.com:7051 - - CORE_PEER_GOSSIP_ORGLEADER=${CORE_PEER_GOSSIP_ORGLEADER_PEER0_ORG3} - - CORE_PEER_GOSSIP_USELEADERELECTION=${CORE_PEER_GOSSIP_USELEADERELECTION_PEER0_ORG3} - - CORE_PEER_LOCALMSPID=org3.example.com - - CORE_PEER_TLS_CLIENTROOTCAS_FILES=/var/hyperledger/users/Admin@org3.example.com/tls/ca.crt - - CORE_PEER_TLS_CLIENTCERT_FILE=/var/hyperledger/users/Admin@org3.example.com/tls/client.crt - - CORE_PEER_TLS_CLIENTKEY_FILE=/var/hyperledger/users/Admin@org3.example.com/tls/client.key - volumes: - - ../configs/${CORE_PEER_NETWORKID}/peerOrganizations/org3.example.com/peers/peer0.org3.example.com/msp:/var/hyperledger/msp - - ../configs/${CORE_PEER_NETWORKID}/peerOrganizations/org3.example.com/peers/peer0.org3.example.com/tls:/var/hyperledger/tls - - ../configs/${CORE_PEER_NETWORKID}/peerOrganizations/org3.example.com/users:/var/hyperledger/users - networks: - behave: - aliases: - - ${CORE_PEER_NETWORKID} - ports: - - 11051:7051 - - 11053:7053 - - peer1.org3.example.com: - extends: - file: docker-compose-base.yml - service: peer - container_name: peer1.org3.example.com - environment: - - CORE_PEER_CHAINCODELISTENADDRESS=peer1.org3.example.com:7052 - - CORE_PEER_ID=peer1.org3.example.com - - CORE_PEER_ADDRESS=peer1.org3.example.com:7051 - - CORE_PEER_GOSSIP_BOOTSTRAP=peer0.org3.example.com:7051 - - CORE_PEER_GOSSIP_ORGLEADER=${CORE_PEER_GOSSIP_ORGLEADER_PEER1_ORG3} - - CORE_PEER_GOSSIP_USELEADERELECTION=${CORE_PEER_GOSSIP_USELEADERELECTION_PEER1_ORG3} - - 
CORE_PEER_LOCALMSPID=org3.example.com - - CORE_PEER_TLS_CLIENTROOTCAS_FILES=/var/hyperledger/users/Admin@org3.example.com/tls/ca.crt - - CORE_PEER_TLS_CLIENTCERT_FILE=/var/hyperledger/users/Admin@org3.example.com/tls/client.crt - - CORE_PEER_TLS_CLIENTKEY_FILE=/var/hyperledger/users/Admin@org3.example.com/tls/client.key - volumes: - - ../configs/${CORE_PEER_NETWORKID}/peerOrganizations/org3.example.com/peers/peer1.org3.example.com/msp:/var/hyperledger/msp - - ../configs/${CORE_PEER_NETWORKID}/peerOrganizations/org3.example.com/peers/peer1.org3.example.com/tls:/var/hyperledger/tls - - ../configs/${CORE_PEER_NETWORKID}/peerOrganizations/org3.example.com/users:/var/hyperledger/users - depends_on: - - peer0.org3.example.com - networks: - behave: - aliases: - - ${CORE_PEER_NETWORKID} - ports: - - 12051:7051 - - 12053:7053 diff --git a/app/platform/fabric/e2e-test/feature/docker-compose/docker-compose-solo.yml b/app/platform/fabric/e2e-test/feature/docker-compose/docker-compose-solo.yml deleted file mode 100644 index 567ff6240..000000000 --- a/app/platform/fabric/e2e-test/feature/docker-compose/docker-compose-solo.yml +++ /dev/null @@ -1,560 +0,0 @@ -# Copyright IBM Corp. All Rights Reserved. 
-# -# SPDX-License-Identifier: Apache-2.0 -# - -version: '2' - -networks: - behave: - -services: - - ca.example.com: - extends: - file: docker-compose-base.yml - service: ca - container_name: ca.example.com - environment: - - FABRIC_CA_SERVER_CA_NAME=ca.example.com - - BOOTSTRAP_USER_PASS=Admin:adminpw - - FABRIC_CA_SERVER_CSR_CN=ca.example.com - #- FABRIC_CA_SERVER_HOME=/var/hyperledger/fabric-ca-server - #- FABRIC_CA_SERVER_CSR_HOSTS=ca.example.com - - FABRIC_CA_SERVER_CA_CERTFILE=/var/hyperledger/fabric-ca-server-config/ca.example.com-cert.pem - - FABRIC_CA_SERVER_TLS_CERTFILE=/var/hyperledger/fabric-ca-server-config/ca.example.com-cert.pem - #- FABRIC_CA_SERVER_TLS_CERTFILE=/var/hyperledger/fabric-ca-server-config/tls/server.crt - #- FABRIC_CA_SERVER_CA_CERTFILE=/var/hyperledger/fabric-ca-server-config/tls/server.crt - #- FABRIC_CA_SERVER_TLS_KEYFILE=/var/hyperledger/fabric-ca-server-config/tls/server.key - - FABRIC_CA_SERVER_CA_KEYFILE=${FABRIC_CA_SERVER_EXAMPLE_TLS_KEYFILE} - - FABRIC_CA_SERVER_TLS_KEYFILE=${FABRIC_CA_SERVER_EXAMPLE_TLS_KEYFILE} - #command: sh -c 'fabric-ca-server start --ca.certfile /var/hyperledger/fabric-ca-server-config/ca.example.com-cert.pem --tls.certfile /var/hyperledger/fabric-ca-server-config/ca.example.com-cert.pem --ca.keyfile $${FABRIC_CA_SERVER_CA_KEYFILE} --tls.keyfile $${FABRIC_CA_SERVER_CA_KEYFILE} -b $${BOOTSTRAP_USER_PASS} -d' - #command: sh -c 'fabric-ca-server start --ca.certfile /var/hyperledger/fabric-ca-server-config/ca.example.com-cert.pem --tls.certfile /var/hyperledger/fabric-ca-server-config/tls/server.crt --ca.keyfile $${FABRIC_CA_SERVER_CA_KEYFILE} --tls.keyfile /var/hyperledger/fabric-ca-server-config/tls/server.key -b $${BOOTSTRAP_USER_PASS} -d' - #command: sh -c 'fabric-ca-server start -b $${BOOTSTRAP_USER_PASS} --ca.certfile /var/hyperledger/fabric-ca-server-config/ca.example.com-cert.pem --ca.keyfile $${FABRIC_CA_SERVER_CA_KEYFILE} --tls.enabled --csr.hosts ca.example.com,localhost -d' - command: sh -c 
'fabric-ca-server start --ca.certfile /var/hyperledger/fabric-ca-server-config/ca.example.com-cert.pem --ca.keyfile $${FABRIC_CA_SERVER_CA_KEYFILE} --tls.enabled --csr.hosts $${FABRIC_CA_SERVER_CA_NAME},localhost,0.0.0.0 -b $${BOOTSTRAP_USER_PASS}' - #command: sh -c 'fabric-ca-server start -d -b $${BOOTSTRAP_USER_PASS} --port 7054' - volumes: - - ../configs/${CORE_PEER_NETWORKID}/ordererOrganizations/example.com/ca:/var/hyperledger/fabric-ca-server-config - - ../configs/${CORE_PEER_NETWORKID}/ordererOrganizations/example.com:/var/hyperledger/fabric-ca-server - networks: - behave: - aliases: - - ${CORE_PEER_NETWORKID} - - ca.org1.example.com: - extends: - file: docker-compose-base.yml - service: ca - container_name: ca.org1.example.com - environment: - - FABRIC_CA_SERVER_CA_NAME=ca.org1.example.com - - BOOTSTRAP_USER_PASS=Admin:adminpw - - FABRIC_CA_SERVER_CSR_CN=ca.org1.example.com - #- FABRIC_CA_SERVER_CSR_HOSTS=ca.org1.example.com - - FABRIC_CA_SERVER_CA_CERTFILE=/var/hyperledger/fabric-ca-server-config/ca.org1.example.com-cert.pem - - FABRIC_CA_SERVER_TLS_CERTFILE=/var/hyperledger/fabric-ca-server-config/ca.org1.example.com-cert.pem - #- FABRIC_CA_SERVER_TLS_CERTFILE=/var/hyperledger/fabric-ca-server-config/tls/server.crt - - FABRIC_CA_SERVER_CA_KEYFILE=${FABRIC_CA_SERVER_ORG1_TLS_KEYFILE} - - FABRIC_CA_SERVER_TLS_KEYFILE=${FABRIC_CA_SERVER_ORG1_TLS_KEYFILE} - #- FABRIC_CA_SERVER_TLS_CERTFILE=/var/hyperledger/fabric-ca-server-config/tls/server.crt - #- FABRIC_CA_SERVER_TLS_KEYFILE=/var/hyperledger/fabric-ca-server-config/tls/server.key - #command: sh -c 'fabric-ca-server start --ca.certfile /var/hyperledger/fabric-ca-server-config/ca.org1.example.com-cert.pem --tls.certfile /var/hyperledger/fabric-ca-server-config/ca.org1.example.com-cert.pem --ca.keyfile $${FABRIC_CA_SERVER_CA_KEYFILE} --tls.keyfile $${FABRIC_CA_SERVER_CA_KEYFILE} -b $${BOOTSTRAP_USER_PASS} -d' - #command: sh -c 'fabric-ca-server start --ca.certfile 
/var/hyperledger/fabric-ca-server-config/ca.org1.example.com-cert.pem --tls.certfile /var/hyperledger/fabric-ca-server-config/tls/server.crt --ca.keyfile $${FABRIC_CA_SERVER_CA_KEYFILE} --tls.keyfile /var/hyperledger/fabric-ca-server-config/tls/server.key -b $${BOOTSTRAP_USER_PASS} -d' - #command: sh -c 'fabric-ca-server start -b $${BOOTSTRAP_USER_PASS} --ca.certfile /var/hyperledger/fabric-ca-server-config/ca.org1.example.com-cert.pem --ca.keyfile $${FABRIC_CA_SERVER_CA_KEYFILE} --tls.enabled --csr.hosts ca.org1.example.com,localhost -d' - command: sh -c 'fabric-ca-server start --ca.certfile /var/hyperledger/fabric-ca-server-config/ca.org1.example.com-cert.pem --ca.keyfile $${FABRIC_CA_SERVER_CA_KEYFILE} --tls.enabled --csr.hosts $${FABRIC_CA_SERVER_CA_NAME},localhost,0.0.0.0 -b $${BOOTSTRAP_USER_PASS}' - #command: sh -c 'fabric-ca-server start -d -b $${BOOTSTRAP_USER_PASS} --port 8054' - volumes: - - ../configs/${CORE_PEER_NETWORKID}/peerOrganizations/org1.example.com/ca:/var/hyperledger/fabric-ca-server-config - - ../configs/${CORE_PEER_NETWORKID}/peerOrganizations/org1.example.com:/var/hyperledger/fabric-ca-server - networks: - behave: - aliases: - - ${CORE_PEER_NETWORKID} - - ca.org2.example.com: - extends: - file: docker-compose-base.yml - service: ca - container_name: ca.org2.example.com - environment: - - FABRIC_CA_SERVER_CA_NAME=ca.org2.example.com - - BOOTSTRAP_USER_PASS=Admin:adminpw - - FABRIC_CA_SERVER_CSR_CN=ca.org2.example.com - - FABRIC_CA_SERVER_CSR_HOSTS=ca.org2.example.com - - FABRIC_CA_SERVER_CA_CERTFILE=/var/hyperledger/fabric-ca-server-config/ca.org2.example.com-cert.pem - - FABRIC_CA_SERVER_TLS_CERTFILE=/var/hyperledger/fabric-ca-server-config/ca.org2.example.com-cert.pem - - FABRIC_CA_SERVER_CA_KEYFILE=${FABRIC_CA_SERVER_ORG2_TLS_KEYFILE} - - FABRIC_CA_SERVER_TLS_KEYFILE=${FABRIC_CA_SERVER_ORG2_TLS_KEYFILE} - #command: sh -c 'fabric-ca-server start --ca.certfile /var/hyperledger/fabric-ca-server-config/ca.org2.example.com-cert.pem 
--tls.certfile /var/hyperledger/fabric-ca-server-config/ca.org2.example.com-cert.pem --ca.keyfile $${FABRIC_CA_SERVER_CA_KEYFILE} --tls.keyfile $${FABRIC_CA_SERVER_CA_KEYFILE} -b $${BOOTSTRAP_USER_PASS} -d' - #command: sh -c 'fabric-ca-server start --ca.certfile /var/hyperledger/fabric-ca-server-config/ca.org2.example.com-cert.pem --tls.certfile /var/hyperledger/fabric-ca-server-config/tls/server.crt --ca.keyfile $${FABRIC_CA_SERVER_CA_KEYFILE} --tls.keyfile /var/hyperledger/fabric-ca-server-config/tls/server.key -b $${BOOTSTRAP_USER_PASS} -d' - #command: sh -c 'fabric-ca-server start -b $${BOOTSTRAP_USER_PASS} --ca.certfile /var/hyperledger/fabric-ca-server-config/ca.org2.example.com-cert.pem --ca.keyfile $${FABRIC_CA_SERVER_CA_KEYFILE} --tls.enabled --csr.hosts ca.org2.example.com,localhost -d' - command: sh -c 'fabric-ca-server start --ca.certfile /var/hyperledger/fabric-ca-server-config/ca.org2.example.com-cert.pem --ca.keyfile $${FABRIC_CA_SERVER_CA_KEYFILE} --tls.enabled --csr.hosts $${FABRIC_CA_SERVER_CA_NAME},localhost,0.0.0.0 -b $${BOOTSTRAP_USER_PASS}' - #command: sh -c 'fabric-ca-server start -d -b $${BOOTSTRAP_USER_PASS} --port 9054' - volumes: - - ../configs/${CORE_PEER_NETWORKID}/peerOrganizations/org2.example.com/ca:/var/hyperledger/fabric-ca-server-config - - ../configs/${CORE_PEER_NETWORKID}/peerOrganizations/org2.example.com:/var/hyperledger/fabric-ca-server - networks: - behave: - aliases: - - ${CORE_PEER_NETWORKID} - - zookeeper0: - extends: - file: docker-compose-base.yml - service: zookeeper - container_name: zookeeper0 - environment: - - ZOO_MY_ID=1 - - ZOO_SERVERS=server.1=zookeeper0:2888:3888 server.2=zookeeper1:2888:3888 server.3=zookeeper2:2888:3888 - networks: - behave: - aliases: - - ${CORE_PEER_NETWORKID} - - zookeeper1: - extends: - file: docker-compose-base.yml - service: zookeeper - container_name: zookeeper1 - environment: - - ZOO_MY_ID=2 - - ZOO_SERVERS=server.1=zookeeper0:2888:3888 server.2=zookeeper1:2888:3888 
server.3=zookeeper2:2888:3888 - networks: - behave: - aliases: - - ${CORE_PEER_NETWORKID} - - zookeeper2: - extends: - file: docker-compose-base.yml - service: zookeeper - container_name: zookeeper2 - environment: - - ZOO_MY_ID=3 - - ZOO_SERVERS=server.1=zookeeper0:2888:3888 server.2=zookeeper1:2888:3888 server.3=zookeeper2:2888:3888 - networks: - behave: - aliases: - - ${CORE_PEER_NETWORKID} - - kafka0: - extends: - file: docker-compose-base.yml - service: kafka - container_name: kafka0 - environment: - - KAFKA_BROKER_ID=0 - - KAFKA_ZOOKEEPER_CONNECT=zookeeper0:2181,zookeeper1:2181,zookeeper2:2181 - - KAFKA_MESSAGE_MAX_BYTES=${KAFKA_MESSAGE_MAX_BYTES} - - KAFKA_REPLICA_FETCH_MAX_BYTES=${KAFKA_REPLICA_FETCH_MAX_BYTES} - - KAFKA_REPLICA_FETCH_RESPONSE_MAX_BYTES=${KAFKA_REPLICA_FETCH_RESPONSE_MAX_BYTES} - depends_on: - - zookeeper0 - - zookeeper1 - - zookeeper2 - networks: - behave: - aliases: - - ${CORE_PEER_NETWORKID} - - kafka1: - extends: - file: docker-compose-base.yml - service: kafka - container_name: kafka1 - environment: - - KAFKA_BROKER_ID=1 - - KAFKA_ZOOKEEPER_CONNECT=zookeeper0:2181,zookeeper1:2181,zookeeper2:2181 - - KAFKA_MESSAGE_MAX_BYTES=${KAFKA_MESSAGE_MAX_BYTES} - - KAFKA_REPLICA_FETCH_MAX_BYTES=${KAFKA_REPLICA_FETCH_MAX_BYTES} - - KAFKA_REPLICA_FETCH_RESPONSE_MAX_BYTES=${KAFKA_REPLICA_FETCH_RESPONSE_MAX_BYTES} - depends_on: - - zookeeper0 - - zookeeper1 - - zookeeper2 - networks: - behave: - aliases: - - ${CORE_PEER_NETWORKID} - - kafka2: - extends: - file: docker-compose-base.yml - service: kafka - container_name: kafka2 - environment: - - KAFKA_BROKER_ID=2 - - KAFKA_ZOOKEEPER_CONNECT=zookeeper0:2181,zookeeper1:2181,zookeeper2:2181 - - KAFKA_MESSAGE_MAX_BYTES=${KAFKA_MESSAGE_MAX_BYTES} - - KAFKA_REPLICA_FETCH_MAX_BYTES=${KAFKA_REPLICA_FETCH_MAX_BYTES} - - KAFKA_REPLICA_FETCH_RESPONSE_MAX_BYTES=${KAFKA_REPLICA_FETCH_RESPONSE_MAX_BYTES} - depends_on: - - zookeeper0 - - zookeeper1 - - zookeeper2 - networks: - behave: - aliases: - - 
${CORE_PEER_NETWORKID} - - kafka3: - extends: - file: docker-compose-base.yml - service: kafka - container_name: kafka3 - environment: - - KAFKA_BROKER_ID=3 - - KAFKA_ZOOKEEPER_CONNECT=zookeeper0:2181,zookeeper1:2181,zookeeper2:2181 - - KAFKA_MESSAGE_MAX_BYTES=${KAFKA_MESSAGE_MAX_BYTES} - - KAFKA_REPLICA_FETCH_MAX_BYTES=${KAFKA_REPLICA_FETCH_MAX_BYTES} - - KAFKA_REPLICA_FETCH_RESPONSE_MAX_BYTES=${KAFKA_REPLICA_FETCH_RESPONSE_MAX_BYTES} - depends_on: - - zookeeper0 - - zookeeper1 - - zookeeper2 - networks: - behave: - aliases: - - ${CORE_PEER_NETWORKID} - - zookeeper0: - extends: - file: docker-compose-base.yml - service: zookeeper - container_name: zookeeper0 - environment: - - ZOO_MY_ID=1 - - ZOO_SERVERS=server.1=zookeeper0:2888:3888 server.2=zookeeper1:2888:3888 server.3=zookeeper2:2888:3888 - networks: - behave: - aliases: - - ${CORE_PEER_NETWORKID} - - zookeeper1: - extends: - file: docker-compose-base.yml - service: zookeeper - container_name: zookeeper1 - environment: - - ZOO_MY_ID=2 - - ZOO_SERVERS=server.1=zookeeper0:2888:3888 server.2=zookeeper1:2888:3888 server.3=zookeeper2:2888:3888 - networks: - behave: - aliases: - - ${CORE_PEER_NETWORKID} - - zookeeper2: - extends: - file: docker-compose-base.yml - service: zookeeper - container_name: zookeeper2 - environment: - - ZOO_MY_ID=3 - - ZOO_SERVERS=server.1=zookeeper0:2888:3888 server.2=zookeeper1:2888:3888 server.3=zookeeper2:2888:3888 - networks: - behave: - aliases: - - ${CORE_PEER_NETWORKID} - - kafka0: - extends: - file: docker-compose-base.yml - service: kafka - container_name: kafka0 - environment: - - KAFKA_BROKER_ID=0 - - KAFKA_ZOOKEEPER_CONNECT=zookeeper0:2181,zookeeper1:2181,zookeeper2:2181 - - KAFKA_MESSAGE_MAX_BYTES=${KAFKA_MESSAGE_MAX_BYTES} - - KAFKA_REPLICA_FETCH_MAX_BYTES=${KAFKA_REPLICA_FETCH_MAX_BYTES} - - KAFKA_REPLICA_FETCH_RESPONSE_MAX_BYTES=${KAFKA_REPLICA_FETCH_RESPONSE_MAX_BYTES} - depends_on: - - zookeeper0 - - zookeeper1 - - zookeeper2 - networks: - behave: - aliases: - - 
${CORE_PEER_NETWORKID} - - kafka1: - extends: - file: docker-compose-base.yml - service: kafka - container_name: kafka1 - environment: - - KAFKA_BROKER_ID=1 - - KAFKA_ZOOKEEPER_CONNECT=zookeeper0:2181,zookeeper1:2181,zookeeper2:2181 - - KAFKA_MESSAGE_MAX_BYTES=${KAFKA_MESSAGE_MAX_BYTES} - - KAFKA_REPLICA_FETCH_MAX_BYTES=${KAFKA_REPLICA_FETCH_MAX_BYTES} - - KAFKA_REPLICA_FETCH_RESPONSE_MAX_BYTES=${KAFKA_REPLICA_FETCH_RESPONSE_MAX_BYTES} - depends_on: - - zookeeper0 - - zookeeper1 - - zookeeper2 - networks: - behave: - aliases: - - ${CORE_PEER_NETWORKID} - - kafka2: - extends: - file: docker-compose-base.yml - service: kafka - container_name: kafka2 - environment: - - KAFKA_BROKER_ID=2 - - KAFKA_ZOOKEEPER_CONNECT=zookeeper0:2181,zookeeper1:2181,zookeeper2:2181 - - KAFKA_MESSAGE_MAX_BYTES=${KAFKA_MESSAGE_MAX_BYTES} - - KAFKA_REPLICA_FETCH_MAX_BYTES=${KAFKA_REPLICA_FETCH_MAX_BYTES} - - KAFKA_REPLICA_FETCH_RESPONSE_MAX_BYTES=${KAFKA_REPLICA_FETCH_RESPONSE_MAX_BYTES} - depends_on: - - zookeeper0 - - zookeeper1 - - zookeeper2 - networks: - behave: - aliases: - - ${CORE_PEER_NETWORKID} - - kafka3: - extends: - file: docker-compose-base.yml - service: kafka - container_name: kafka3 - environment: - - KAFKA_BROKER_ID=3 - - KAFKA_ZOOKEEPER_CONNECT=zookeeper0:2181,zookeeper1:2181,zookeeper2:2181 - - KAFKA_MESSAGE_MAX_BYTES=${KAFKA_MESSAGE_MAX_BYTES} - - KAFKA_REPLICA_FETCH_MAX_BYTES=${KAFKA_REPLICA_FETCH_MAX_BYTES} - - KAFKA_REPLICA_FETCH_RESPONSE_MAX_BYTES=${KAFKA_REPLICA_FETCH_RESPONSE_MAX_BYTES} - depends_on: - - zookeeper0 - - zookeeper1 - - zookeeper2 - networks: - behave: - aliases: - - ${CORE_PEER_NETWORKID} - - orderer0.example.com: - extends: - file: docker-compose-base.yml - service: orderer - container_name: orderer0.example.com - environment: - - ORDERER_HOST=orderer0.example.com - - CONFIGTX_ORDERER_ORDERERTYPE=kafka - - CONFIGTX_ORDERER_KAFKA_BROKERS=[kafka0:9092,kafka1:9092,kafka2:9092,kafka3:9092] - - ORDERER_KAFKA_RETRY_SHORTINTERVAL=1s - - 
ORDERER_KAFKA_RETRY_SHORTTOTAL=30s - - ORDERER_KAFKA_VERBOSE=true - - ORDERER_GENERAL_GENESISPROFILE=SampleInsecureKafka - - ORDERER_ABSOLUTEMAXBYTES=${ORDERER_ABSOLUTEMAXBYTES} - - ORDERER_PREFERREDMAXBYTES=${ORDERER_PREFERREDMAXBYTES} - volumes: - - ../configs/${CORE_PEER_NETWORKID}/ordererOrganizations/example.com/orderers/orderer0.example.com/msp:/var/hyperledger/msp - - ../configs/${CORE_PEER_NETWORKID}/ordererOrganizations/example.com/orderers/orderer0.example.com/tls:/var/hyperledger/tls - depends_on: - - kafka0 - - kafka1 - - kafka2 - - kafka3 - networks: - behave: - aliases: - - ${CORE_PEER_NETWORKID} - ports: - - 7050:7050 - - orderer1.example.com: - extends: - file: docker-compose-base.yml - service: orderer - container_name: orderer1.example.com - environment: - - ORDERER_HOST=orderer1.example.com - - CONFIGTX_ORDERER_ORDERERTYPE=kafka - - CONFIGTX_ORDERER_KAFKA_BROKERS=[kafka0:9092,kafka1:9092,kafka2:9092,kafka3:9092] - - ORDERER_KAFKA_RETRY_SHORTINTERVAL=1s - - ORDERER_KAFKA_RETRY_SHORTTOTAL=30s - - ORDERER_KAFKA_VERBOSE=true - - ORDERER_GENERAL_GENESISPROFILE=SampleInsecureKafka - - ORDERER_ABSOLUTEMAXBYTES=${ORDERER_ABSOLUTEMAXBYTES} - - ORDERER_PREFERREDMAXBYTES=${ORDERER_PREFERREDMAXBYTES} - volumes: - - ../configs/${CORE_PEER_NETWORKID}/ordererOrganizations/example.com/orderers/orderer1.example.com/msp:/var/hyperledger/msp - - ../configs/${CORE_PEER_NETWORKID}/ordererOrganizations/example.com/orderers/orderer1.example.com/tls:/var/hyperledger/tls - depends_on: - - kafka0 - - kafka1 - - kafka2 - - kafka3 - networks: - behave: - aliases: - - ${CORE_PEER_NETWORKID} - ports: - - 8050:7050 - - orderer2.example.com: - extends: - file: docker-compose-base.yml - service: orderer - container_name: orderer2.example.com - environment: - - ORDERER_HOST=orderer2.example.com - - CONFIGTX_ORDERER_ORDERERTYPE=kafka - - CONFIGTX_ORDERER_KAFKA_BROKERS=[kafka0:9092,kafka1:9092,kafka2:9092,kafka3:9092] - - ORDERER_KAFKA_RETRY_SHORTINTERVAL=1s - - 
ORDERER_KAFKA_RETRY_SHORTTOTAL=30s - - ORDERER_KAFKA_VERBOSE=true - - ORDERER_GENERAL_GENESISPROFILE=SampleInsecureKafka - - ORDERER_ABSOLUTEMAXBYTES=${ORDERER_ABSOLUTEMAXBYTES} - - ORDERER_PREFERREDMAXBYTES=${ORDERER_PREFERREDMAXBYTES} - volumes: - - ../configs/${CORE_PEER_NETWORKID}/ordererOrganizations/example.com/orderers/orderer2.example.com/msp:/var/hyperledger/msp - - ../configs/${CORE_PEER_NETWORKID}/ordererOrganizations/example.com/orderers/orderer2.example.com/tls:/var/hyperledger/tls - depends_on: - - kafka0 - - kafka1 - - kafka2 - - kafka3 - networks: - behave: - aliases: - - ${CORE_PEER_NETWORKID} - ports: - - 9050:7050 - - peer0.org1.example.com: - extends: - file: docker-compose-base.yml - service: peer - container_name: peer0.org1.example.com - environment: - - CORE_PEER_CHAINCODELISTENADDRESS=peer0.org1.example.com:7052 - - CORE_PEER_ID=peer0.org1.example.com - - CORE_PEER_ADDRESS=peer0.org1.example.com:7051 - - CORE_PEER_GOSSIP_BOOTSTRAP=peer1.org1.example.com:7051 - - CORE_PEER_GOSSIP_EXTERNALENDPOINT=peer0.org1.example.com:7051 - - CORE_PEER_GOSSIP_ORGLEADER=${CORE_PEER_GOSSIP_ORGLEADER_PEER0_ORG1} - - CORE_PEER_GOSSIP_USELEADERELECTION=${CORE_PEER_GOSSIP_USELEADERELECTION_PEER0_ORG1} - - CORE_PEER_LOCALMSPID=org1.example.com - - CORE_PEER_TLS_CLIENTROOTCAS_FILES=/var/hyperledger/users/Admin@org1.example.com/tls/ca.crt - - CORE_PEER_TLS_CLIENTCERT_FILE=/var/hyperledger/users/Admin@org1.example.com/tls/client.crt - - CORE_PEER_TLS_CLIENTKEY_FILE=/var/hyperledger/users/Admin@org1.example.com/tls/client.key - volumes: - - ../configs/${CORE_PEER_NETWORKID}/peerOrganizations/org1.example.com/peers/peer0.org1.example.com/msp:/var/hyperledger/msp - - ../configs/${CORE_PEER_NETWORKID}/peerOrganizations/org1.example.com/peers/peer0.org1.example.com/tls:/var/hyperledger/tls - - ../configs/${CORE_PEER_NETWORKID}/peerOrganizations/org1.example.com/users:/var/hyperledger/users - - 
../configs/${CORE_PEER_NETWORKID}/peerOrganizations/org1.example.com/ca:/var/hyperledger/ca - depends_on: - - orderer0.example.com - - orderer1.example.com - - orderer2.example.com - networks: - behave: - aliases: - - ${CORE_PEER_NETWORKID} - ports: - - 7051:7051 - - 7053:7053 - - peer0.org2.example.com: - extends: - file: docker-compose-base.yml - service: peer - container_name: peer0.org2.example.com - environment: - - CORE_PEER_CHAINCODELISTENADDRESS=peer0.org2.example.com:7052 - - CORE_PEER_ID=peer0.org2.example.com - - CORE_PEER_ADDRESS=peer0.org2.example.com:7051 - - CORE_PEER_GOSSIP_BOOTSTRAP=peer1.org2.example.com:7051 - - CORE_PEER_GOSSIP_EXTERNALENDPOINT=peer0.org2.example.com:7051 - - CORE_PEER_GOSSIP_ORGLEADER=${CORE_PEER_GOSSIP_ORGLEADER_PEER0_ORG2} - - CORE_PEER_GOSSIP_USELEADERELECTION=${CORE_PEER_GOSSIP_USELEADERELECTION_PEER0_ORG2} - - CORE_PEER_LOCALMSPID=org2.example.com - - CORE_PEER_TLS_CLIENTROOTCAS_FILES=/var/hyperledger/users/Admin@org2.example.com/tls/ca.crt - - CORE_PEER_TLS_CLIENTCERT_FILE=/var/hyperledger/users/Admin@org2.example.com/tls/client.crt - - CORE_PEER_TLS_CLIENTKEY_FILE=/var/hyperledger/users/Admin@org2.example.com/tls/client.key - volumes: - - ../configs/${CORE_PEER_NETWORKID}/peerOrganizations/org2.example.com/peers/peer0.org2.example.com/msp:/var/hyperledger/msp - - ../configs/${CORE_PEER_NETWORKID}/peerOrganizations/org2.example.com/peers/peer0.org2.example.com/tls:/var/hyperledger/tls - - ../configs/${CORE_PEER_NETWORKID}/peerOrganizations/org2.example.com/users:/var/hyperledger/users - depends_on: - - orderer0.example.com - - orderer1.example.com - - orderer2.example.com - networks: - behave: - aliases: - - ${CORE_PEER_NETWORKID} - ports: - - 9051:7051 - - 9053:7053 - - peer1.org1.example.com: - extends: - file: docker-compose-base.yml - service: peer - container_name: peer1.org1.example.com - environment: - - CORE_PEER_CHAINCODELISTENADDRESS=peer1.org1.example.com:7052 - - CORE_PEER_ID=peer1.org1.example.com - - 
CORE_PEER_ADDRESS=peer1.org1.example.com:7051 - - CORE_PEER_GOSSIP_BOOTSTRAP=peer0.org1.example.com:7051 - - CORE_PEER_GOSSIP_ORGLEADER=${CORE_PEER_GOSSIP_ORGLEADER_PEER1_ORG1} - - CORE_PEER_GOSSIP_USELEADERELECTION=${CORE_PEER_GOSSIP_USELEADERELECTION_PEER1_ORG1} - - CORE_PEER_LOCALMSPID=org1.example.com - - CORE_PEER_TLS_CLIENTROOTCAS_FILES=/var/hyperledger/users/Admin@org1.example.com/tls/ca.crt - - CORE_PEER_TLS_CLIENTCERT_FILE=/var/hyperledger/users/Admin@org1.example.com/tls/client.crt - - CORE_PEER_TLS_CLIENTKEY_FILE=/var/hyperledger/users/Admin@org1.example.com/tls/client.key - volumes: - - ../configs/${CORE_PEER_NETWORKID}/peerOrganizations/org1.example.com/peers/peer1.org1.example.com/msp:/var/hyperledger/msp - - ../configs/${CORE_PEER_NETWORKID}/peerOrganizations/org1.example.com/peers/peer1.org1.example.com/tls:/var/hyperledger/tls - - ../configs/${CORE_PEER_NETWORKID}/peerOrganizations/org1.example.com/users:/var/hyperledger/users - depends_on: - - orderer0.example.com - - orderer1.example.com - - orderer2.example.com - - peer0.org1.example.com - networks: - behave: - aliases: - - ${CORE_PEER_NETWORKID} - ports: - - 8051:7051 - - 8053:7053 - - peer1.org2.example.com: - extends: - file: docker-compose-base.yml - service: peer - container_name: peer1.org2.example.com - environment: - - CORE_PEER_CHAINCODELISTENADDRESS=peer1.org2.example.com:7052 - - CORE_PEER_ID=peer1.org2.example.com - - CORE_PEER_ADDRESS=peer1.org2.example.com:7051 - - CORE_PEER_GOSSIP_BOOTSTRAP=peer0.org2.example.com:7051 - - CORE_PEER_GOSSIP_ORGLEADER=${CORE_PEER_GOSSIP_ORGLEADER_PEER1_ORG2} - - CORE_PEER_GOSSIP_USELEADERELECTION=${CORE_PEER_GOSSIP_USELEADERELECTION_PEER1_ORG2} - - CORE_PEER_LOCALMSPID=org2.example.com - - CORE_PEER_TLS_CLIENTROOTCAS_FILES=/var/hyperledger/users/Admin@org2.example.com/tls/ca.crt - - CORE_PEER_TLS_CLIENTCERT_FILE=/var/hyperledger/users/Admin@org2.example.com/tls/client.crt - - 
CORE_PEER_TLS_CLIENTKEY_FILE=/var/hyperledger/users/Admin@org2.example.com/tls/client.key - volumes: - - ../configs/${CORE_PEER_NETWORKID}/peerOrganizations/org2.example.com/peers/peer1.org2.example.com/msp:/var/hyperledger/msp - - ../configs/${CORE_PEER_NETWORKID}/peerOrganizations/org2.example.com/peers/peer1.org2.example.com/tls:/var/hyperledger/tls - - ../configs/${CORE_PEER_NETWORKID}/peerOrganizations/org2.example.com/users:/var/hyperledger/users - depends_on: - - orderer0.example.com - - orderer1.example.com - - orderer2.example.com - - peer0.org2.example.com - networks: - behave: - aliases: - - ${CORE_PEER_NETWORKID} - ports: - - 10051:7051 - - 10053:7053 diff --git a/app/platform/fabric/e2e-test/feature/environment.py b/app/platform/fabric/e2e-test/feature/environment.py deleted file mode 100644 index b652305ac..000000000 --- a/app/platform/fabric/e2e-test/feature/environment.py +++ /dev/null @@ -1,135 +0,0 @@ -# -# Copyright IBM Corp All Rights Reserved -# -# SPDX-License-Identifier: Apache-2.0 -# - -import os -import subprocess -import shutil -import gc -import psutil -from steps.endorser_util import CLIInterface, ToolInterface - - -def getLogFiles(containers, fileSuffix): - """ This will gather the logs for the different component containers as well as - the chaincode containers. If the containers is a list of strings, it is - assumed this is a chaincode container list. Otherwise, the list is a list - of Container objects. - """ - for container in containers: - if isinstance(container, str): - namePart, sep, _ = container.rpartition("-") - containerName = container - else: - namePart = container.containerName - containerName = container.containerName - try: - with open(namePart + fileSuffix, "w+") as logfile: - rc = subprocess.call(["docker", "logs", containerName], stdout=logfile, stderr=logfile) - if rc !=0 : - print("Cannot get logs for {0}. 
Docker rc = {1}".format(namePart, rc)) - except: - print("Unable to get the logs for {}".format(namePart + fileSuffix)) - - -def before_scenario(context, scenario): - # Remove all existing containers if any - output = str(subprocess.check_output(["docker ps -aq"], shell=True)) - container_list = output.strip().split('\n') - for container in container_list: - if container != '': - subprocess.call(['docker rm -f {}'.format(container)], shell=True) - - -def after_scenario(context, scenario): - # Display memory usage before tearing down the network - mem = psutil.virtual_memory() - print("Memory Info Before Network Teardown:\n\tFree: {}\n\tUsed: {}\n\tPercentage: {}\n".format(mem.free, mem.used, mem.percent)) - - if hasattr(context, "printEnvWarning") and context.printEnvWarning: - print("WARNING: The permissions on the newly generated user files did not match the original files. Workaround was deployed.") - - # Show files in the configs directory for this test - if hasattr(context, "projectName"): - output = subprocess.check_output(["ls -ltr configs/{}".format(context.projectName)], shell=True) - print(output) - output = subprocess.check_output(["ls -ltr configs/{}/peerOrganizations/org*.example.com/users".format(context.projectName)], shell=True) - print(output) - - getLogs = context.config.userdata.get("logs", "N") - if getLogs.lower() == "force" or (scenario.status == "failed" and getLogs.lower() == "y" and "compose_containers" in context): - print("Collecting container logs for Scenario '{}'".format(scenario.name)) - # Replace spaces and slashes with underscores - fileSuffix = "_" + scenario.name.replace(" ", "_").replace("/", "_") + ".log" - # get logs from the peer containers - getLogFiles(context.composition.containerDataList, fileSuffix) - # get logs from the chaincode containers - chaincodeContainers = subprocess.check_output(["docker", "ps", "-f", "name=-peer", "--format", "{{.Names}}"]) - getLogFiles(chaincodeContainers.splitlines(), fileSuffix) - - if 
'doNotDecompose' in scenario.tags: - if 'compose_yaml' in context: - print("Not going to decompose after scenario {0}, with yaml '{1}'".format(scenario.name, - context.compose_yaml)) - elif 'composition' in context: - # Remove config data and docker containers - shutil.rmtree("configs/%s" % context.composition.projectName) - shutil.rmtree("/tmp/fabric-client-kvs_org1", ignore_errors=True) - shutil.rmtree("/tmp/fabric-client-kvs_org2", ignore_errors=True) - context.composition.decompose() - elif hasattr(context, 'projectName'): - shutil.rmtree("configs/%s" % context.projectName) - - # Print memory information after every scenario - memory = subprocess.check_output(["df", "-h"], shell=True) - print("\nMemory Usage Info:\n{}\n".format(memory)) - mem = psutil.virtual_memory() - print("*** Memory Info:\n\tFree: {}\n\tUsed: {}\n\tPercentage: {}\n".format(mem.free, mem.used, mem.percent)) - - # Clean up memory in between scenarios, just in case - if hasattr(context, "random_key"): - del context.random_key - if hasattr(context, "payload"): - del context.payload - if hasattr(context, "composition"): - del context.composition - if hasattr(context, "result"): - del context.result - gc.collect() - -def before_all(context): - # Be sure to use a fresh install of the vendored packages for this chaincode - shutil.rmtree("../fabric/examples/chaincode/go/enccc_example/vendor", ignore_errors=True) - - # Performing `npm install` before test suit not before test cases. 
- shutil.rmtree("./node_modules", ignore_errors=True) - shutil.rmtree("./package-lock.json", ignore_errors=True) - shutil.copyfile("package.json", "../../../package.json") - npminstall = subprocess.check_output(["npm install --silent"], - env=os.environ, - cwd="../../..", - shell=True) - print("npm install: {}".format(npminstall)) - shutil.copytree("../../../node_modules", "./node_modules") - context.interface = CLIInterface() - context.remote = False - if context.config.userdata.get("network", None) is not None: - context.network = context.config.userdata["network"] - context.remote = True - context.interface = ToolInterface(context) - - mem = psutil.virtual_memory() - print("Starting Memory Info:\n\tFree: {}\n\tUsed: {}\n\tPercentage: {}\n".format(mem.free, mem.used, mem.percent)) - -def after_all(context): - # Removing Node modules at the end of the test suites - if os.path.exists("./node_modules"): - shutil.rmtree("./node_modules", ignore_errors=True) - shutil.rmtree("../../../node_modules", ignore_errors=True) - shutil.rmtree("../../../package-lock.json", ignore_errors=True) - subprocess.call(["npm cache clear --force"], shell=True) - # subprocess.call(["npm i -g npm"], shell=True) - mem = psutil.virtual_memory() - print("\nEnding Memory Info:\n\tFree: {}\n\tUsed: {}\n\tPercentage: {}".format(mem.free, mem.used, mem.percent)) diff --git a/app/platform/fabric/e2e-test/feature/explorer-configs/config-balance-transfer.json b/app/platform/fabric/e2e-test/feature/explorer-configs/config-balance-transfer.json deleted file mode 100644 index 30fbd01f2..000000000 --- a/app/platform/fabric/e2e-test/feature/explorer-configs/config-balance-transfer.json +++ /dev/null @@ -1,9 +0,0 @@ -{ - "network-configs": { - "balance-transfer": { - "name": "balance-transfer", - "profile": "./connection-profile/balance-transfer.json" - } - }, - "license": "Apache-2.0" -} diff --git a/app/platform/fabric/e2e-test/feature/explorer-configs/config-first-network.json 
b/app/platform/fabric/e2e-test/feature/explorer-configs/config-first-network.json deleted file mode 100644 index 29b9812cb..000000000 --- a/app/platform/fabric/e2e-test/feature/explorer-configs/config-first-network.json +++ /dev/null @@ -1,9 +0,0 @@ -{ - "network-configs": { - "first-network": { - "name": "first-network", - "profile": "./connection-profile/first-network.json" - } - }, - "license": "Apache-2.0" -} diff --git a/app/platform/fabric/e2e-test/feature/explorer-configs/config-solo-tls-disabled.json b/app/platform/fabric/e2e-test/feature/explorer-configs/config-solo-tls-disabled.json deleted file mode 100644 index f2a4c4573..000000000 --- a/app/platform/fabric/e2e-test/feature/explorer-configs/config-solo-tls-disabled.json +++ /dev/null @@ -1,9 +0,0 @@ -{ - "network-configs": { - "first-network": { - "name": "solo-tls-disabled", - "profile": "./connection-profile/solo-tls-disabled.json" - } - }, - "license": "Apache-2.0" -} diff --git a/app/platform/fabric/e2e-test/feature/explorer-configs/connection-profile/balance-transfer.json b/app/platform/fabric/e2e-test/feature/explorer-configs/connection-profile/balance-transfer.json deleted file mode 100644 index b49199990..000000000 --- a/app/platform/fabric/e2e-test/feature/explorer-configs/connection-profile/balance-transfer.json +++ /dev/null @@ -1,68 +0,0 @@ -{ - "name": "balance-transfer-network", - "version": "1.0.0", - "license": "Apache-2.0", - "client": { - "tlsEnable": true, - "adminUser": "latitia", - "adminPassword": "h3ll0", - "enableAuthentication": false, - "organization": "Org1", - "connection": { - "timeout": { - "peer": { - "endorser": "300" - }, - "orderer": "300" - } - } - }, - "channels": { - "mychannel": { - "orderers": ["orderer0.example.com"], - "peers": { - "peer0.org1.example.com": { - "ledgerQuery": true - } - } - } - }, - "organizations": { - "Org1": { - "mspid": "org1.example.com", - "peers": ["peer0.org1.example.com", "peer1.org1.example.com"], - "certificateAuthorities": 
["ca-org1"], - "adminPrivateKey": { - "path": "/tmp/crypto/peerOrganizations/org1.example.com/users/Admin@org1.example.com/msp/keystore/sk" - }, - "signedCert": { - "path": "/tmp/crypto/peerOrganizations/org1.example.com/users/Admin@org1.example.com/msp/signcerts/Admin@org1.example.com-cert.pem" - } - } - }, - - "peers": { - "peer0.org1.example.com": { - "url": "grpcs://peer0.org1.example.com:7051", - "grpcOptions": { - "ssl-target-name-override": "peer0.org1.example.com" - }, - "tlsCACerts": { - "path": "/tmp/crypto/peerOrganizations/org1.example.com/tlsca/tlsca.org1.example.com-cert.pem" - } - } - }, - "certificateAuthorities": { - "ca-org1": { - "url": "https://ca.org1.example.com:7054", - "httpOptions": { - "verify": false - }, - "tlsCACerts": { - "path": "/tmp/crypto/peerOrganizations/org1.example.com/ca/ca.org1.example.com-cert.pem" - }, - - "caName": "ca.org1.example.com" - } - } -} diff --git a/app/platform/fabric/e2e-test/feature/explorer-configs/connection-profile/first-network.json b/app/platform/fabric/e2e-test/feature/explorer-configs/connection-profile/first-network.json deleted file mode 100644 index 61830555d..000000000 --- a/app/platform/fabric/e2e-test/feature/explorer-configs/connection-profile/first-network.json +++ /dev/null @@ -1,60 +0,0 @@ -{ - "name": "first-network", - "version": "1.0.0", - "license": "Apache-2.0", - "client": { - "tlsEnable": true, - "adminUser": "admin", - "adminPassword": "adminpw", - "enableAuthentication": false, - "organization": "Org1MSP", - "connection": { - "timeout": { - "peer": { - "endorser": "300" - }, - "orderer": "300" - } - } - }, - "channels": { - "mychannel": { - "peers": { - "peer0.org1.example.com": {} - }, - "connection": { - "timeout": { - "peer": { - "endorser": "6000", - "eventHub": "6000", - "eventReg": "6000" - } - } - } - } - }, - "organizations": { - "Org1MSP": { - "mspid": "Org1MSP", - "fullpath": true, - "adminPrivateKey": { - "path": 
"/tmp/crypto/peerOrganizations/org1.example.com/users/Admin@org1.example.com/msp/keystore/sk" - }, - "signedCert": { - "path": "/tmp/crypto/peerOrganizations/org1.example.com/users/Admin@org1.example.com/msp/signcerts/Admin@org1.example.com-cert.pem" - } - } - }, - "peers": { - "peer0.org1.example.com": { - "tlsCACerts": { - "path": "/tmp/crypto/peerOrganizations/org1.example.com/peers/peer0.org1.example.com/tls/ca.crt" - }, - "url": "grpcs://peer0.org1.example.com:7051", - "eventUrl": "grpcs://peer0.org1.example.com:7053", - "grpcOptions": { - "ssl-target-name-override": "peer0.org1.example.com" - } - } - } -} diff --git a/app/platform/fabric/e2e-test/feature/explorer-configs/connection-profile/solo-tls-disabled.json b/app/platform/fabric/e2e-test/feature/explorer-configs/connection-profile/solo-tls-disabled.json deleted file mode 100644 index e68884742..000000000 --- a/app/platform/fabric/e2e-test/feature/explorer-configs/connection-profile/solo-tls-disabled.json +++ /dev/null @@ -1,60 +0,0 @@ -{ - "name": "first-network", - "version": "1.0.0", - "license": "Apache-2.0", - "client": { - "tlsEnable": false, - "adminUser": "admin", - "adminPassword": "adminpw", - "enableAuthentication": false, - "organization": "Org1MSP", - "connection": { - "timeout": { - "peer": { - "endorser": "300" - }, - "orderer": "300" - } - } - }, - "channels": { - "mychannel": { - "peers": { - "peer0.org1.example.com": {} - }, - "connection": { - "timeout": { - "peer": { - "endorser": "6000", - "eventHub": "6000", - "eventReg": "6000" - } - } - } - } - }, - "organizations": { - "Org1MSP": { - "mspid": "org1.example.com", - "fullpath": true, - "adminPrivateKey": { - "path": "/tmp/crypto/peerOrganizations/org1.example.com/users/Admin@org1.example.com/msp/keystore/sk" - }, - "signedCert": { - "path": "/tmp/crypto/peerOrganizations/org1.example.com/users/Admin@org1.example.com/msp/signcerts/Admin@org1.example.com-cert.pem" - } - } - }, - "peers": { - "peer0.org1.example.com": { - "tlsCACerts": 
{ - "path": "/tmp/crypto/peerOrganizations/org1.example.com/peers/peer0.org1.example.com/tls/ca.crt" - }, - "url": "grpc://peer0.org1.example.com:7051", - "eventUrl": "grpc://peer0.org1.example.com:7053", - "grpcOptions": { - "ssl-target-name-override": "peer0.org1.example.com" - } - } - } -} diff --git a/app/platform/fabric/e2e-test/feature/explorer.feature b/app/platform/fabric/e2e-test/feature/explorer.feature deleted file mode 100644 index c5ed2084f..000000000 --- a/app/platform/fabric/e2e-test/feature/explorer.feature +++ /dev/null @@ -1,195 +0,0 @@ -# SPDX-License-Identifier: Apache-2.0 - -Feature: Bootstrapping Hyperledger Explorer - As a user I want to be able to bootstrap Hyperledger Explorer - -@sanitycheck -# @doNotDecompose -Scenario Outline: : Bring up explorer and send requests to the basic REST API functions successfully - Given I have a bootstrapped fabric network of type - Given the NETWORK_PROFILE environment variable is solo-tls-disabled - - When an admin sets up a channel named "mychannel" - When an admin deploys chaincode at path "github.com/hyperledger/fabric/examples/chaincode/go/example02/cmd" with args ["init","a","1000","b","2000"] with name "mycc" on channel "mychannel" - When a user invokes on the channel "mychannel" using chaincode named "mycc" with args ["invoke","a","b","10"] - When I wait "3" seconds - When a user queries on the channel "mychannel" using chaincode named "mycc" with args ["query","a"] - Then a user receives a success response of 990 - - When I start explorer - Then the logs on explorer.mynetwork.com contains "Please open web browser to access :" within 20 seconds - - # Need to wait enough until completing process a new BlockEvent - Given I wait "20" seconds - Given I set base URL to "http://localhost:8090" - When I make a GET request to "auth/networklist" - Then the response status code should equal 200 - Then the response structure should equal "networklistResp" - Then JSON at path ".networkList" should equal [[ 
"first-network", {} ]] - - When I make a POST request to "auth/login" with parameters - |user |password |network | - |test |test |first-network | - Then the response status code should equal 200 - Then the response structure should equal "loginResp" - Then JSON at path ".success" should equal true - Then JSON at path ".user.message" should equal "logged in" - Then JSON at path ".user.name" should equal "test" - - Given I want to reuse "token" parameter - Given I set Authorization header to "context.token" - When I make a GET request to "api/channels" - Then the response status code should equal 200 - Then the response structure should equal "channelsResp" - Then JSON at path ".channels" should equal ["mychannel"] - - When I make a GET request to "api/channels/info" - Then the response status code should equal 200 - Then the response structure should equal "channelsInfoResp" - Then JSON at path ".status" should equal 200 - Then JSON at path ".channels[0].channelname" should equal "mychannel" - - Given I want to reuse parameter "channel_genesis_hash" at path "channels[0].channel_genesis_hash" - Given I want to reuse parameter "block_height" at path "channels[0].blocks" - - # Building API route path by using variables stored in the context - When I make a GET request to the following path segment - # api/block// - |path | - |api | - |block | - |context.channel_genesis_hash | - |context.block_height | - Then the response status code should equal 200 - Then the response structure should equal "blockResp" - Then JSON at path ".status" should equal 200 - - When I make a GET request to "api/peersStatus/mychannel" - Then the response status code should equal 200 - Then the response structure should equal "peersStatusResp" - Then JSON at path ".status" should equal 200 - # TODO Currently the list in the response is empty - - When I make a GET request to the following path segment - # api/blockActivity/ - |path | - |api | - |blockActivity | - |context.channel_genesis_hash | - 
Then the response status code should equal 200 - Then the response structure should equal "blockactivityResp" - Then JSON at path ".status" should equal 200 - Then JSON at path ".row[0].channelname" should equal "mychannel" - - Examples: - |consensus_type| - |solo | - |kafka | - -@sanitycheck -# @doNotDecompose -Scenario Outline: Register a new user successfully using based orderer with a db using the with chaincode - Given I have a bootstrapped fabric network of type using state-database with tls - Given Copy "./bin/fabric-ca-client" to "/usr/local/bin/fabric-ca-client" on "peer0.org1.example.com" - Given Copy "./bin/fabric-ca-client" to "/usr/local/bin/fabric-ca-client" on "peer0.org2.example.com" - And I use the interface - And I enroll the following users using fabric-ca - | username | organization | password | role | certType | - | latitia | org1.example.com | h3ll0 | admin | x509 | - | scott | org2.example.com | th3r3 | member | x509 | - | adnan | org1.example.com | wh@tsup | member | x509 | - When an admin sets up a channel named "mychannel" - When an admin deploys chaincode at path "github.com/hyperledger/fabric/examples/chaincode/go/example02/cmd" with args ["init","a","1000","b","2000"] with name "mycc" on channel "mychannel" - When a user invokes on the channel "mychannel" using chaincode named "mycc" with args ["invoke","a","b","10"] - When I wait "3" seconds - When a user queries on the channel "mychannel" using chaincode named "mycc" with args ["query","a"] - Then a user receives a success response of 990 - - Given the NETWORK_PROFILE environment variable is balance-transfer - When I start explorer - Then the logs on explorer.mynetwork.com contains "Please open web browser to access :" within 20 seconds - - # Need to wait enough until completing process a new BlockEvent - Given I wait "20" seconds - Given I set base URL to "http://localhost:8090" - - When I make a POST request to "auth/login" with parameters - |user |password |network | - |latitia 
|h3ll0 |balance-transfer | - Then the response status code should equal 200 - Then the response structure should equal "loginResp" - Then JSON at path ".success" should equal true - Then JSON at path ".user.message" should equal "logged in" - Then JSON at path ".user.name" should equal "latitia" - - Given I want to reuse "token" parameter - Given I set Authorization header to "context.token" - - When I make a POST request to "api/register" with parameters - |user |password |affiliation |role | - |test2 |test2 |example.com |admin | - Then the response status code should equal 200 - Then the response structure should equal "registerResp" - Then the response parameter "status" should equal 200 - - # duplicate call : api/register (fail) - When I make a POST request to "api/register" with parameters - |user |password |affiliation |role | - |test2 |test2 |example.com |admin | - Then the response status code should equal 200 - Then the response structure should equal "registerResp" - Then the response parameter "status" should equal 400 - Then the response parameter "message" should equal "Error: already exists" - -Examples: - | type | database | interface | path | language | - #| solo | leveldb | Java SDK | github.com/hyperledger/fabric/examples/chaincode/go/example02/cmd | GOLANG | - | solo | leveldb | NodeJS SDK | github.com/hyperledger/fabric/examples/chaincode/go/example02/cmd | GOLANG | - # | kafka | couchdb | CLI | ../../fabric-test/chaincodes/example02/node | NODE | - # | solo | leveldb | NodeJS SDK | ../../fabric-samples/chaincode/chaincode_example02/java | JAVA | - -@bugfix -# @doNotDecompose -Scenario: Check a variety of error cases - Given I have a bootstrapped fabric network of type kafka - Given the NETWORK_PROFILE environment variable is solo-tls-disabled - - # [BE-583] Memory Leak : Channel Event Hub shoud be created just once - When an admin sets up a channel named "mychannel" - When I start explorer - Then the logs on explorer.mynetwork.com contains 
"Please open web browser to access :" within 20 seconds - Then the explorer app logs contains "Successfully created channel event hub for" 1 time(s) within 60 seconds - - # Not supported to register a new user in network without fabric-ca - Given I set base URL to "http://localhost:8090" - When I make a POST request to "auth/login" with parameters - |user |password |network | - |test1 |test1 |first-network | - Then the response status code should equal 200 - Then the response structure should equal "loginResp" - Then JSON at path ".success" should equal true - Then JSON at path ".user.message" should equal "logged in" - Then JSON at path ".user.name" should equal "test1" - - Given I want to reuse "token" parameter - Given I set Authorization header to "context.token" - - When I make a POST request to "api/register" with parameters - |user |password |affiliation |role | - |test2 |test2 |department1 |admin | - Then the response status code should equal 200 - Then the response structure should equal "registerResp" - Then the response parameter "status" should equal 400 - Then the response parameter "message" should equal "Error: did not register with CA" - - # [BE-603] Create a channel with long channel name - # [BE-713] Detect a newly added channel - When an admin sets up a channel named "channel2422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422" - Then the explorer app logs contains "Channel genesis hash for channel \[channel2422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422\]" within 60 seconds - - # [BE-690] Keep explorer running after losing the default orderer - When "orderer0.example.com" is 
stopped - Then the explorer app logs contains "Succeeded to switch default orderer to orderer1.example.com" within 30 seconds - Given I wait "20" seconds - When "orderer1.example.com" is stopped - Then the explorer app logs contains "Succeeded to switch default orderer to orderer2.example.com" within 30 seconds diff --git a/app/platform/fabric/e2e-test/feature/explorer_gui_e2e.feature b/app/platform/fabric/e2e-test/feature/explorer_gui_e2e.feature deleted file mode 100644 index 5d9d37e37..000000000 --- a/app/platform/fabric/e2e-test/feature/explorer_gui_e2e.feature +++ /dev/null @@ -1,22 +0,0 @@ -# SPDX-License-Identifier: Apache-2.0 - -Feature: Bootstrapping Hyperledger Explorer - As a user I want to be able to bootstrap Hyperledger Explorer - -@doNotDecompose -Scenario: Bring up fabric network for GUI e2e test - Given For explorer env, I have a bootstrapped fabric network of type kafka-sd - Given the NETWORK_PROFILE environment variable is solo-tls-disabled - - When an admin sets up a channel named "mychannel" - Given Update "peer0.org1.example.com" of "Org1ExampleCom" as an anchor in "mychannel" - Given Update "peer0.org2.example.com" of "Org2ExampleCom" as an anchor in "mychannel" - - When an admin deploys chaincode at path "github.com/hyperledger/fabric/examples/chaincode/go/example02/cmd" with args ["init","a","1000","b","2000"] with name "mycc" on channel "mychannel" - When a user invokes on the channel "mychannel" using chaincode named "mycc" with args ["invoke","a","b","10"] - When I wait "3" seconds - When a user queries on the channel "mychannel" using chaincode named "mycc" with args ["query","a"] - Then a user receives a success response of 990 - - When I start explorer - Then the logs on explorer.mynetwork.com contains "Please open web browser to access :" within 20 seconds diff --git a/app/platform/fabric/e2e-test/feature/fabric-ca.feature b/app/platform/fabric/e2e-test/feature/fabric-ca.feature deleted file mode 100644 index 69a706cda..000000000 --- 
a/app/platform/fabric/e2e-test/feature/fabric-ca.feature +++ /dev/null @@ -1,101 +0,0 @@ -# Copyright IBM Corp. All Rights Reserved. -# -# SPDX-License-Identifier: Apache-2.0 -# - - -Feature: Fabric-CA Service - As a user I want to be able to use the Fabric-CA for generation of certificates - -##@doNotDecompose -##@interop -##@daily -Scenario Outline: FAB-6489: Interoperability Test using based orderer with a db using the with chaincode - Given I have a bootstrapped fabric network of type using state-database with tls - And I use the interface - And I enroll the following users using fabric-ca - | username | organization | password | role | certType | - | latitia | org1.example.com | h3ll0 | admin | x509 | - | scott | org2.example.com | th3r3 | member | x509 | - | adnan | org1.example.com | wh@tsup | member | x509 | - When an admin sets up a channel - And an admin deploys chaincode at path "" with args ["init","a","1000","b","2000"] with name "mycc" with language "" - And I wait "10" seconds - When a user "adnan" queries on the chaincode with args ["query","a"] - Then a user receives a success response of 1000 - And I wait "5" seconds - When a user "adnan" invokes on the chaincode with args ["invoke","a","b","10"] - And I wait "10" seconds - When a user "scott" queries on the chaincode with args ["query","a"] from "peer0.org2.example.com" - Then a user receives a success response of 990 from "peer0.org2.example.com" - When a user "scott" invokes on the chaincode named "mycc" with args ["invoke","a","b","10"] on "peer0.org2.example.com" - And I wait "5" seconds - When a user "latitia" queries on the chaincode with args ["query","a"] - Then a user receives a success response of 980 - # We should use the JavaSDK once the TLS version of this is working correctly -Examples: - | type | database | interface | path | language | - #| solo | leveldb | Java SDK | github.com/hyperledger/fabric/examples/chaincode/go/example02/cmd | GOLANG | - | solo | leveldb | NodeJS SDK | 
github.com/hyperledger/fabric/examples/chaincode/go/example02/cmd | GOLANG | - | kafka | couchdb | CLI | ../../fabric-test/chaincodes/example02/node | NODE | - | solo | leveldb | NodeJS SDK | ../../fabric-samples/chaincode/chaincode_example02/java | JAVA | - - -##@daily -Scenario Outline: FAB-11621: JavaSDK interoperability Test using chaincode shim - Given I have a bootstrapped fabric network - And I use the Java SDK interface - And I enroll the following users using fabric-ca - | username | organization | password | role | - | latitia | org1.example.com | h3ll0 | admin | - | scott | org2.example.com | th3r3 | member | - | adnan | org1.example.com | wh@tsup | member | - When an admin sets up a channel - And an admin deploys chaincode at path "" with args ["init","a","1000","b","2000"] with name "mycc" with language "" - And I wait "10" seconds - When a user "adnan" queries on the chaincode with args ["query","a"] - Then a user receives a success response of 1000 - And I wait "5" seconds - When a user "adnan" invokes on the chaincode with args ["invoke","a","b","10"] - And I wait "10" seconds - When a user "scott" queries on the chaincode with args ["query","a"] from "peer0.org2.example.com" - Then a user receives a success response of 990 from "peer0.org2.example.com" - When a user "scott" invokes on the chaincode named "mycc" with args ["invoke","a","b","10"] on "peer0.org2.example.com" - And I wait "5" seconds - When a user "latitia" queries on the chaincode with args ["query","a"] - Then a user receives a success response of 980 -Examples: - | path | language | - | github.com/hyperledger/fabric/examples/chaincode/go/example02/cmd | GOLANG | - | ../../fabric-test/chaincodes/example02/node | NODE | - | ../../fabric-samples/chaincode/chaincode_example02/java | JAVA | - - #@daily - #Scenario Outline: FAB-11728: Identity Mixer Test Happy Path - # Given an admin creates an idemix MSP for organization "org1.example.com" - # Given I have a bootstrapped fabric network 
with tls - # And I use the interface - # And I enroll the following users using fabric-ca - # | username | organization | password | role | certType | - # | latitia | org1.example.com | h3ll0 | admin | idemix | - # | scott | org2.example.com | th3r3 | member | idemix | - # | adnan | org1.example.com | wh@tsup | member | idemix | - # When an admin sets up a channel - # And an admin deploys chaincode at path "" with args ["init","a","1000","b","2000"] with name "mycc" with language "" - # And I wait "5" seconds - # When a user "adnan" queries on the chaincode with args ["query","a"] - # Then a user receives a success response of 1000 - # And I wait "5" seconds - # When a user "adnan" invokes on the chaincode with args ["invoke","a","b","10"] - # And I wait "5" seconds - # When a user "scott" queries on the chaincode with args ["query","a"] from "peer0.org2.example.com" - # Then a user receives a success response of 990 from "peer0.org2.example.com" - # When a user "scott" invokes on the chaincode named "mycc" with args ["invoke","a","b","10"] on peer0.org2.example.com - # And I wait "5" seconds - # When a user "latitia" queries on the chaincode with args ["query","a"] - # Then a user receives a success response of 980 - #Examples: - # | interface | path | language | - # | CLI | github.com/hyperledger/fabric/examples/chaincode/go/example02/cmd | GOLANG | - # | Java SDK | github.com/hyperledger/fabric-sdk-java/chaincode/gocc/sample1/src/github.com/example_cc | GOLANG | - # | NodeJS SDK | ../../fabric-test/chaincodes/example02/java | JAVA | diff --git a/app/platform/fabric/e2e-test/feature/gossip.feature b/app/platform/fabric/e2e-test/feature/gossip.feature deleted file mode 100644 index d79d154bb..000000000 --- a/app/platform/fabric/e2e-test/feature/gossip.feature +++ /dev/null @@ -1,411 +0,0 @@ -# Copyright IBM Corp. All Rights Reserved. 
-# -# SPDX-License-Identifier: Apache-2.0 -# - -Feature: Gossip Service - As a user I expect the gossip component work correctly - -@daily -Scenario Outline: [FAB-4663] [FAB-4664] [FAB-4665] A non-leader peer goes down by , comes back up and catches up eventually. - Given the FABRIC_LOGGING_SPEC environment variable is gossip.election=DEBUG - And I have a bootstrapped fabric network of type kafka - When an admin sets up a channel - # the following wait is for Gossip leadership states to be stabilized - And I wait "30" seconds - And an admin deploys chaincode at path "github.com/hyperledger/fabric/examples/chaincode/go/example02/cmd" with args ["init","a","1000","b","2000"] with name "mycc" on the initial leader peer of "org1" - And I wait "5" seconds - When a user queries on the chaincode named "mycc" with args ["query","a"] on the initial leader peer of "org1" - Then a user receives a success response of 1000 from the initial leader peer of "org1" - When a user invokes on the chaincode named "mycc" with args ["invoke","a","b","10"] on the initial leader peer of "org1" - And I wait "5" seconds - When a user queries on the chaincode named "mycc" with args ["query","a"] on the initial leader peer of "org1" - Then a user receives a success response of 990 from the initial leader peer of "org1" - - When the initial non-leader peer of "org1" is taken down by doing a - And I wait "5" seconds - ## Now do 3 invoke-queries in leader peer - When a user invokes on the chaincode named "mycc" with args ["invoke","a","b","10"] on the initial leader peer of "org1" - And I wait "5" seconds - And a user queries on the chaincode named "mycc" with args ["query","a"] on the initial leader peer of "org1" - Then a user receives a success response of 980 from the initial leader peer of "org1" - When a user invokes on the chaincode named "mycc" with args ["invoke","a","b","20"] on the initial leader peer of "org1" - And I wait "5" seconds - When a user queries on the chaincode named 
"mycc" with args ["query","a"] on the initial leader peer of "org1" - Then a user receives a success response of 960 from the initial leader peer of "org1" - When a user invokes on the chaincode named "mycc" with args ["invoke","a","b","30"] on the initial leader peer of "org1" - And I wait "5" seconds - When a user queries on the chaincode named "mycc" with args ["query","a"] on the initial leader peer of "org1" - Then a user receives a success response of 930 from the initial leader peer of "org1" - - When the initial non-leader peer of "org1" comes back up by doing a - And I wait "60" seconds - - When a user queries on the chaincode named "mycc" with args ["query","a"] on the initial non-leader peer of "org1" - Then a user receives a success response of 930 from the initial non-leader peer of "org1" - When a user invokes on the chaincode named "mycc" with args ["invoke","a","b","40"] on the initial non-leader peer of "org1" - And I wait "5" seconds - And a user queries on the chaincode named "mycc" with args ["query","a"] on the initial leader peer of "org1" - Then a user receives a success response of 890 from the initial leader peer of "org1" - - Examples: - | takeDownType | bringUpType | - | stop | start | - | pause | unpause | - | disconnect | connect | - -@daily -Scenario Outline: [FAB-4667] [FAB-4671] [FAB-4672] A leader peer goes down by , comes back up *after* another leader is elected, catches up. 
- Given the FABRIC_LOGGING_SPEC environment variable is gossip.election=DEBUG - And I have a bootstrapped fabric network of type kafka - When an admin sets up a channel - # the following wait is for Gossip leadership states to be stabilized - And I wait "30" seconds - And an admin deploys chaincode at path "github.com/hyperledger/fabric/examples/chaincode/go/example02/cmd" with args ["init","a","1000","b","2000"] with name "mycc" on the initial non-leader peer of "org1" - And I wait "5" seconds - When a user queries on the chaincode named "mycc" with args ["query","a"] on the initial non-leader peer of "org1" - Then a user receives a success response of 1000 from the initial non-leader peer of "org1" - When a user invokes on the chaincode named "mycc" with args ["invoke","a","b","10"] on the initial non-leader peer of "org1" - And I wait "5" seconds - And a user queries on the chaincode named "mycc" with args ["query","a"] on the initial non-leader peer of "org1" - Then a user receives a success response of 990 from the initial non-leader peer of "org1" - - When the initial leader peer of "org1" is taken down by doing a - # Give time to leader change to happen - And I wait "30" seconds - Then the initial non-leader peer of "org1" has become the leader - ## Now do 3 invoke-queries - When a user invokes on the chaincode named "mycc" with args ["invoke","a","b","10"] on the initial non-leader peer of "org1" - And I wait "5" seconds - And a user queries on the chaincode named "mycc" with args ["query","a"] on the initial non-leader peer of "org1" - Then a user receives a success response of 980 from the initial non-leader peer of "org1" - When a user invokes on the chaincode named "mycc" with args ["invoke","a","b","20"] on the initial non-leader peer of "org1" - And I wait "5" seconds - When a user queries on the chaincode named "mycc" with args ["query","a"] on the initial non-leader peer of "org1" - Then a user receives a success response of 960 from the initial 
non-leader peer of "org1" - When a user invokes on the chaincode named "mycc" with args ["invoke","a","b","30"] on the initial non-leader peer of "org1" - And I wait "5" seconds - When a user queries on the chaincode named "mycc" with args ["query","a"] on the initial non-leader peer of "org1" - Then a user receives a success response of 930 from the initial non-leader peer of "org1" - - When the initial leader peer of "org1" comes back up by doing a - And I wait "60" seconds - - When a user queries on the chaincode named "mycc" with args ["query","a"] on the initial leader peer of "org1" - Then a user receives a success response of 930 from the initial leader peer of "org1" - When a user invokes on the chaincode named "mycc" with args ["invoke","a","b","40"] on the initial leader peer of "org1" - And I wait "5" seconds - And a user queries on the chaincode named "mycc" with args ["query","a"] on the initial leader peer of "org1" - Then a user receives a success response of 890 from the initial leader peer of "org1" - - Examples: - | takeDownType | bringUpType | - | stop | start | - | pause | unpause | - | disconnect | connect | - -@daily -Scenario Outline: [FAB-4673] [FAB-4674] [FAB-4675] A leader peer goes down by , comes back up *before* another leader is elected, catches up. 
- Given the FABRIC_LOGGING_SPEC environment variable is gossip.election,peer.gossip=DEBUG - And I have a bootstrapped fabric network of type kafka - When an admin sets up a channel - # the following wait is for Gossip leadership states to be stabilized - And I wait "30" seconds - And an admin deploys chaincode at path "github.com/hyperledger/fabric/examples/chaincode/go/example02/cmd" with args ["init","a","1000","b","2000"] with name "mycc" on the initial non-leader peer of "org1" - And I wait "5" seconds - And a user queries on the chaincode named "mycc" with args ["query","a"] on the initial non-leader peer of "org1" - Then a user receives a success response of 1000 from the initial non-leader peer of "org1" - - When a user invokes on the chaincode named "mycc" with args ["invoke","a","b","10"] on the initial non-leader peer of "org1" - And I wait "5" seconds - And a user queries on the chaincode named "mycc" with args ["query","a"] on the initial non-leader peer of "org1" - Then a user receives a success response of 990 from the initial non-leader peer of "org1" - - ## take down leader, invoke in non-leader, wait 5 seconds and bring back up the initial leader - When the initial leader peer of "org1" is taken down by doing a - And a user invokes on the chaincode named "mycc" with args ["invoke","a","b","10"] on the initial non-leader peer of "org1" - And I wait "3" seconds - Then the initial non-leader peer of "org1" has not become the leader - When the initial leader peer of "org1" comes back up by doing a - And I wait "30" seconds - - When a user queries on the chaincode named "mycc" with args ["query","a"] on the initial leader peer of "org1" - Then a user receives a success response of 980 from the initial leader peer of "org1" - - Examples: - | takeDownType | bringUpType | - | stop | start | - | pause | unpause | - | disconnect | connect | - -@daily -Scenario Outline: [FAB-4676] [FAB-4677] [FAB-4678] "All peers in an organization go down via , then catch up 
after ". - Given the FABRIC_LOGGING_SPEC environment variable is gossip.election=DEBUG - And I have a bootstrapped fabric network of type kafka - When an admin sets up a channel - And an admin deploys chaincode at path "github.com/hyperledger/fabric/examples/chaincode/go/example02/cmd" with args ["init","a","1000","b","2000"] with name "mycc" - # the following wait is for Gossip leadership states to be stabilized - And I wait "30" seconds - - When a user queries on the chaincode named "mycc" with args ["query","a"] - Then a user receives a success response of 1000 - When a user invokes on the chaincode named "mycc" with args ["invoke","a","b","20"] - And I wait "3" seconds - When a user queries on the chaincode named "mycc" with args ["query","a"] - Then a user receives a success response of 980 - - #take down both peers in "org2" - When "peer0.org2.example.com" is taken down by doing a - And I wait "5" seconds - When "peer1.org2.example.com" is taken down by doing a - And I wait "5" seconds - ## Now do 3 invoke-queries in a peer from org1 - When a user invokes on the chaincode named "mycc" with args ["invoke","a","b","10"] - And I wait "5" seconds - And a user queries on the chaincode named "mycc" with args ["query","a"] - Then a user receives a success response of 970 - When a user invokes on the chaincode named "mycc" with args ["invoke","a","b","20"] - And I wait "5" seconds - When a user queries on the chaincode named "mycc" with args ["query","a"] - Then a user receives a success response of 950 - When a user invokes on the chaincode named "mycc" with args ["invoke","a","b","30"] - And I wait "5" seconds - When a user queries on the chaincode named "mycc" with args ["query","a"] - Then a user receives a success response of 920 - - When "peer0.org2.example.com" comes back up by doing a - And I wait "60" seconds - When "peer1.org2.example.com" comes back up by doing a - And I wait "60" seconds - - When a user queries on the chaincode named "mycc" with args 
["query","a"] on "peer0.org2.example.com" - Then a user receives a success response of 920 from "peer0.org2.example.com" - When a user queries on the chaincode named "mycc" with args ["query","a"] on "peer1.org2.example.com" - Then a user receives a success response of 920 from "peer1.org2.example.com" - - Examples: - | takeDownType | bringUpType | - | stop | start | - | pause | unpause | - | disconnect | connect | - -@daily -Scenario Outline: [FAB-4679] [FAB-4680] [FAB-4681] In leader-selection setup, a non-leader peer goes down by , comes back up and catches up eventually. - # Select Peer0 of both org as leader and turn leader election off - - Given the CORE_PEER_GOSSIP_ORGLEADER_PEER0_ORG1 environment variable is true - And the CORE_PEER_GOSSIP_USELEADERELECTION_PEER0_ORG1 environment variable is false - And the CORE_PEER_GOSSIP_ORGLEADER_PEER0_ORG2 environment variable is true - And the CORE_PEER_GOSSIP_USELEADERELECTION_PEER0_ORG2 environment variable is false - And the CORE_PEER_GOSSIP_ORGLEADER_PEER1_ORG1 environment variable is false - And the CORE_PEER_GOSSIP_USELEADERELECTION_PEER1_ORG1 environment variable is false - And the CORE_PEER_GOSSIP_ORGLEADER_PEER1_ORG2 environment variable is false - And the CORE_PEER_GOSSIP_USELEADERELECTION_PEER1_ORG2 environment variable is false - - # Bootstrap the network create channel, deploy chaincode - And I have a bootstrapped fabric network of type kafka - When an admin sets up a channel - And an admin deploys chaincode at path "github.com/hyperledger/fabric/examples/chaincode/go/example02/cmd" with args ["init","a","1000","b","2000"] with name "mycc" - - When a user queries on the chaincode named "mycc" with args ["query","a"] - Then a user receives a success response of 1000 - When a user invokes on the chaincode named "mycc" with args ["invoke","a","b","10"] - And I wait "5" seconds - When a user queries on the chaincode named "mycc" with args ["query","a"] - Then a user receives a success response of 990 - - # 
Take down the non-leader peer - When "peer1.org1.example.com" is taken down by doing a - And I wait "5" seconds - - # Now do three invoke-query pairs - When a user invokes on the chaincode named "mycc" with args ["invoke","a","b","10"] on "peer0.org1.example.com" - And I wait "5" seconds - And a user queries on the chaincode named "mycc" with args ["query","a"] on "peer0.org1.example.com" - Then a user receives a success response of 980 from "peer0.org1.example.com" - When a user invokes on the chaincode named "mycc" with args ["invoke","a","b","20"] on "peer0.org1.example.com" - And I wait "5" seconds - When a user queries on the chaincode named "mycc" with args ["query","a"] on "peer0.org1.example.com" - Then a user receives a success response of 960 from "peer0.org1.example.com" - When a user invokes on the chaincode named "mycc" with args ["invoke","a","b","30"] on "peer0.org1.example.com" - And I wait "5" seconds - When a user queries on the chaincode named "mycc" with args ["query","a"] on "peer0.org1.example.com" - Then a user receives a success response of 930 from "peer0.org1.example.com" - - # Bring back up the non-leader peer - When "peer1.org1.example.com" comes back up by doing a - And I wait "60" seconds - - # Test with the non-leader peer - When a user queries on the chaincode named "mycc" with args ["query","a"] on "peer1.org1.example.com" - Then a user receives a success response of 930 from "peer1.org1.example.com" - When a user invokes on the chaincode named "mycc" with args ["invoke","a","b","40"] on "peer1.org1.example.com" - And I wait "5" seconds - And a user queries on the chaincode named "mycc" with args ["query","a"] on "peer1.org1.example.com" - Then a user receives a success response of 890 from "peer1.org1.example.com" - - Examples: - | takeDownType | bringUpType | - | stop | start | - | pause | unpause | - | disconnect | connect | - -@daily -Scenario Outline: [FAB-4683] [FAB-4684] [FAB-4685] In leader-selection setup, leader peer goes 
down by for at least seconds, comes back up and catches up eventually. - - # Select Peer0 of both org as leader and turn leader election off - Given the CORE_PEER_GOSSIP_ORGLEADER_PEER0_ORG1 environment variable is true - And the CORE_PEER_GOSSIP_USELEADERELECTION_PEER0_ORG1 environment variable is false - And the CORE_PEER_GOSSIP_ORGLEADER_PEER0_ORG2 environment variable is true - And the CORE_PEER_GOSSIP_USELEADERELECTION_PEER0_ORG2 environment variable is false - And the CORE_PEER_GOSSIP_ORGLEADER_PEER1_ORG1 environment variable is false - And the CORE_PEER_GOSSIP_USELEADERELECTION_PEER1_ORG1 environment variable is false - And the CORE_PEER_GOSSIP_ORGLEADER_PEER1_ORG2 environment variable is false - And the CORE_PEER_GOSSIP_USELEADERELECTION_PEER1_ORG2 environment variable is false - - # Bootstrap the network create channel, deploy chaincode - And I have a bootstrapped fabric network of type kafka - When an admin sets up a channel - And an admin deploys chaincode at path "github.com/hyperledger/fabric/examples/chaincode/go/example02/cmd" with args ["init","a","1000","b","2000"] with name "mycc" - When a user queries on the chaincode named "mycc" with args ["query","a"] - Then a user receives a success response of 1000 - When a user invokes on the chaincode named "mycc" with args ["invoke","a","b","10"] - And I wait "5" seconds - When a user queries on the chaincode named "mycc" with args ["query","a"] - Then a user receives a success response of 990 - - # Take down the leader peer - When "peer0.org2.example.com" is taken down by doing a - And I wait "5" seconds - - # Now do three invoke-query pairs - When a user invokes on the chaincode named "mycc" with args ["invoke","a","b","20"] on "peer0.org1.example.com" - And I wait "5" seconds - And a user queries on the chaincode named "mycc" with args ["query","a"] on "peer0.org1.example.com" - Then a user receives a success response of 970 from "peer0.org1.example.com" - When a user invokes on the chaincode named 
"mycc" with args ["invoke","a","b","30"] on "peer0.org1.example.com" - And I wait "5" seconds - When a user queries on the chaincode named "mycc" with args ["query","a"] on "peer0.org1.example.com" - Then a user receives a success response of 940 from "peer0.org1.example.com" - When a user invokes on the chaincode named "mycc" with args ["invoke","a","b","40"] on "peer0.org1.example.com" - And I wait "5" seconds - When a user queries on the chaincode named "mycc" with args ["query","a"] on "peer0.org1.example.com" - Then a user receives a success response of 900 from "peer0.org1.example.com" - - When I wait "" seconds - - # Bring back up the leader peer - When "peer0.org2.example.com" comes back up by doing a - And I wait "60" seconds - - # Query the leader peer - When a user queries on the chaincode named "mycc" with args ["query","a"] on "peer0.org2.example.com" - Then a user receives a success response of 900 from "peer0.org2.example.com" - - Examples: - | takeDownType | bringUpType | minDownDuration | - | stop | start | 15 | - | pause | unpause | 15 | - | disconnect | connect | 15 | - | stop | start | 90 | - | pause | unpause | 90 | - | disconnect | connect | 90 | - - -@daily - Scenario: [FAB-4666] A non-leader peer, that joins an already-active channel--is expected to have all the blocks eventually. 
- - Given the FABRIC_LOGGING_SPEC environment variable is gossip=DEBUG - And I have a bootstrapped fabric network of type kafka - When an admin creates a channel - - #Join only three peers - When an admin fetches genesis information using peer "peer0.org1.example.com" - And an admin fetches genesis information using peer "peer0.org2.example.com" - And an admin fetches genesis information using peer "peer1.org1.example.com" - And an admin makes peer "peer0.org1.example.com" join the channel - And an admin makes peer "peer0.org2.example.com" join the channel - And an admin makes peer "peer1.org1.example.com" join the channel - - # the following wait is for Gossip leadership states to be stabilized - And I wait "30" seconds - And an admin deploys chaincode at path "github.com/hyperledger/fabric/examples/chaincode/go/example02/cmd" with args ["init","a","1000","b","2000"] with name "mycc" - And I wait "5" seconds - ## Now do 3 invoke-queries in leader peer - When a user invokes on the chaincode named "mycc" with args ["invoke","a","b","10"] - And I wait "5" seconds - And a user queries on the chaincode named "mycc" with args ["query","a"] - Then a user receives a success response of 990 - When a user invokes on the chaincode named "mycc" with args ["invoke","a","b","20"] - And I wait "5" seconds - When a user queries on the chaincode named "mycc" with args ["query","a"] - Then a user receives a success response of 970 - When a user invokes on the chaincode named "mycc" with args ["invoke","a","b","30"] - And I wait "5" seconds - When a user queries on the chaincode named "mycc" with args ["query","a"] - Then a user receives a success response of 940 - - #Join the rest of the peers - When an admin fetches genesis information using peer "peer1.org2.example.com" - And an admin makes peer "peer1.org2.example.com" join the channel - - When a user queries on the chaincode named "mycc" with args ["query","a"] on "peer1.org2.example.com" - Then a user receives a success 
response of 940 from "peer1.org2.example.com" - When a user invokes on the chaincode named "mycc" with args ["invoke","a","b","40"] on "peer1.org2.example.com" - And I wait "5" seconds - And a user queries on the chaincode named "mycc" with args ["query","a"] on "peer1.org2.example.com" - Then a user receives a success response of 900 from "peer1.org2.example.com" - - -@daily - Scenario: [FAB-4682] In leader-selection, a non-leader peer, that joins an already-active channel--is expected to have all the blocks eventually. - - # Select Peer0 of both org as leader and turn leader election off - Given the CORE_PEER_GOSSIP_ORGLEADER_PEER0_ORG1 environment variable is true - And the CORE_PEER_GOSSIP_USELEADERELECTION_PEER0_ORG1 environment variable is false - And the CORE_PEER_GOSSIP_ORGLEADER_PEER0_ORG2 environment variable is true - And the CORE_PEER_GOSSIP_USELEADERELECTION_PEER0_ORG2 environment variable is false - And the CORE_PEER_GOSSIP_ORGLEADER_PEER1_ORG1 environment variable is false - And the CORE_PEER_GOSSIP_USELEADERELECTION_PEER1_ORG1 environment variable is false - And the CORE_PEER_GOSSIP_ORGLEADER_PEER1_ORG2 environment variable is false - And the CORE_PEER_GOSSIP_USELEADERELECTION_PEER1_ORG2 environment variable is false - - And I have a bootstrapped fabric network of type kafka - When an admin creates a channel - - #Join only three peers - When an admin fetches genesis information using peer "peer0.org1.example.com" - And an admin fetches genesis information using peer "peer0.org2.example.com" - And an admin fetches genesis information using peer "peer1.org1.example.com" - And an admin makes peer "peer0.org1.example.com" join the channel - And an admin makes peer "peer0.org2.example.com" join the channel - And an admin makes peer "peer1.org1.example.com" join the channel - - # the following wait is for Gossip leadership states to be stabilized - And I wait "30" seconds - And an admin deploys chaincode at path 
"github.com/hyperledger/fabric/examples/chaincode/go/example02/cmd" with args ["init","a","1000","b","2000"] with name "mycc" - And I wait "5" seconds - ## Now do 3 invoke-queries in leader peer - When a user invokes on the chaincode named "mycc" with args ["invoke","a","b","10"] - And I wait "5" seconds - And a user queries on the chaincode named "mycc" with args ["query","a"] - Then a user receives a success response of 990 - When a user invokes on the chaincode named "mycc" with args ["invoke","a","b","20"] - And I wait "5" seconds - When a user queries on the chaincode named "mycc" with args ["query","a"] - Then a user receives a success response of 970 - When a user invokes on the chaincode named "mycc" with args ["invoke","a","b","30"] - And I wait "5" seconds - When a user queries on the chaincode named "mycc" with args ["query","a"] - Then a user receives a success response of 940 - - #Join the rest of the peers - When an admin fetches genesis information using peer "peer1.org2.example.com" - And an admin makes peer "peer1.org2.example.com" join the channel - - When a user queries on the chaincode named "mycc" with args ["query","a"] on "peer1.org2.example.com" - Then a user receives a success response of 940 from "peer1.org2.example.com" - When a user invokes on the chaincode named "mycc" with args ["invoke","a","b","40"] on "peer1.org2.example.com" - And I wait "5" seconds - And a user queries on the chaincode named "mycc" with args ["query","a"] on "peer1.org2.example.com" - Then a user receives a success response of 900 from "peer1.org2.example.com" diff --git a/app/platform/fabric/e2e-test/feature/ledger.feature b/app/platform/fabric/e2e-test/feature/ledger.feature deleted file mode 100644 index 372979119..000000000 --- a/app/platform/fabric/e2e-test/feature/ledger.feature +++ /dev/null @@ -1,240 +0,0 @@ -# -# Copyright IBM Corp. All Rights Reserved. 
-# -# SPDX-License-Identifier: Apache-2.0 -# - -Feature: Ledger Service - As a user I want to be able to test private chaincode with private data that would not be stored in ledger - - -#This test can be run once following two CRS get merged in master -#1.[FAB-5874] Support for queries over pvtdata -#2.[FAB-5080] Chaincode API Support for PrivateData - -@skip -Scenario Outline: FAB-6036-1: Test marbles02_private initMarble, readMarble, deleteMarble, transferMarble, getMarblesByRange, stateTransfer - Given the FABRIC_LOGGING_SPEC environment variable is gossip.election=DEBUG - And I have a bootstrapped fabric network of type - When an admin deploys chaincode at path "github.com/hyperledger/fabric-test/chaincodes/marbles02_private" with args [""] with name "mycc" - - #comment or remove the following 6 lines once we are in phase2 - Given "peer1.org1.example.com" is taken down - And I wait "10" seconds - Given "peer1.org2.example.com" is taken down - And I wait "10" seconds - Given "peer0.org2.example.com" is taken down - And I wait "10" seconds - - #These two marbles are used for getMarblesByRange - When a user invokes on the chaincode named "mycc" with args ["initMarble","001m1","indigo","35","saleem"] - And I wait "10" seconds - When a user invokes on the chaincode named "mycc" with args ["initMarble","004m4","green","35","dire straits"] - - When a user invokes on the chaincode named "mycc" with args ["initMarble","marble1","red","35","tom"] - And I wait "3" seconds - When a user queries on the chaincode named "mycc" with args ["readMarble","marble1"] - Then a user receives a response containing "docType":"marble" - And a user receives a response containing "name":"marble1" - And a user receives a response containing "color":"red" - And a user receives a response containing "size":35 - And a user receives a response containing "owner":"tom" - - When a user invokes on the chaincode named "mycc" with args ["initMarble","marble2","blue","55","jerry"] - And I wait "3" 
seconds - When a user queries on the chaincode named "mycc" with args ["readMarble","marble2"] - Then a user receives a response containing "docType":"marble" - And a user receives a response containing "name":"marble2" - And a user receives a response containing "color":"blue" - And a user receives a response containing "size":55 - And a user receives a response containing "owner":"jerry" - - When a user invokes on the chaincode named "mycc" with args ["initMarble","marble111","pink","55","jane"] - And I wait "3" seconds - When a user queries on the chaincode named "mycc" with args ["readMarble","marble111"] - Then a user receives a response containing "docType":"marble" - And a user receives a response containing "name":"marble111" - And a user receives a response containing "color":"pink" - And a user receives a response containing "size":55 - And a user receives a response containing "owner":"jane" - -#Test transferMarble - When a user invokes on the chaincode named "mycc" with args ["transferMarble","marble1","jerry"] - And I wait "3" seconds - When a user queries on the chaincode named "mycc" with args ["readMarble","marble1"] - Then a user receives a response containing "docType":"marble" - And a user receives a response containing "name":"marble1" - And a user receives a response containing "color":"red" - And a user receives a response containing "size":35 - And a user receives a response containing "owner":"jerry" - -#delete a marble - When a user invokes on the chaincode named "mycc" with args ["delete","marble2"] - And I wait "10" seconds - When a user queries on the chaincode named "mycc" with args ["readMarble","marble2"] - Then a user receives an error response of status:500 - And a user receives an error response of {"Error":"Marble does not exist: marble2"} - And I wait "3" seconds - -# Begin creating marbles to to test transferMarblesBasedOnColor -# When a user invokes on the chaincode named "mycc" with args 
["initMarble","marble100","red","5","cassey"] -# And I wait "3" seconds - -# When a user invokes on the chaincode named "mycc" with args ["initMarble","marble101","blue","6","cassey"] -# And I wait "3" seconds - -# When a user invokes on the chaincode named "mycc" with args ["initMarble","marble200","purple","5","ram"] -# And I wait "3" seconds - -# When a user invokes on the chaincode named "mycc" with args ["initMarble","marble201","blue","6","ram"] -# And I wait "3" seconds - -# When a user invokes on the chaincode named "mycc" with args ["transferMarblesBasedOnColor","blue","jerry"] -# And I wait "3" seconds -# When a user queries on the chaincode named "mycc" with args ["readMarble","marble100"] -# Then a user receives a response containing "docType":"marble" -# And a user receives a response containing "name":"marble100" -# And a user receives a response containing "color":"red" -# And a user receives a response containing "size":5 -# And a user receives a response containing "owner":"cassey" - - -# When a user queries on the chaincode named "mycc" with args ["readMarble","marble101"] -# Then a user receives a response containing "docType":"marble" -# And a user receives a response containing "name":"marble101" -# And a user receives a response containing "color":"blue" -# And a user receives a response containing "size":6 -# And a user receives a response containing "owner":"jerry" - - -# When a user queries on the chaincode named "mycc" with args ["readMarble","marble200"] -# Then a user receives a response containing "docType":"marble" -# And a user receives a response containing "name":"marble200" -# And a user receives a response containing "color":"purple" -# And a user receives a response containing "size":5 -# And a user receives a response containing "owner":"ram" - -# When a user queries on the chaincode named "mycc" with args ["readMarble","marble201"] -# Then a user receives a response containing "docType":"marble" -# And a user receives a 
response containing "name":"marble201" -# And a user receives a response containing "color":"blue" -# And a user receives a response containing "size":6 -# And a user receives a response containing "owner":"jerry" -# - -# When a user invokes on the chaincode named "mycc" with args ["queryMarblesByOwner","ram"] -# And I wait "3" seconds - -# Then a user receives a response containing "docType":"marble" -# And a user receives a response containing "name":"marble200" -# And a user receives a response containing "color":"purple" -# And a user receives a response containing "size":5 -# And a user receives a response containing "owner":"ram" -# peer chaincode query -C myc1 -n marbles -c '{"Args":["queryMarblesByOwner","tom"]}' -# peer chaincode query -C myc1 -n marbles -c '{"Args":["queryMarbles","{\"selector\":{\"owner\":\"tom\"}}"]}' - -# state transfer - When a user invokes on the chaincode named "mycc" with args ["transferMarble","marble111","jerry"] - And I wait "3" seconds - When a user queries on the chaincode named "mycc" with args ["readMarble","marble111"] - Then a user receives a response containing "docType":"marble" - And a user receives a response containing "name":"marble111" - And a user receives a response containing "color":"pink" - And a user receives a response containing "size":55 - And a user receives a response containing "owner":"jerry" - And I wait "10" seconds - - When a user invokes on the chaincode named "mycc" with args ["transferMarble","marble111","tom"] - And I wait "3" seconds - When a user queries on the chaincode named "mycc" with args ["readMarble","marble111"] - Then a user receives a response containing "docType":"marble" - And a user receives a response containing "name":"marble111" - And a user receives a response containing "color":"pink" - And a user receives a response containing "size":55 - And a user receives a response containing "owner":"tom" - -Given the initial non-leader peer of "org1" comes back up - And I wait "10" 
seconds - When a user queries on the chaincode named "mycc" with args ["readMarble","marble111"] on the initial non-leader peer of "org1" - Then a user receives a response containing "docType":"marble" - And a user receives a response containing "name":"marble111" - And a user receives a response containing "color":"pink" - And a user receives a response containing "size":55 - And a user receives a response containing "owner":"tom" - -# Test getMarblesByRange - When a user queries on the chaincode named "mycc" with args ["getMarblesByRange","001m1", "005m4"] - And I wait "3" seconds - Then a user receives a response containing "docType":"marble" - And a user receives a response containing "name":"001m1" - And a user receives a response containing "color":"indigo" - And a user receives a response containing "size":35 - And a user receives a response containing "owner":"saleem" - - Then a user receives a response containing "docType":"marble" - And a user receives a response containing "name":"004m4" - And a user receives a response containing "color":"green" - And a user receives a response containing "size":35 - And a user receives a response containing "owner":"dire straits" - - Examples: - | type | database | - | kafka | leveldb | - | kafka | couchdb | - | solo | leveldb | - | solo | couchdb | - - -@skip -Scenario Outline: FAB-6036-2: Test marbles02_private : getHistoryForMarble - Given I have a bootstrapped fabric network of type - When an admin deploys chaincode at path "github.com/hyperledger/fabric-test/chaincodes/marbles02_private" with args [""] with name "mycc" - - When a user invokes on the chaincode named "mycc" with args ["initMarble","marble1","red","35","tom"] - And I wait "10" seconds - When a user queries on the chaincode named "mycc" with args ["readMarble","marble1"] - Then a user receives a success response containing "docType":"marble" - And a user receives a success response containing "name":"marble1" - And a user receives a success response 
containing "color":"red" - And a user receives a success response containing "size":35 - And a user receives a success response containing "owner":"tom" - - - When a user invokes on the chaincode named "mycc" with args ["initMarble","marble201","blue","6","ram"] - And I wait "10" seconds - # Test getHistoryForMarble - When a user queries on the chaincode named "mycc" with args ["getHistoryForMarble","marble1"] - And I wait "10" seconds - Then a user receives a success response containing "TxId" - And a user receives a success response containing "Value":{"docType":"marble","name":"marble1","color":"red","size":35,"owner":"tom"} - And a user receives a success response containing "Timestamp" - And a user receives a success response containing "IsDelete":"false" - - #delete a marble - When a user invokes on the chaincode named "mycc" with args ["delete","marble201"] - And I wait "20" seconds - When a user queries on the chaincode named "mycc" with args ["readMarble","marble201"] - Then a user receives a success response of status:500 with error status - And a user receives a success response of {"Error":"Marble does not exist: marble201"} with error status - And I wait "10" seconds - - - #Test getHistoryForDeletedMarble - When a user queries on the chaincode named "mycc" with args ["getHistoryForMarble","marble201"] - And I wait "10" seconds - Then a user receives a success response containing "TxId" - And a user receives a success response containing "Value":{"docType":"marble","name":"marble201","color":"blue","size":6,"owner":"ram"} - And a user receives a success response containing "Timestamp" - And a user receives a success response containing "IsDelete":"false" - And I wait "10" seconds - Then a user receives a success response containing "TxId" - And a user receives a success response containing "Value":{"docType":"marble","name":"marble201","color":"blue","size":6,"owner":"ram"} - And a user receives a success response containing "Timestamp" - And a user 
receives a success response containing "IsDelete":"true" - - Examples: - | type | database | - | solo | leveldb | - | solo | couchdb | - | kafka | leveldb | - | kafka | couchdb | diff --git a/app/platform/fabric/e2e-test/feature/orderer.feature b/app/platform/fabric/e2e-test/feature/orderer.feature deleted file mode 100644 index 83e2d4bfa..000000000 --- a/app/platform/fabric/e2e-test/feature/orderer.feature +++ /dev/null @@ -1,353 +0,0 @@ -# -# Copyright IBM Corp. All Rights Reserved. -# -# SPDX-License-Identifier: Apache-2.0 -# - -Feature: Orderer Service - As a user I want to be able to have my transactions ordered correctly - -#@doNotDecompose -@skip -Scenario: FAB-1335: Resilient Kafka Orderer and Brokers - Given the KAFKA_DEFAULT_REPLICATION_FACTOR environment variable is 1 - And the CONFIGTX_ORDERER_BATCHSIZE_MAXMESSAGECOUNT environment variable is 10 - And the CONFIGTX_ORDERER_BATCHTIMEOUT environment variable is 10 minutes - And I have a bootstrapped fabric network of type kafka - When 10 unique messages are broadcasted - Then I get 10 successful broadcast responses - #When the topic partition leader is stopped - When I stop the current kafka topic partition leader - And 10 unique messages are broadcasted - Then I get 10 successful broadcast responses - And all 20 messages are delivered in 1 block - -@skip -Scenario: FAB-1306: Adding a new Kafka Broker - Given a kafka cluster - And an orderer connected to the kafka cluster - When a new organization NewOrg certificate is added - Then the NewOrg is able to connect to the kafka cluster - -@skip -Scenario: FAB-1306: Multiple organizations in a kafka cluster, remove 1 - Given a certificate from Org1 is added to the kafka orderer network - And a certificate from Org2 is added to the kafka orderer network - And an orderer connected to the kafka cluster - When authorization for Org2 is removed from the kafka cluster - Then the Org2 cannot connect to the kafka cluster - -@skip -Scenario: FAB-1306: Multiple 
organizations in a cluster - remove all, reinstate 1. - Given a certificate from Org1 is added to the kafka orderer network - And a certificate from Org2 is added to the kafka orderer network - And a certificate from Org3 is added to the kafka orderer network - And an orderer connected to the kafka cluster - When authorization for Org2 is removed from the kafka cluster - Then the Org2 cannot connect to the kafka cluster - And the orderer functions successfully - When authorization for Org1 is removed from the kafka cluster - Then the Org1 cannot connect to the kafka cluster - And the orderer functions successfully - When authorization for Org3 is removed from the kafka cluster - Then the Org3 cannot connect to the kafka cluster - And the zookeeper notifies the orderer of the disconnect - And the orderer stops sending messages to the cluster - When authorization for Org1 is added to the kafka cluster - And I wait "15" seconds - Then the Org1 is able to connect to the kafka cluster - And the orderer functions successfully - - -@smoke -Scenario: FAB-3852: Message Payloads Less than 1MB, for kafka-based orderer using the NodeJS SDK interface - Given I have a bootstrapped fabric network of type kafka using state-database couchdb with tls - And I use the NodeJS SDK interface - # Following lines are equivaent to "When an admin sets up a channel" - When an admin creates a channel - When an admin fetches genesis information using peer "peer0.org1.example.com" - When an admin makes all peers join the channel - # Following lines are equivalent to "When an admin deploys chaincode at path "github.com/hyperledger/fabric/examples/chaincode/go/map" with args [""]" - When an admin installs chaincode at path "github.com/hyperledger/fabric/examples/chaincode/go/map" with args [""] on all peers - When an admin instantiates the chaincode on "peer0.org1.example.com" - - # 1K - And a user invokes on the chaincode named "mycc" with random args ["put","a","{random_value}"] of length 1024 - 
And I wait "3" seconds - And a user queries on the chaincode named "mycc" with args ["get","a"] - Then a user receives a response containing a value of length 1024 - And a user receives a response with the random value - # 64K - When a user invokes on the chaincode named "mycc" with random args ["put","b","{random_value}"] of length 65536 - And I wait "3" seconds - And a user queries on the chaincode named "mycc" with args ["get","b"] - Then a user receives a response containing a value of length 65536 - # - When a user invokes on the chaincode named "mycc" with random args ["put","d","{random_value}"] of length 100000 - And I wait "3" seconds - And a user queries on the chaincode named "mycc" with args ["get","d"] - Then a user receives a response containing a value of length 100000 - # - When a user invokes on the chaincode named "mycc" with random args ["put","g","{random_value}"] of length 130610 - And I wait "3" seconds - And a user queries on the chaincode named "mycc" with args ["get","g"] - Then a user receives a response containing a value of length 130610 - And a user receives a response with the random value - - -@daily -Scenario Outline: FAB-3852: Message Payloads Less than 1MB, for orderer using the interface - Given I have a bootstrapped fabric network of type - And I use the interface - # Following lines are equivaent to "When an admin sets up a channel" - When an admin creates a channel - When an admin fetches genesis information using peer "peer0.org1.example.com" - When an admin makes all peers join the channel - # Following lines are equivalent to "When an admin deploys chaincode at path "github.com/hyperledger/fabric/examples/chaincode/go/map" with args [""]" - When an admin installs chaincode at path "github.com/hyperledger/fabric/examples/chaincode/go/map" with args [""] on all peers - When an admin instantiates the chaincode on "peer0.org1.example.com" - - # 1K - And a user invokes on the chaincode named "mycc" with random args 
["put","a","{random_value}"] of length 1024 - And I wait "3" seconds - And a user queries on the chaincode named "mycc" with args ["get","a"] - Then a user receives a response containing a value of length 1024 - And a user receives a response with the random value - # 64K - When a user invokes on the chaincode named "mycc" with random args ["put","b","{random_value}"] of length 65536 - And I wait "3" seconds - And a user queries on the chaincode named "mycc" with args ["get","b"] - Then a user receives a response containing a value of length 65536 - # - When a user invokes on the chaincode named "mycc" with random args ["put","d","{random_value}"] of length 100000 - And I wait "3" seconds - And a user queries on the chaincode named "mycc" with args ["get","d"] - Then a user receives a response containing a value of length 100000 - # - When a user invokes on the chaincode named "mycc" with random args ["put","g","{random_value}"] of length 130610 - And I wait "3" seconds - And a user queries on the chaincode named "mycc" with args ["get","g"] - Then a user receives a response containing a value of length 130610 - And a user receives a response with the random value -Examples: - | type | interface | - | solo | CLI | - | kafka | CLI | - | solo | NodeJS SDK | - | kafka | NodeJS SDK | - - -@daily -Scenario Outline: FAB-3851: Message Payloads of size , for orderer - Given I have a bootstrapped fabric network of type using state-database couchdb - And I use the NodeJS SDK interface - When an admin sets up a channel - And an admin deploys chaincode at path "github.com/hyperledger/fabric/examples/chaincode/go/map" with args [""] - - When a user invokes on the chaincode named "mycc" with random args ["put","g","{random_value}"] of length - And I wait "7" seconds - And a user queries on the chaincode named "mycc" with args ["get","g"] - Then a user receives a response containing a value of length - And a user receives a response with the random value - - When a user invokes 
on the chaincode named "mycc" with random args ["put","g","{random_value}"] of length - And I wait "7" seconds - And a user queries on the chaincode named "mycc" with args ["get","g"] - Then a user receives a response containing a value of length - And a user receives a response with the random value -Examples: - | type | size | comment | - | solo | 1048576 | 1MB | - | solo | 2097152 | 2MB | - | solo | 4194304 | 4MB | - | kafka | 125000 | 125KB (with default msg size) | - | kafka | 320000 | 320KB (with default msg size) | - | kafka | 490000 | 490KB (with default msg size) | - #| kafka | 1000012 | 1MB | - - -@daily -Scenario Outline: FAB-3859: Kafka Network with Large Message Size with Configuration Tweaks - Given the ORDERER_ABSOLUTEMAXBYTES environment variable is - And the ORDERER_PREFERREDMAXBYTES environment variable is - And the KAFKA_MESSAGE_MAX_BYTES environment variable is - And the KAFKA_REPLICA_FETCH_MAX_BYTES environment variable is - And the KAFKA_REPLICA_FETCH_RESPONSE_MAX_BYTES environment variable is - Given I have a bootstrapped fabric network of type kafka - And I use the NodeJS SDK interface - When an admin sets up a channel named "configsz" - And an admin deploys chaincode at path "github.com/hyperledger/fabric/examples/chaincode/go/map" with args ["init"] with name "mapIt" on channel "configsz" - - When a user invokes on the channel "configsz" using chaincode named "mapIt" with random args ["put","g","{random_value}"] of length - And I wait "10" seconds - And a user queries on the channel "configsz" using chaincode named "mapIt" for the random key with args ["get","g"] on "peer0.org1.example.com" - Then a user receives a response containing a value of length - And a user receives a response with the random value -Examples: - | absoluteMaxBytes | preferredMaxBytes | messageMaxBytes | replicaFetchMaxBytes | replicaFetchResponseMaxBytes | size | comment | - | 20 MB | 2 MB | 4 MB | 2 MB | 20 MB | 1048576 | 1MB | - | 1 MB | 1 MB | 4 MB | 2 MB | 10 MB 
| 1048576 | 1MB | - | 1 MB | 1 MB | 4 MB | 1.5 MB | 10 MB | 1048576 | 1MB | - | 4 MB | 4 MB | 4 MB | 4 MB | 10 MB | 1048576 | 1MB | - | 8 MB | 8 MB | 8 MB | 8 MB | 10 MB | 2097152 | 2MB | - | 16 MB | 16 MB | 16 MB | 16 MB | 20 MB | 4194304 | 4MB | - | 11 MB | 2 MB | 22 MB | 11 MB | 20 MB | 10485760 | 10MB | - -@daily -Scenario Outline: FAB-3857: key/value pairs in Payloads of size - Given I have a bootstrapped fabric network of type kafka using state-database couchdb - And I use the NodeJS SDK interface - When an admin sets up a channel - When an admin deploys chaincode at path "github.com/hyperledger/fabric-test/chaincodes/mapkeys/go" with args [""] - - When a user invokes on the chaincode named "mycc" with args ["put","c","3F","d","76D"] - When I wait "5" seconds - And a user queries on the chaincode named "mycc" with args ["get","c"] - Then a user receives a success response of 3F - When a user queries on the chaincode named "mycc" with args ["get","d"] - Then a user receives a success response of 76D - - When a user invokes args with random key/values of length on the chaincode named "mycc" - And I wait "5" seconds - And a user queries on the chaincode named "mycc" with dynamic args ["get","{last_key}"] on "peer0.org1.example.com" - Then a user receives a response containing a value of length - And a user receives a response with the random value -Examples: - | size | count | comment | - #| 2048 | 20 | caused IOError: resource temporarily unavailable | - | 512 | 10 | | - #| 256 | 1024 | caused IOError: resource temporarily unavailable | - | 64 | 256 | | - - -#@daily -Scenario: FAB-4686: Test taking down all kafka brokers and bringing back last 3 - Given I have a bootstrapped fabric network of type kafka - When an admin sets up a channel - And an admin deploys chaincode at path "github.com/hyperledger/fabric/examples/chaincode/go/example02/cmd" with args ["init","a","1000","b","2000"] with name "mycc" - When a user invokes on the chaincode named "mycc" with args 
["invoke","a","b","10"] - And I wait "10" seconds - And a user queries on the chaincode named "mycc" with args ["query","a"] - Then a user receives a success response of 990 - - When "kafka0" is taken down - And I wait "5" seconds - When a user invokes on the chaincode named "mycc" with args ["invoke","a","b","10"] - When a user queries on the chaincode with args ["query","a"] - Then a user receives a success response of 980 - - When "kafka1" is taken down - And "kafka2" is taken down - And "kafka3" is taken down - And I wait "5" seconds - When a user invokes on the chaincode named "mycc" with args ["invoke","a","b","10"] - And I wait "10" seconds - And a user queries on the chaincode named "mycc" with args ["query","a"] - Then a user receives a success response of 980 - And I wait "5" seconds - - When "kafka3" comes back up - And I wait "60" seconds - And "kafka2" comes back up - And I wait "60" seconds - And "kafka1" comes back up - And I wait "90" seconds - When a user invokes on the chaincode named "mycc" with args ["invoke","a","b","10"] - And I wait "10" seconds - When a user queries on the chaincode named "mycc" with args ["query","a"] - Then a user receives a success response of 970 - - -# skip FAB-4770 until FAB-6335 gets fixed so that we reliably stop the correct kafkabroker -@skip -@daily -Scenario Outline: [FAB-4770] [FAB-4845]: all kafka brokers in the RF set, and in LIFO order - # By default, the number of kafka brokers in the RF set is 3(KAFKA_DEFAULT_REPLICATION_FACTOR), - # and the min ISR is 2(KAFKA_MIN_INSYNC_REPLICAS) - Given I have a bootstrapped fabric network of type kafka - When an admin sets up a channel - When an admin deploys chaincode at path "github.com/hyperledger/fabric/examples/chaincode/go/example02/cmd" with args ["init","a","1000","b","2000"] with name "mycc" - When a user invokes on the chaincode named "mycc" with args ["invoke","a","b","10"] - And I wait "5" seconds - And a user queries on the chaincode named "mycc" with args 
["query","a"] - Then a user receives a success response of 990 - - When I the current kafka topic partition leader - And I wait "60" seconds - Then the broker is reported as down - And ensure kafka ISR set contains 2 brokers - #new leader is elected - When a user invokes on the chaincode named "mycc" with args ["invoke","a","b","10"] - And I wait "5" seconds - When a user queries on the chaincode with args ["query","a"] - Then a user receives a success response of 980 - - When I the current kafka topic partition leader - And I wait "65" seconds - Then the broker is reported as down - And ensure kafka ISR set contains 1 brokers - And I wait "10" seconds - When a user invokes on the chaincode named "mycc" with args ["invoke","a","b","10"] - And I wait "60" seconds - # Do not do this service_unavailable check, to see query value returned for an error - #Then a user receives an error response of SERVICE_UNAVAILABLE - When a user queries on the chaincode named "mycc" with args ["query","a"] - Then a user receives a success response of 980 - - When I the current kafka topic partition leader - And I wait "60" seconds - #Then the broker is reported as down - When a user invokes on the chaincode named "mycc" with args ["invoke","a","b","10"] - And I wait "10" seconds - Then a user receives an error response of SERVICE_UNAVAILABLE - When a user queries on the chaincode named "mycc" with args ["query","a"] - Then a user receives a success response of 980 - - # Stopping Queue: Last In First Out - When I a former kafka topic partition leader - And I wait "60" seconds - When a user invokes on the chaincode named "mycc" with args ["invoke","a","b","10"] - And I wait "10" seconds - When a user queries on the chaincode named "mycc" with args ["query","a"] - Then a user receives a success response of 980 - - When I a former kafka topic partition leader - And I wait "60" seconds - When a user invokes on the chaincode named "mycc" with args ["invoke","a","b","10"] - And I wait "10" 
seconds - When a user queries on the chaincode named "mycc" with args ["query","a"] - Then a user receives a success response of 970 - - When I a former kafka topic partition leader - And I wait "60" seconds - When a user invokes on the chaincode named "mycc" with args ["invoke","a","b","10"] - And I wait "10" seconds - When a user queries on the chaincode named "mycc" with args ["query","a"] - Then a user receives a success response of 960 - Examples: - | takeDownType | bringUpType | - | stop | start | - | pause | unpause | - | disconnect | connect | - -@daily -Scenario Outline: FAB-4808,FAB-3937,FAB-3938: Orderer_BatchTimeOut is honored, for orderer - Given the CONFIGTX_ORDERER_BATCHTIMEOUT environment variable is - And I have a bootstrapped fabric network of type - When an admin sets up a channel - And an admin deploys chaincode at path "github.com/hyperledger/fabric/examples/chaincode/go/example02/cmd" with args ["init","a","1000","b","2000"] with name "mycc" - When a user queries on the chaincode named "mycc" with args ["query","a"] - Then a user receives a success response of 1000 - When a user invokes on the chaincode named "mycc" with args ["invoke","a","b","10"] - And I wait "5" seconds - When a user queries on the chaincode named "mycc" with args ["query","a"] - Then a user receives a success response of - And I wait "16" seconds - When a user queries on the chaincode named "mycc" with args ["query","a"] - Then a user receives a success response of -Examples: - | type | envValue | firstQuery | lastQuery | - | solo | 2 seconds | 990 | 990 | - | kafka | 2 seconds | 990 | 990 | - | solo | 20 seconds | 1000 | 990 | - | kafka | 20 seconds | 1000 | 990 | diff --git a/app/platform/fabric/e2e-test/feature/package.json b/app/platform/fabric/e2e-test/feature/package.json deleted file mode 100644 index 2771338e2..000000000 --- a/app/platform/fabric/e2e-test/feature/package.json +++ /dev/null @@ -1,25 +0,0 @@ -{ - "name": "Integration", - "version": "1.2.0", - 
"description": "Behave Node.js SDK Integration", - "keywords": [ - "Behave node-sdk integration", - "Call node-sdk apis from behave" - ], - "engines": { - "node": ">=8.9.4 <9.0", - "npm": ">=5.6.0 <6.0" - }, - "license": "Apache-2.0", - "dependencies": { - "fabric-ca-client": "unstable-1.4", - "fabric-client": "unstable-1.4", - "fabric-network": "unstable-1.4", - "fs-extra": "^2.0.0", - "log4js": "^0.6.38" - }, - "devDependencies": { - "make-runnable": "^1.3.6", - "sync-dir": "^1.5.1" - } -} diff --git a/app/platform/fabric/e2e-test/feature/peer.feature b/app/platform/fabric/e2e-test/feature/peer.feature deleted file mode 100644 index b01eceb43..000000000 --- a/app/platform/fabric/e2e-test/feature/peer.feature +++ /dev/null @@ -1,398 +0,0 @@ -# Copyright IBM Corp. All Rights Reserved. -# -# SPDX-License-Identifier: Apache-2.0 -# - - -Feature: Peer Service - As a user I want to be able have channels and chaincodes to execute - -#@doNotDecompose -@daily -Scenario Outline: FAB-3505: Test chaincode example02 deploy, invoke, and query, with orderer - Given I have a bootstrapped fabric network of type - And I use the interface - When an admin sets up a channel - And an admin deploys chaincode at path "github.com/hyperledger/fabric/examples/chaincode/go/example02/cmd" with args ["init","a","1000","b","2000"] with name "mycc" - When a user queries on the chaincode named "mycc" with args ["query","a"] - Then a user receives a success response of 1000 - When a user invokes on the chaincode named "mycc" with args ["invoke","a","b","10"] - And I wait "5" seconds - And a user queries on the chaincode named "mycc" with args ["query","a"] - Then a user receives a success response of 990 - When "peer0.org2.example.com" is taken down - And a user invokes on the chaincode named "mycc" with args ["invoke","a","b","10"] - And I wait "5" seconds - And "peer0.org2.example.com" comes back up - And I wait "10" seconds - And a user queries on the chaincode named "mycc" with args 
["query","a"] on "peer0.org2.example.com" - Then a user receives a success response of 980 from "peer0.org2.example.com" -Examples: - | type | security | interface | - | solo | without tls | NodeJS SDK | - | kafka | with tls | NodeJS SDK | - | solo | without tls | CLI | - | kafka | with tls | CLI | - - -@daily -Scenario Outline: FAB-1440, FAB-3861: Basic Chaincode Execution - orderer type, using , - Given I have a bootstrapped fabric network of type using state-database - When an admin sets up a channel - And an admin deploys chaincode - When a user queries on the chaincode - Then a user receives a success response of 100 - When a user invokes on the chaincode - And I wait "5" seconds - And a user queries on the chaincode - Then a user receives a success response of 95 -Examples: - | type | database | security | - | solo | leveldb | with tls | - | solo | leveldb | without tls | - | solo | couchdb | with tls | - | solo | couchdb | without tls | - | kafka | leveldb | with tls | - | kafka | leveldb | without tls | - | kafka | couchdb | with tls | - | kafka | couchdb | without tls | - - -@daily -Scenario Outline: FAB-3865: Multiple Channels Per Peer, with orderer - Given I have a bootstrapped fabric network of type - When an admin sets up a channel named "chn1" - And an admin deploys chaincode at path "github.com/hyperledger/fabric/examples/chaincode/go/example02/cmd" with args ["init", "a", "1000" , "b", "2000"] with name "cc1" on channel "chn1" - When an admin sets up a channel named "chn2" - And an admin deploys chaincode at path "github.com/hyperledger/fabric/examples/chaincode/go/map" with args ["init"] with name "cc2" on channel "chn2" - When a user invokes on the channel "chn2" using chaincode named "cc2" with args ["put", "a", "1000"] - And I wait "5" seconds - And a user queries on the channel "chn2" using chaincode named "cc2" with args ["get", "a"] - # the "map" chaincode adds quotes around the result - Then a user receives a success response of "1000" - 
When a user invokes on the channel "chn2" using chaincode named "cc2" with args ["put", "b", "2000"] - And I wait "5" seconds - And a user queries on the channel "chn2" using chaincode named "cc2" with args ["get", "b"] - # the "map" chaincode adds quotes around the result - Then a user receives a success response of "2000" - When a user invokes on the channel "chn1" using chaincode named "cc1" with args ["invoke", "a", "b", "10"] - And I wait "5" seconds - And a user queries on the channel "chn1" using chaincode named "cc1" with args ["query", "a"] - Then a user receives a success response of 990 - When a user queries on the channel "chn2" using chaincode named "cc2" with args ["get", "a"] - # the "map" chaincode adds quotes around the result - Then a user receives a success response of "1000" -Examples: - | type | - | solo | - | kafka | - - -@daily -Scenario Outline: FAB-3866: Multiple Chaincodes Per Peer, with orderer - Given I have a bootstrapped fabric network of type - When an admin sets up a channel - And an admin deploys chaincode at path "github.com/hyperledger/fabric/examples/chaincode/go/eventsender" with args [] with name "eventsender" - When a user invokes on the chaincode named "eventsender" with args ["invoke", "test_event"] - And I wait "5" seconds - And a user queries on the chaincode named "eventsender" with args ["query"] - Then a user receives a success response of {"NoEvents":"1"} - When an admin deploys chaincode at path "github.com/hyperledger/fabric/examples/chaincode/go/example02/cmd" with args ["init", "a", "1000" , "b", "2000"] with name "example02" - When a user invokes on the chaincode named "example02" with args ["invoke", "a", "b", "10"] - And I wait "5" seconds - And a user queries on the chaincode named "example02" with args ["query", "a"] - Then a user receives a success response of 990 - When an admin deploys chaincode at path "github.com/hyperledger/fabric/examples/chaincode/go/map" with args ["init"] with name "map" - When a 
user invokes on the chaincode named "map" with args ["put", "a", "1000"] - And I wait "5" seconds - And a user queries on the chaincode named "map" with args ["get", "a"] - # the "map" chaincode adds quotes around the result - Then a user receives a success response of "1000" - When an admin deploys chaincode at path "github.com/hyperledger/fabric/examples/chaincode/go/marbles02" with args [] with name "marbles" - When a user invokes on the chaincode named "marbles" with args ["initMarble", "marble1", "blue", "35", "tom"] - And I wait "5" seconds - And a user invokes on the chaincode named "marbles" with args ["transferMarble", "marble1", "jerry"] - And I wait "5" seconds - And a user queries on the chaincode named "marbles" with args ["readMarble", "marble1"] - Then a user receives a success response of {"docType":"marble","name":"marble1","color":"blue","size":35,"owner":"jerry"} - When an admin deploys chaincode at path "github.com/hyperledger/fabric/examples/chaincode/go/sleeper" with args ["1"] with name "sleeper" - When a user invokes on the chaincode named "sleeper" with args ["put", "a", "1000", "1"] - And I wait "5" seconds - And a user queries on the chaincode named "sleeper" with args ["get", "a", "1"] - Then a user receives a success response of 1000 -Examples: - | type | - | solo | - | kafka | - -Scenario: FAB-6333: A peer with chaincode container disconnects, comes back up, is able to resume regular operation - Given I have a bootstrapped fabric network of type solo - When an admin sets up a channel - And an admin deploys chaincode at path "github.com/hyperledger/fabric/examples/chaincode/go/example02/cmd" with args ["init","a","1000","b","2000"] with name "mycc" - And I wait "10" seconds - - # do 1 set of invoke-query on peer1.org1 - When a user invokes on the chaincode named "mycc" with args ["invoke","a","b","10"] on "peer1.org1.example.com" - And I wait "5" seconds - And a user queries on the chaincode named "mycc" with args ["query","a"] on 
"peer1.org1.example.com" - Then a user receives a success response of 990 from "peer1.org1.example.com" - - ## Now disconnect a peer - When "peer1.org1.example.com" is taken down by doing a disconnect - And I wait "15" seconds - - # do 2 set of invoke-query on peer0.org1 - When a user invokes on the chaincode named "mycc" with args ["invoke","a","b","20"] on "peer0.org1.example.com" - And I wait "5" seconds - And a user queries on the chaincode named "mycc" with args ["query","a"] on "peer0.org1.example.com" - Then a user receives a success response of 970 from "peer0.org1.example.com" - - When a user invokes on the chaincode named "mycc" with args ["invoke","a","b","30"] on "peer0.org1.example.com" - And I wait "5" seconds - And a user queries on the chaincode named "mycc" with args ["query","a"] on "peer0.org1.example.com" - Then a user receives a success response of 940 from "peer0.org1.example.com" - - #bring back up the disconnected peer - When "peer1.org1.example.com" comes back up by doing a connect - And I wait "30" seconds - - And a user queries on the chaincode named "mycc" with args ["query","a"] on "peer1.org1.example.com" - Then a user receives a success response of 940 from "peer1.org1.example.com" - - When a user invokes on the chaincode named "mycc" with args ["invoke","a","b","40"] on "peer1.org1.example.com" - And I wait "5" seconds - And a user queries on the chaincode named "mycc" with args ["query","a"] on "peer1.org1.example.com" - Then a user receives a success response of 900 from "peer1.org1.example.com" - - -@daily -Scenario Outline: FAB-7150/FAB-7153/FAB-7759: Test Mutual TLS/ClientAuth with based-orderer using interface - Given the CORE_PEER_TLS_CLIENTAUTHREQUIRED environment variable is "true" - And the ORDERER_TLS_CLIENTAUTHREQUIRED environment variable is "true" - And I have a bootstrapped fabric network of type - And I use the interface - When an admin sets up a channel - And an admin deploys chaincode at path 
"github.com/hyperledger/fabric/examples/chaincode/go/example02/cmd" with args ["init","a","1000","b","2000"] with name "mycc" - When a user queries on the chaincode named "mycc" with args ["query","a"] - Then a user receives a success response of 1000 - When a user invokes on the chaincode named "mycc" with args ["invoke","a","b","10"] - And I wait "5" seconds - And a user queries on the chaincode named "mycc" with args ["query","a"] - Then a user receives a success response of 990 - - When "peer0.org2.example.com" is taken down - And a user invokes on the chaincode named "mycc" with args ["invoke","a","b","10"] - And I wait "5" seconds - And "peer0.org2.example.com" comes back up - And I wait "10" seconds - And a user queries on the chaincode named "mycc" with args ["query","a"] on "peer0.org2.example.com" - Then a user receives a success response of 980 from "peer0.org2.example.com" - - When an admin queries for the first block - Then an admin receives a response containing org1.example.com - Then an admin receives a response containing org2.example.com - Then an admin receives a response containing example.com - Then an admin receives a response containing CERTIFICATE -Examples: - | type | security | interface | - | kafka | with tls | NodeJS SDK | - | solo | with tls | NodeJS SDK | - | kafka | with tls | CLI | - | solo | with tls | CLI | - | kafka | without tls | CLI | - | solo | without tls | NodeJS SDK | - -@daily -Scenario: FAB-3855: Empty Payload Messages - Given I have a bootstrapped fabric network of type kafka - When an admin sets up a channel named "emptiness" - And an admin deploys chaincode at path "github.com/hyperledger/fabric/examples/chaincode/go/map" with args ["init"] with name "empty" on channel "emptiness" - When a user invokes on the channel "emptiness" using chaincode named "empty" with args ["put", "a", ""] - And I wait "5" seconds - And a user queries on the channel "emptiness" using chaincode named "empty" with args ["get", "a"] - # the 
"map" chaincode adds quotes around the result - Then a user receives a success response of "" - - -@daily -Scenario: FAB-8379: Test MSP Identity - Happy Path - Given I have a bootstrapped fabric network of type kafka with tls with organizational units enabled on all nodes - When an admin sets up a channel - And an admin deploys chaincode with args ["init","a","1000","b","2000"] with policy "OR ('org1.example.com.member','org2.example.com.member')" - When a user queries on the chaincode named "mycc" with args ["query","a"] on "peer0.org1.example.com" - Then a user receives a success response of 1000 - # Endorsement policies not enforced during initialization - When a user invokes on the chaincode named "mycc" with args ["invoke","a","b","10"] - And I wait "5" seconds - When a user queries on the chaincode named "mycc" with args ["query","a"] - Then a user receives a success response of 990 - - -@daily -Scenario: FAB-8380: Test MSP Identity - Malicious Peer - Given the CORE_PEER_TLS_CLIENTAUTHREQUIRED environment variable is "true" - And the ORDERER_TLS_CLIENTAUTHREQUIRED environment variable is "true" - Given the peer "peer1.org2.example.com" is setup to use a client identity - And I have a bootstrapped fabric network of type kafka with tls with organizational units enabled on all nodes - When an admin sets up a channel - - And an admin deploys chaincode with args ["init","a","1000","b","2000"] with policy "OR ('org1.example.com.peer','org2.example.com.peer')" - When a user queries on the chaincode named "mycc" with args ["query","a"] - Then a user receives a success response of 1000 - - When a user invokes on the chaincode named "mycc" with args ["invoke","a","b","10"] on "peer1.org2.example.com" - Then the logs on peer1.org2.example.com contains "VSCCValidateTx for transaction txId " within 10 seconds - And the logs on peer1.org2.example.com contains "returned error: validation of endorsement policy for chaincode mycc in tx 2:0 failed: signature set did not 
satisfy policy" within 10 seconds - And I wait "2" seconds - When a user queries on the chaincode named "mycc" with args ["query","a"] on "peer0.org1.example.com" - Then a user receives a success response of 1000 - - -@daily -Scenario: FAB-8381: Test MSP Identity - Malicious Peer (Clients set as writers in policy) - Given the CORE_PEER_TLS_CLIENTAUTHREQUIRED environment variable is "true" - And the ORDERER_TLS_CLIENTAUTHREQUIRED environment variable is "true" - And I have a bootstrapped fabric network of type kafka with tls with organizational units enabled on all nodes - When an admin sets up a channel - And an admin deploys chaincode with args ["init","a","1000","b","2000"] with policy "OR ('org1.example.com.client','org2.example.com.client')" - - When the admin changes the policy to "OR ('org1.example.com.client','org2.example.com.client')" - And I wait "5" seconds - When a user queries on the chaincode named "mycc" with args ["query","a"] on "peer0.org2.example.com" - Then a user receives a success response of 1000 from "peer0.org2.example.com" - When a user queries on the chaincode named "mycc" with args ["query","a"] - Then a user receives a success response of 1000 - - When a user using a peer identity invokes on the chaincode named "mycc" with args ["invoke","a","b","10"] - Then the logs on peer0.org2.example.com contains "VSCCValidateTx for transaction txId " within 10 seconds - And the logs on peer0.org2.example.com contains "returned error: validation of endorsement policy for chaincode mycc in tx 2:0 failed: signature set did not satisfy policy" within 10 seconds - And I wait "2" seconds - - When a user queries on the chaincode named "mycc" with args ["query","a"] on "peer0.org1.example.com" - Then a user receives a success response of 1000 - - -@daily -Scenario: FAB-8382: Test MSP Identity with inconsistencies - Given the CORE_PEER_TLS_CLIENTAUTHREQUIRED environment variable is "true" - And the ORDERER_TLS_CLIENTAUTHREQUIRED environment variable is 
"true" - And I have a bootstrapped fabric network of type kafka with tls with organizational units enabled on all Org1ExampleCom nodes - When an admin sets up a channel - And an admin deploys chaincode with args ["init","a","1000","b","2000"] with policy "OR ('org1.example.com.peer','org2.example.com.peer')" - When a user queries on the chaincode named "mycc" with args ["query","a"] on "peer0.org1.example.com" - Then a user receives a success response of 1000 - When a user queries on the chaincode named "mycc" with args ["query","a"] on "peer0.org2.example.com" - Then a user receives a success response of 1000 from "peer0.org2.example.com" - - When a user invokes on the chaincode named "mycc" with args ["invoke","a","b","10"] on "peer0.org2.example.com" - Then the logs on peer0.org2.example.com contains "VSCCValidateTx for transaction txId " within 10 seconds - And the logs on peer0.org2.example.com contains "returned error: validation of endorsement policy for chaincode mycc in tx 2:0 failed: signature set did not satisfy policy" within 10 seconds - And I wait "2" seconds - When a user queries on the chaincode named "mycc" with args ["query","a"] on "peer0.org1.example.com" - Then a user receives a success response of 1000 - -@daily -Scenario: FAB-8759: Test querying a peer with two different versions of chaincode - values change - Given I have a bootstrapped fabric network of type kafka with tls - When an admin sets up a channel named "versioningtest" - And an admin deploys chaincode at path "github.com/hyperledger/fabric/examples/chaincode/go/example02/cmd" with args ["init", "a", "1000" , "b", "2000"] with name "vt" on channel "versioningtest" - When a user queries on the channel "versioningtest" using chaincode named "vt" with args ["query","a"] on "peer0.org1.example.com" - Then a user receives a success response of 1000 - When a user invokes on the channel "versioningtest" using chaincode named "vt" with args ["invoke","a","b","10"] on 
"peer0.org2.example.com" - And I wait "5" seconds - When a user queries on the channel "versioningtest" using chaincode named "vt" with args ["query","a"] on "peer0.org1.example.com" - Then a user receives a success response of 990 - - When an admin installs chaincode at path "github.com/hyperledger/fabric/examples/chaincode/go/example02/cmd" as version "3" with args ["init","a","1000","b","2000"] with name "vt" on all peers - And I wait "5" seconds - When an admin upgrades the chaincode with name "vt" on channel "versioningtest" to version "3" on peer "peer0.org1.example.com" with args ["init","a","1000","b","2000"] - #When an admin upgrades the chaincode on channel "versioningtest" to version "3" on peer "peer0.org1.example.com" - When a user queries on version "3" of the channel "versioningtest" using chaincode named "vt" with args ["query","a"] on "peer0.org1.example.com" - Then a user receives a success response of 1000 - When a user queries on version "0" of the channel "versioningtest" using chaincode named "vt" with args ["query","a"] on "peer0.org2.example.com" - Then a user receives a success response of 1000 from "peer0.org2.example.com" - - -@daily -Scenario: FAB-8759: Test querying a peer that has two different versions of chaincode - no change in data - Given I have a bootstrapped fabric network of type kafka with tls - When an admin sets up a channel named "versioningtest" - And an admin deploys chaincode at path "github.com/hyperledger/fabric/examples/chaincode/go/map" with args ["init"] with name "vt" on channel "versioningtest" - When a user invokes on the channel "versioningtest" using chaincode named "vt" with args ["put","a","1000"] on "peer0.org2.example.com" - When a user invokes on the channel "versioningtest" using chaincode named "vt" with args ["put","b","2000"] on "peer0.org1.example.com" - When a user invokes on the channel "versioningtest" using chaincode named "vt" with args ["put","c","3000"] on "peer1.org1.example.com" - And I wait 
"5" seconds - When a user queries on the channel "versioningtest" using chaincode named "vt" with args ["get","a"] on "peer1.org2.example.com" - Then a user receives a success response of "1000" from "peer1.org2.example.com" - When a user queries on the channel "versioningtest" using chaincode named "vt" with args ["get","c"] on "peer1.org2.example.com" - Then a user receives a success response of "3000" from "peer1.org2.example.com" - - When an admin installs chaincode at path "github.com/hyperledger/fabric/examples/chaincode/go/map" as version "4" with args ["init"] with name "vt" on all peers - And I wait "5" seconds - When an admin upgrades the chaincode with name "vt" on channel "versioningtest" to version "4" on peer "peer0.org1.example.com" with args ["init"] - When a user queries on version "4" of the channel "versioningtest" using chaincode named "vt" with args ["get","a"] on "peer0.org1.example.com" - Then a user receives a success response of "1000" - When a user queries on version "0" of the channel "versioningtest" using chaincode named "vt" with args ["get","c"] on "peer0.org2.example.com" - Then a user receives a success response of "3000" from "peer0.org2.example.com" - -@daily -Scenario: FAB-7407: Update the channel policies - add an organization - Given I have a bootstrapped fabric network of type solo with tls - When an admin sets up a channel - And an admin deploys chaincode with args ["init","a","1000","b","2000"] - When a user invokes on the chaincode with args ["invoke","a","b","10"] - And I wait "5" seconds - When a user queries on the chaincode with args ["query","a"] - Then a user receives a success response of 990 - - When an admin adds an organization to the channel config - # Assume channel config file is distributed out of band - And all organization admins sign the updated channel config - When the admin updates the channel using peer "peer0.org1.example.com" - - When an admin fetches genesis information using peer 
"peer0.org1.example.com" - Then the config block file is fetched from peer "peer0.org1.example.com" - Then the updated config block contains Org3ExampleCom - - When a user invokes on the chaincode with args ["invoke","a","b","10"] - And I wait "5" seconds - When a user queries on the chaincode with args ["query","a"] - Then a user receives a success response of 980 - - When the peers from the added organization are added to the network - - When an admin fetches genesis information at block 0 using peer "peer0.org3.example.com" - When an admin makes peer "peer0.org3.example.com" join the channel - And an admin makes peer "peer1.org3.example.com" join the channel - When an admin installs chaincode at path "github.com/hyperledger/fabric/examples/chaincode/go/example02/cmd" as version "2" with args ["init","a","1000","b","2000"] on all peers - And I wait "5" seconds - When an admin upgrades the chaincode to version "2" on peer "peer0.org1.example.com" with args ["init","a","1000","b","2000"] - - When a user queries on the chaincode with args ["query","a"] from "peer0.org3.example.com" - Then a user receives a success response of 1000 from "peer0.org3.example.com" - - When a user invokes on the chaincode with args ["invoke","a","b","10"] on "peer0.org2.example.com" - And I wait "5" seconds - When a user queries on the chaincode with args ["query","a"] - Then a user receives a success response of 990 - When a user invokes on the chaincode with args ["invoke","a","b","10"] on "peer1.org3.example.com" - And I wait "5" seconds - When a user queries on the chaincode with args ["query","a"] - Then a user receives a success response of 980 - - When an admin fetches genesis information using peer "peer0.org1.example.com" - Then the config block file is fetched from peer "peer0.org1.example.com" - When an admin removes an organization named Org2ExampleCom from the channel config - And all organization admins sign the updated channel config - When the admin updates the channel 
using peer "peer0.org1.example.com" - - When an admin fetches genesis information using peer "peer0.org1.example.com" - Then the config block file is fetched from peer "peer0.org1.example.com" - Then the updated config block does not contain Org2ExampleCom - - When a user invokes on the chaincode with args ["invoke","a","b","10"] - And I wait "5" seconds - When a user queries on the chaincode with args ["query","a"] - Then a user receives a success response of 970 - When a user queries on the chaincode with args ["query","a"] from "peer0.org2.example.com" - Then a user receives a success response of 980 from "peer0.org2.example.com" diff --git a/app/platform/fabric/e2e-test/feature/peer/__init__.py b/app/platform/fabric/e2e-test/feature/peer/__init__.py deleted file mode 100644 index 49cd7f3ac..000000000 --- a/app/platform/fabric/e2e-test/feature/peer/__init__.py +++ /dev/null @@ -1,5 +0,0 @@ -# -# Copyright IBM Corp. All Rights Reserved. -# -# SPDX-License-Identifier: Apache-2.0 -# diff --git a/app/platform/fabric/e2e-test/feature/peer/admin_pb2.py b/app/platform/fabric/e2e-test/feature/peer/admin_pb2.py deleted file mode 100644 index 597c6be83..000000000 --- a/app/platform/fabric/e2e-test/feature/peer/admin_pb2.py +++ /dev/null @@ -1,429 +0,0 @@ -# Generated by the protocol buffer compiler. DO NOT EDIT! 
-# source: peer/admin.proto - -import sys -_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1')) -from google.protobuf import descriptor as _descriptor -from google.protobuf import message as _message -from google.protobuf import reflection as _reflection -from google.protobuf import symbol_database as _symbol_database -from google.protobuf import descriptor_pb2 -# @@protoc_insertion_point(imports) - -_sym_db = _symbol_database.Default() - - -from google.protobuf import empty_pb2 as google_dot_protobuf_dot_empty__pb2 - - -DESCRIPTOR = _descriptor.FileDescriptor( - name='peer/admin.proto', - package='protos', - syntax='proto3', - serialized_pb=_b('\n\x10peer/admin.proto\x12\x06protos\x1a\x1bgoogle/protobuf/empty.proto\"\x9a\x01\n\x0cServerStatus\x12/\n\x06status\x18\x01 \x01(\x0e\x32\x1f.protos.ServerStatus.StatusCode\"Y\n\nStatusCode\x12\r\n\tUNDEFINED\x10\x00\x12\x0b\n\x07STARTED\x10\x01\x12\x0b\n\x07STOPPED\x10\x02\x12\n\n\x06PAUSED\x10\x03\x12\t\n\x05\x45RROR\x10\x04\x12\x0b\n\x07UNKNOWN\x10\x05\"8\n\x0fLogLevelRequest\x12\x12\n\nlog_module\x18\x01 \x01(\t\x12\x11\n\tlog_level\x18\x02 \x01(\t\"9\n\x10LogLevelResponse\x12\x12\n\nlog_module\x18\x01 \x01(\t\x12\x11\n\tlog_level\x18\x02 \x01(\t2\xd5\x02\n\x05\x41\x64min\x12;\n\tGetStatus\x12\x16.google.protobuf.Empty\x1a\x14.protos.ServerStatus\"\x00\x12=\n\x0bStartServer\x12\x16.google.protobuf.Empty\x1a\x14.protos.ServerStatus\"\x00\x12<\n\nStopServer\x12\x16.google.protobuf.Empty\x1a\x14.protos.ServerStatus\"\x00\x12H\n\x11GetModuleLogLevel\x12\x17.protos.LogLevelRequest\x1a\x18.protos.LogLevelResponse\"\x00\x12H\n\x11SetModuleLogLevel\x12\x17.protos.LogLevelRequest\x1a\x18.protos.LogLevelResponse\"\x00\x42]\n\"org.hyperledger.fabric.protos.peerB\x0c\x41\x64minPackageZ)github.com/hyperledger/fabric/protos/peerb\x06proto3') - , - dependencies=[google_dot_protobuf_dot_empty__pb2.DESCRIPTOR,]) -_sym_db.RegisterFileDescriptor(DESCRIPTOR) - - - -_SERVERSTATUS_STATUSCODE = 
_descriptor.EnumDescriptor( - name='StatusCode', - full_name='protos.ServerStatus.StatusCode', - filename=None, - file=DESCRIPTOR, - values=[ - _descriptor.EnumValueDescriptor( - name='UNDEFINED', index=0, number=0, - options=None, - type=None), - _descriptor.EnumValueDescriptor( - name='STARTED', index=1, number=1, - options=None, - type=None), - _descriptor.EnumValueDescriptor( - name='STOPPED', index=2, number=2, - options=None, - type=None), - _descriptor.EnumValueDescriptor( - name='PAUSED', index=3, number=3, - options=None, - type=None), - _descriptor.EnumValueDescriptor( - name='ERROR', index=4, number=4, - options=None, - type=None), - _descriptor.EnumValueDescriptor( - name='UNKNOWN', index=5, number=5, - options=None, - type=None), - ], - containing_type=None, - options=None, - serialized_start=123, - serialized_end=212, -) -_sym_db.RegisterEnumDescriptor(_SERVERSTATUS_STATUSCODE) - - -_SERVERSTATUS = _descriptor.Descriptor( - name='ServerStatus', - full_name='protos.ServerStatus', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='status', full_name='protos.ServerStatus.status', index=0, - number=1, type=14, cpp_type=8, label=1, - has_default_value=False, default_value=0, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - _SERVERSTATUS_STATUSCODE, - ], - options=None, - is_extendable=False, - syntax='proto3', - extension_ranges=[], - oneofs=[ - ], - serialized_start=58, - serialized_end=212, -) - - -_LOGLEVELREQUEST = _descriptor.Descriptor( - name='LogLevelRequest', - full_name='protos.LogLevelRequest', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='log_module', full_name='protos.LogLevelRequest.log_module', index=0, - number=1, type=9, cpp_type=9, label=1, - has_default_value=False, 
default_value=_b("").decode('utf-8'), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='log_level', full_name='protos.LogLevelRequest.log_level', index=1, - number=2, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=_b("").decode('utf-8'), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - options=None, - is_extendable=False, - syntax='proto3', - extension_ranges=[], - oneofs=[ - ], - serialized_start=214, - serialized_end=270, -) - - -_LOGLEVELRESPONSE = _descriptor.Descriptor( - name='LogLevelResponse', - full_name='protos.LogLevelResponse', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='log_module', full_name='protos.LogLevelResponse.log_module', index=0, - number=1, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=_b("").decode('utf-8'), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='log_level', full_name='protos.LogLevelResponse.log_level', index=1, - number=2, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=_b("").decode('utf-8'), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - options=None, - is_extendable=False, - syntax='proto3', - extension_ranges=[], - oneofs=[ - ], - serialized_start=272, - serialized_end=329, -) - -_SERVERSTATUS.fields_by_name['status'].enum_type = _SERVERSTATUS_STATUSCODE -_SERVERSTATUS_STATUSCODE.containing_type = _SERVERSTATUS -DESCRIPTOR.message_types_by_name['ServerStatus'] = _SERVERSTATUS 
-DESCRIPTOR.message_types_by_name['LogLevelRequest'] = _LOGLEVELREQUEST -DESCRIPTOR.message_types_by_name['LogLevelResponse'] = _LOGLEVELRESPONSE - -ServerStatus = _reflection.GeneratedProtocolMessageType('ServerStatus', (_message.Message,), dict( - DESCRIPTOR = _SERVERSTATUS, - __module__ = 'peer.admin_pb2' - # @@protoc_insertion_point(class_scope:protos.ServerStatus) - )) -_sym_db.RegisterMessage(ServerStatus) - -LogLevelRequest = _reflection.GeneratedProtocolMessageType('LogLevelRequest', (_message.Message,), dict( - DESCRIPTOR = _LOGLEVELREQUEST, - __module__ = 'peer.admin_pb2' - # @@protoc_insertion_point(class_scope:protos.LogLevelRequest) - )) -_sym_db.RegisterMessage(LogLevelRequest) - -LogLevelResponse = _reflection.GeneratedProtocolMessageType('LogLevelResponse', (_message.Message,), dict( - DESCRIPTOR = _LOGLEVELRESPONSE, - __module__ = 'peer.admin_pb2' - # @@protoc_insertion_point(class_scope:protos.LogLevelResponse) - )) -_sym_db.RegisterMessage(LogLevelResponse) - - -DESCRIPTOR.has_options = True -DESCRIPTOR._options = _descriptor._ParseOptions(descriptor_pb2.FileOptions(), _b('\n\"org.hyperledger.fabric.protos.peerB\014AdminPackageZ)github.com/hyperledger/fabric/protos/peer')) -try: - # THESE ELEMENTS WILL BE DEPRECATED. - # Please use the generated *_pb2_grpc.py files instead. - import grpc - from grpc.framework.common import cardinality - from grpc.framework.interfaces.face import utilities as face_utilities - from grpc.beta import implementations as beta_implementations - from grpc.beta import interfaces as beta_interfaces - - - class AdminStub(object): - """Interface exported by the server. - """ - - def __init__(self, channel): - """Constructor. - - Args: - channel: A grpc.Channel. 
- """ - self.GetStatus = channel.unary_unary( - '/protos.Admin/GetStatus', - request_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString, - response_deserializer=ServerStatus.FromString, - ) - self.StartServer = channel.unary_unary( - '/protos.Admin/StartServer', - request_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString, - response_deserializer=ServerStatus.FromString, - ) - self.StopServer = channel.unary_unary( - '/protos.Admin/StopServer', - request_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString, - response_deserializer=ServerStatus.FromString, - ) - self.GetModuleLogLevel = channel.unary_unary( - '/protos.Admin/GetModuleLogLevel', - request_serializer=LogLevelRequest.SerializeToString, - response_deserializer=LogLevelResponse.FromString, - ) - self.SetModuleLogLevel = channel.unary_unary( - '/protos.Admin/SetModuleLogLevel', - request_serializer=LogLevelRequest.SerializeToString, - response_deserializer=LogLevelResponse.FromString, - ) - - - class AdminServicer(object): - """Interface exported by the server. - """ - - def GetStatus(self, request, context): - """Return the serve status. 
- """ - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details('Method not implemented!') - raise NotImplementedError('Method not implemented!') - - def StartServer(self, request, context): - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details('Method not implemented!') - raise NotImplementedError('Method not implemented!') - - def StopServer(self, request, context): - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details('Method not implemented!') - raise NotImplementedError('Method not implemented!') - - def GetModuleLogLevel(self, request, context): - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details('Method not implemented!') - raise NotImplementedError('Method not implemented!') - - def SetModuleLogLevel(self, request, context): - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details('Method not implemented!') - raise NotImplementedError('Method not implemented!') - - - def add_AdminServicer_to_server(servicer, server): - rpc_method_handlers = { - 'GetStatus': grpc.unary_unary_rpc_method_handler( - servicer.GetStatus, - request_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString, - response_serializer=ServerStatus.SerializeToString, - ), - 'StartServer': grpc.unary_unary_rpc_method_handler( - servicer.StartServer, - request_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString, - response_serializer=ServerStatus.SerializeToString, - ), - 'StopServer': grpc.unary_unary_rpc_method_handler( - servicer.StopServer, - request_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString, - response_serializer=ServerStatus.SerializeToString, - ), - 'GetModuleLogLevel': grpc.unary_unary_rpc_method_handler( - servicer.GetModuleLogLevel, - request_deserializer=LogLevelRequest.FromString, - response_serializer=LogLevelResponse.SerializeToString, - ), - 'SetModuleLogLevel': grpc.unary_unary_rpc_method_handler( - servicer.SetModuleLogLevel, - 
request_deserializer=LogLevelRequest.FromString, - response_serializer=LogLevelResponse.SerializeToString, - ), - } - generic_handler = grpc.method_handlers_generic_handler( - 'protos.Admin', rpc_method_handlers) - server.add_generic_rpc_handlers((generic_handler,)) - - - class BetaAdminServicer(object): - """The Beta API is deprecated for 0.15.0 and later. - - It is recommended to use the GA API (classes and functions in this - file not marked beta) for all further purposes. This class was generated - only to ease transition from grpcio<0.15.0 to grpcio>=0.15.0.""" - """Interface exported by the server. - """ - def GetStatus(self, request, context): - """Return the serve status. - """ - context.code(beta_interfaces.StatusCode.UNIMPLEMENTED) - def StartServer(self, request, context): - context.code(beta_interfaces.StatusCode.UNIMPLEMENTED) - def StopServer(self, request, context): - context.code(beta_interfaces.StatusCode.UNIMPLEMENTED) - def GetModuleLogLevel(self, request, context): - context.code(beta_interfaces.StatusCode.UNIMPLEMENTED) - def SetModuleLogLevel(self, request, context): - context.code(beta_interfaces.StatusCode.UNIMPLEMENTED) - - - class BetaAdminStub(object): - """The Beta API is deprecated for 0.15.0 and later. - - It is recommended to use the GA API (classes and functions in this - file not marked beta) for all further purposes. This class was generated - only to ease transition from grpcio<0.15.0 to grpcio>=0.15.0.""" - """Interface exported by the server. - """ - def GetStatus(self, request, timeout, metadata=None, with_call=False, protocol_options=None): - """Return the serve status. 
- """ - raise NotImplementedError() - GetStatus.future = None - def StartServer(self, request, timeout, metadata=None, with_call=False, protocol_options=None): - raise NotImplementedError() - StartServer.future = None - def StopServer(self, request, timeout, metadata=None, with_call=False, protocol_options=None): - raise NotImplementedError() - StopServer.future = None - def GetModuleLogLevel(self, request, timeout, metadata=None, with_call=False, protocol_options=None): - raise NotImplementedError() - GetModuleLogLevel.future = None - def SetModuleLogLevel(self, request, timeout, metadata=None, with_call=False, protocol_options=None): - raise NotImplementedError() - SetModuleLogLevel.future = None - - - def beta_create_Admin_server(servicer, pool=None, pool_size=None, default_timeout=None, maximum_timeout=None): - """The Beta API is deprecated for 0.15.0 and later. - - It is recommended to use the GA API (classes and functions in this - file not marked beta) for all further purposes. This function was - generated only to ease transition from grpcio<0.15.0 to grpcio>=0.15.0""" - request_deserializers = { - ('protos.Admin', 'GetModuleLogLevel'): LogLevelRequest.FromString, - ('protos.Admin', 'GetStatus'): google_dot_protobuf_dot_empty__pb2.Empty.FromString, - ('protos.Admin', 'SetModuleLogLevel'): LogLevelRequest.FromString, - ('protos.Admin', 'StartServer'): google_dot_protobuf_dot_empty__pb2.Empty.FromString, - ('protos.Admin', 'StopServer'): google_dot_protobuf_dot_empty__pb2.Empty.FromString, - } - response_serializers = { - ('protos.Admin', 'GetModuleLogLevel'): LogLevelResponse.SerializeToString, - ('protos.Admin', 'GetStatus'): ServerStatus.SerializeToString, - ('protos.Admin', 'SetModuleLogLevel'): LogLevelResponse.SerializeToString, - ('protos.Admin', 'StartServer'): ServerStatus.SerializeToString, - ('protos.Admin', 'StopServer'): ServerStatus.SerializeToString, - } - method_implementations = { - ('protos.Admin', 'GetModuleLogLevel'): 
face_utilities.unary_unary_inline(servicer.GetModuleLogLevel), - ('protos.Admin', 'GetStatus'): face_utilities.unary_unary_inline(servicer.GetStatus), - ('protos.Admin', 'SetModuleLogLevel'): face_utilities.unary_unary_inline(servicer.SetModuleLogLevel), - ('protos.Admin', 'StartServer'): face_utilities.unary_unary_inline(servicer.StartServer), - ('protos.Admin', 'StopServer'): face_utilities.unary_unary_inline(servicer.StopServer), - } - server_options = beta_implementations.server_options(request_deserializers=request_deserializers, response_serializers=response_serializers, thread_pool=pool, thread_pool_size=pool_size, default_timeout=default_timeout, maximum_timeout=maximum_timeout) - return beta_implementations.server(method_implementations, options=server_options) - - - def beta_create_Admin_stub(channel, host=None, metadata_transformer=None, pool=None, pool_size=None): - """The Beta API is deprecated for 0.15.0 and later. - - It is recommended to use the GA API (classes and functions in this - file not marked beta) for all further purposes. 
This function was - generated only to ease transition from grpcio<0.15.0 to grpcio>=0.15.0""" - request_serializers = { - ('protos.Admin', 'GetModuleLogLevel'): LogLevelRequest.SerializeToString, - ('protos.Admin', 'GetStatus'): google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString, - ('protos.Admin', 'SetModuleLogLevel'): LogLevelRequest.SerializeToString, - ('protos.Admin', 'StartServer'): google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString, - ('protos.Admin', 'StopServer'): google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString, - } - response_deserializers = { - ('protos.Admin', 'GetModuleLogLevel'): LogLevelResponse.FromString, - ('protos.Admin', 'GetStatus'): ServerStatus.FromString, - ('protos.Admin', 'SetModuleLogLevel'): LogLevelResponse.FromString, - ('protos.Admin', 'StartServer'): ServerStatus.FromString, - ('protos.Admin', 'StopServer'): ServerStatus.FromString, - } - cardinalities = { - 'GetModuleLogLevel': cardinality.Cardinality.UNARY_UNARY, - 'GetStatus': cardinality.Cardinality.UNARY_UNARY, - 'SetModuleLogLevel': cardinality.Cardinality.UNARY_UNARY, - 'StartServer': cardinality.Cardinality.UNARY_UNARY, - 'StopServer': cardinality.Cardinality.UNARY_UNARY, - } - stub_options = beta_implementations.stub_options(host=host, metadata_transformer=metadata_transformer, request_serializers=request_serializers, response_deserializers=response_deserializers, thread_pool=pool, thread_pool_size=pool_size) - return beta_implementations.dynamic_stub(channel, 'protos.Admin', cardinalities, options=stub_options) -except ImportError: - pass -# @@protoc_insertion_point(module_scope) diff --git a/app/platform/fabric/e2e-test/feature/peer/admin_pb2_grpc.py b/app/platform/fabric/e2e-test/feature/peer/admin_pb2_grpc.py deleted file mode 100644 index 0114d9a48..000000000 --- a/app/platform/fabric/e2e-test/feature/peer/admin_pb2_grpc.py +++ /dev/null @@ -1,109 +0,0 @@ -# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT! 
-import grpc -from grpc.framework.common import cardinality -from grpc.framework.interfaces.face import utilities as face_utilities - -import google.protobuf.empty_pb2 as google_dot_protobuf_dot_empty__pb2 -import peer.admin_pb2 as peer_dot_admin__pb2 - - -class AdminStub(object): - """Interface exported by the server. - """ - - def __init__(self, channel): - """Constructor. - - Args: - channel: A grpc.Channel. - """ - self.GetStatus = channel.unary_unary( - '/protos.Admin/GetStatus', - request_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString, - response_deserializer=peer_dot_admin__pb2.ServerStatus.FromString, - ) - self.StartServer = channel.unary_unary( - '/protos.Admin/StartServer', - request_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString, - response_deserializer=peer_dot_admin__pb2.ServerStatus.FromString, - ) - self.StopServer = channel.unary_unary( - '/protos.Admin/StopServer', - request_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString, - response_deserializer=peer_dot_admin__pb2.ServerStatus.FromString, - ) - self.GetModuleLogLevel = channel.unary_unary( - '/protos.Admin/GetModuleLogLevel', - request_serializer=peer_dot_admin__pb2.LogLevelRequest.SerializeToString, - response_deserializer=peer_dot_admin__pb2.LogLevelResponse.FromString, - ) - self.SetModuleLogLevel = channel.unary_unary( - '/protos.Admin/SetModuleLogLevel', - request_serializer=peer_dot_admin__pb2.LogLevelRequest.SerializeToString, - response_deserializer=peer_dot_admin__pb2.LogLevelResponse.FromString, - ) - - -class AdminServicer(object): - """Interface exported by the server. - """ - - def GetStatus(self, request, context): - """Return the serve status. 
- """ - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details('Method not implemented!') - raise NotImplementedError('Method not implemented!') - - def StartServer(self, request, context): - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details('Method not implemented!') - raise NotImplementedError('Method not implemented!') - - def StopServer(self, request, context): - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details('Method not implemented!') - raise NotImplementedError('Method not implemented!') - - def GetModuleLogLevel(self, request, context): - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details('Method not implemented!') - raise NotImplementedError('Method not implemented!') - - def SetModuleLogLevel(self, request, context): - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details('Method not implemented!') - raise NotImplementedError('Method not implemented!') - - -def add_AdminServicer_to_server(servicer, server): - rpc_method_handlers = { - 'GetStatus': grpc.unary_unary_rpc_method_handler( - servicer.GetStatus, - request_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString, - response_serializer=peer_dot_admin__pb2.ServerStatus.SerializeToString, - ), - 'StartServer': grpc.unary_unary_rpc_method_handler( - servicer.StartServer, - request_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString, - response_serializer=peer_dot_admin__pb2.ServerStatus.SerializeToString, - ), - 'StopServer': grpc.unary_unary_rpc_method_handler( - servicer.StopServer, - request_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString, - response_serializer=peer_dot_admin__pb2.ServerStatus.SerializeToString, - ), - 'GetModuleLogLevel': grpc.unary_unary_rpc_method_handler( - servicer.GetModuleLogLevel, - request_deserializer=peer_dot_admin__pb2.LogLevelRequest.FromString, - response_serializer=peer_dot_admin__pb2.LogLevelResponse.SerializeToString, - ), - 
'SetModuleLogLevel': grpc.unary_unary_rpc_method_handler( - servicer.SetModuleLogLevel, - request_deserializer=peer_dot_admin__pb2.LogLevelRequest.FromString, - response_serializer=peer_dot_admin__pb2.LogLevelResponse.SerializeToString, - ), - } - generic_handler = grpc.method_handlers_generic_handler( - 'protos.Admin', rpc_method_handlers) - server.add_generic_rpc_handlers((generic_handler,)) diff --git a/app/platform/fabric/e2e-test/feature/peer/chaincode_event_pb2.py b/app/platform/fabric/e2e-test/feature/peer/chaincode_event_pb2.py deleted file mode 100644 index 7a09d82ea..000000000 --- a/app/platform/fabric/e2e-test/feature/peer/chaincode_event_pb2.py +++ /dev/null @@ -1,102 +0,0 @@ -# Generated by the protocol buffer compiler. DO NOT EDIT! -# source: peer/chaincode_event.proto - -import sys -_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1')) -from google.protobuf import descriptor as _descriptor -from google.protobuf import message as _message -from google.protobuf import reflection as _reflection -from google.protobuf import symbol_database as _symbol_database -from google.protobuf import descriptor_pb2 -# @@protoc_insertion_point(imports) - -_sym_db = _symbol_database.Default() - - - - -DESCRIPTOR = _descriptor.FileDescriptor( - name='peer/chaincode_event.proto', - package='protos', - syntax='proto3', - serialized_pb=_b('\n\x1apeer/chaincode_event.proto\x12\x06protos\"Z\n\x0e\x43haincodeEvent\x12\x14\n\x0c\x63haincode_id\x18\x01 \x01(\t\x12\r\n\x05tx_id\x18\x02 \x01(\t\x12\x12\n\nevent_name\x18\x03 \x01(\t\x12\x0f\n\x07payload\x18\x04 \x01(\x0c\x42\x66\n\"org.hyperledger.fabric.protos.peerB\x15\x43haincodeEventPackageZ)github.com/hyperledger/fabric/protos/peerb\x06proto3') -) -_sym_db.RegisterFileDescriptor(DESCRIPTOR) - - - - -_CHAINCODEEVENT = _descriptor.Descriptor( - name='ChaincodeEvent', - full_name='protos.ChaincodeEvent', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - 
name='chaincode_id', full_name='protos.ChaincodeEvent.chaincode_id', index=0, - number=1, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=_b("").decode('utf-8'), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='tx_id', full_name='protos.ChaincodeEvent.tx_id', index=1, - number=2, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=_b("").decode('utf-8'), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='event_name', full_name='protos.ChaincodeEvent.event_name', index=2, - number=3, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=_b("").decode('utf-8'), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='payload', full_name='protos.ChaincodeEvent.payload', index=3, - number=4, type=12, cpp_type=9, label=1, - has_default_value=False, default_value=_b(""), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - options=None, - is_extendable=False, - syntax='proto3', - extension_ranges=[], - oneofs=[ - ], - serialized_start=38, - serialized_end=128, -) - -DESCRIPTOR.message_types_by_name['ChaincodeEvent'] = _CHAINCODEEVENT - -ChaincodeEvent = _reflection.GeneratedProtocolMessageType('ChaincodeEvent', (_message.Message,), dict( - DESCRIPTOR = _CHAINCODEEVENT, - __module__ = 'peer.chaincode_event_pb2' - # @@protoc_insertion_point(class_scope:protos.ChaincodeEvent) - )) -_sym_db.RegisterMessage(ChaincodeEvent) - - -DESCRIPTOR.has_options = True -DESCRIPTOR._options = _descriptor._ParseOptions(descriptor_pb2.FileOptions(), 
_b('\n\"org.hyperledger.fabric.protos.peerB\025ChaincodeEventPackageZ)github.com/hyperledger/fabric/protos/peer')) -try: - # THESE ELEMENTS WILL BE DEPRECATED. - # Please use the generated *_pb2_grpc.py files instead. - import grpc - from grpc.framework.common import cardinality - from grpc.framework.interfaces.face import utilities as face_utilities - from grpc.beta import implementations as beta_implementations - from grpc.beta import interfaces as beta_interfaces -except ImportError: - pass -# @@protoc_insertion_point(module_scope) diff --git a/app/platform/fabric/e2e-test/feature/peer/chaincode_event_pb2_grpc.py b/app/platform/fabric/e2e-test/feature/peer/chaincode_event_pb2_grpc.py deleted file mode 100644 index d5557c123..000000000 --- a/app/platform/fabric/e2e-test/feature/peer/chaincode_event_pb2_grpc.py +++ /dev/null @@ -1,5 +0,0 @@ -# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT! -import grpc -from grpc.framework.common import cardinality -from grpc.framework.interfaces.face import utilities as face_utilities - diff --git a/app/platform/fabric/e2e-test/feature/peer/chaincode_pb2.py b/app/platform/fabric/e2e-test/feature/peer/chaincode_pb2.py deleted file mode 100644 index b2e12b127..000000000 --- a/app/platform/fabric/e2e-test/feature/peer/chaincode_pb2.py +++ /dev/null @@ -1,396 +0,0 @@ -# Generated by the protocol buffer compiler. DO NOT EDIT! 
-# source: peer/chaincode.proto - -import sys -_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1')) -from google.protobuf.internal import enum_type_wrapper -from google.protobuf import descriptor as _descriptor -from google.protobuf import message as _message -from google.protobuf import reflection as _reflection -from google.protobuf import symbol_database as _symbol_database -from google.protobuf import descriptor_pb2 -# @@protoc_insertion_point(imports) - -_sym_db = _symbol_database.Default() - - -from google.protobuf import timestamp_pb2 as google_dot_protobuf_dot_timestamp__pb2 - - -DESCRIPTOR = _descriptor.FileDescriptor( - name='peer/chaincode.proto', - package='protos', - syntax='proto3', - serialized_pb=_b('\n\x14peer/chaincode.proto\x12\x06protos\x1a\x1fgoogle/protobuf/timestamp.proto\":\n\x0b\x43haincodeID\x12\x0c\n\x04path\x18\x01 \x01(\t\x12\x0c\n\x04name\x18\x02 \x01(\t\x12\x0f\n\x07version\x18\x03 \x01(\t\"\x1e\n\x0e\x43haincodeInput\x12\x0c\n\x04\x61rgs\x18\x01 \x03(\x0c\"\xdc\x01\n\rChaincodeSpec\x12(\n\x04type\x18\x01 \x01(\x0e\x32\x1a.protos.ChaincodeSpec.Type\x12)\n\x0c\x63haincode_id\x18\x02 \x01(\x0b\x32\x13.protos.ChaincodeID\x12%\n\x05input\x18\x03 \x01(\x0b\x32\x16.protos.ChaincodeInput\x12\x0f\n\x07timeout\x18\x04 \x01(\x05\">\n\x04Type\x12\r\n\tUNDEFINED\x10\x00\x12\n\n\x06GOLANG\x10\x01\x12\x08\n\x04NODE\x10\x02\x12\x07\n\x03\x43\x41R\x10\x03\x12\x08\n\x04JAVA\x10\x04\"\x8a\x02\n\x17\x43haincodeDeploymentSpec\x12-\n\x0e\x63haincode_spec\x18\x01 \x01(\x0b\x32\x15.protos.ChaincodeSpec\x12\x32\n\x0e\x65\x66\x66\x65\x63tive_date\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x14\n\x0c\x63ode_package\x18\x03 \x01(\x0c\x12\x46\n\x08\x65xec_env\x18\x04 \x01(\x0e\x32\x34.protos.ChaincodeDeploymentSpec.ExecutionEnvironment\".\n\x14\x45xecutionEnvironment\x12\n\n\x06\x44OCKER\x10\x00\x12\n\n\x06SYSTEM\x10\x01\"c\n\x17\x43haincodeInvocationSpec\x12-\n\x0e\x63haincode_spec\x18\x01 
\x01(\x0b\x32\x15.protos.ChaincodeSpec\x12\x19\n\x11id_generation_alg\x18\x02 \x01(\t*4\n\x14\x43onfidentialityLevel\x12\n\n\x06PUBLIC\x10\x00\x12\x10\n\x0c\x43ONFIDENTIAL\x10\x01\x42O\n\"org.hyperledger.fabric.protos.peerZ)github.com/hyperledger/fabric/protos/peerb\x06proto3') - , - dependencies=[google_dot_protobuf_dot_timestamp__pb2.DESCRIPTOR,]) -_sym_db.RegisterFileDescriptor(DESCRIPTOR) - -_CONFIDENTIALITYLEVEL = _descriptor.EnumDescriptor( - name='ConfidentialityLevel', - full_name='protos.ConfidentialityLevel', - filename=None, - file=DESCRIPTOR, - values=[ - _descriptor.EnumValueDescriptor( - name='PUBLIC', index=0, number=0, - options=None, - type=None), - _descriptor.EnumValueDescriptor( - name='CONFIDENTIAL', index=1, number=1, - options=None, - type=None), - ], - containing_type=None, - options=None, - serialized_start=750, - serialized_end=802, -) -_sym_db.RegisterEnumDescriptor(_CONFIDENTIALITYLEVEL) - -ConfidentialityLevel = enum_type_wrapper.EnumTypeWrapper(_CONFIDENTIALITYLEVEL) -PUBLIC = 0 -CONFIDENTIAL = 1 - - -_CHAINCODESPEC_TYPE = _descriptor.EnumDescriptor( - name='Type', - full_name='protos.ChaincodeSpec.Type', - filename=None, - file=DESCRIPTOR, - values=[ - _descriptor.EnumValueDescriptor( - name='UNDEFINED', index=0, number=0, - options=None, - type=None), - _descriptor.EnumValueDescriptor( - name='GOLANG', index=1, number=1, - options=None, - type=None), - _descriptor.EnumValueDescriptor( - name='NODE', index=2, number=2, - options=None, - type=None), - _descriptor.EnumValueDescriptor( - name='CAR', index=3, number=3, - options=None, - type=None), - _descriptor.EnumValueDescriptor( - name='JAVA', index=4, number=4, - options=None, - type=None), - ], - containing_type=None, - options=None, - serialized_start=316, - serialized_end=378, -) -_sym_db.RegisterEnumDescriptor(_CHAINCODESPEC_TYPE) - -_CHAINCODEDEPLOYMENTSPEC_EXECUTIONENVIRONMENT = _descriptor.EnumDescriptor( - name='ExecutionEnvironment', - 
full_name='protos.ChaincodeDeploymentSpec.ExecutionEnvironment', - filename=None, - file=DESCRIPTOR, - values=[ - _descriptor.EnumValueDescriptor( - name='DOCKER', index=0, number=0, - options=None, - type=None), - _descriptor.EnumValueDescriptor( - name='SYSTEM', index=1, number=1, - options=None, - type=None), - ], - containing_type=None, - options=None, - serialized_start=601, - serialized_end=647, -) -_sym_db.RegisterEnumDescriptor(_CHAINCODEDEPLOYMENTSPEC_EXECUTIONENVIRONMENT) - - -_CHAINCODEID = _descriptor.Descriptor( - name='ChaincodeID', - full_name='protos.ChaincodeID', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='path', full_name='protos.ChaincodeID.path', index=0, - number=1, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=_b("").decode('utf-8'), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='name', full_name='protos.ChaincodeID.name', index=1, - number=2, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=_b("").decode('utf-8'), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='version', full_name='protos.ChaincodeID.version', index=2, - number=3, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=_b("").decode('utf-8'), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - options=None, - is_extendable=False, - syntax='proto3', - extension_ranges=[], - oneofs=[ - ], - serialized_start=65, - serialized_end=123, -) - - -_CHAINCODEINPUT = _descriptor.Descriptor( - name='ChaincodeInput', - full_name='protos.ChaincodeInput', - filename=None, - file=DESCRIPTOR, - containing_type=None, - 
fields=[ - _descriptor.FieldDescriptor( - name='args', full_name='protos.ChaincodeInput.args', index=0, - number=1, type=12, cpp_type=9, label=3, - has_default_value=False, default_value=[], - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - options=None, - is_extendable=False, - syntax='proto3', - extension_ranges=[], - oneofs=[ - ], - serialized_start=125, - serialized_end=155, -) - - -_CHAINCODESPEC = _descriptor.Descriptor( - name='ChaincodeSpec', - full_name='protos.ChaincodeSpec', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='type', full_name='protos.ChaincodeSpec.type', index=0, - number=1, type=14, cpp_type=8, label=1, - has_default_value=False, default_value=0, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='chaincode_id', full_name='protos.ChaincodeSpec.chaincode_id', index=1, - number=2, type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='input', full_name='protos.ChaincodeSpec.input', index=2, - number=3, type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='timeout', full_name='protos.ChaincodeSpec.timeout', index=3, - number=4, type=5, cpp_type=1, label=1, - has_default_value=False, default_value=0, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - 
_CHAINCODESPEC_TYPE, - ], - options=None, - is_extendable=False, - syntax='proto3', - extension_ranges=[], - oneofs=[ - ], - serialized_start=158, - serialized_end=378, -) - - -_CHAINCODEDEPLOYMENTSPEC = _descriptor.Descriptor( - name='ChaincodeDeploymentSpec', - full_name='protos.ChaincodeDeploymentSpec', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='chaincode_spec', full_name='protos.ChaincodeDeploymentSpec.chaincode_spec', index=0, - number=1, type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='effective_date', full_name='protos.ChaincodeDeploymentSpec.effective_date', index=1, - number=2, type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='code_package', full_name='protos.ChaincodeDeploymentSpec.code_package', index=2, - number=3, type=12, cpp_type=9, label=1, - has_default_value=False, default_value=_b(""), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='exec_env', full_name='protos.ChaincodeDeploymentSpec.exec_env', index=3, - number=4, type=14, cpp_type=8, label=1, - has_default_value=False, default_value=0, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - _CHAINCODEDEPLOYMENTSPEC_EXECUTIONENVIRONMENT, - ], - options=None, - is_extendable=False, - syntax='proto3', - extension_ranges=[], - oneofs=[ - ], - serialized_start=381, - serialized_end=647, -) - - -_CHAINCODEINVOCATIONSPEC = 
_descriptor.Descriptor( - name='ChaincodeInvocationSpec', - full_name='protos.ChaincodeInvocationSpec', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='chaincode_spec', full_name='protos.ChaincodeInvocationSpec.chaincode_spec', index=0, - number=1, type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='id_generation_alg', full_name='protos.ChaincodeInvocationSpec.id_generation_alg', index=1, - number=2, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=_b("").decode('utf-8'), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - options=None, - is_extendable=False, - syntax='proto3', - extension_ranges=[], - oneofs=[ - ], - serialized_start=649, - serialized_end=748, -) - -_CHAINCODESPEC.fields_by_name['type'].enum_type = _CHAINCODESPEC_TYPE -_CHAINCODESPEC.fields_by_name['chaincode_id'].message_type = _CHAINCODEID -_CHAINCODESPEC.fields_by_name['input'].message_type = _CHAINCODEINPUT -_CHAINCODESPEC_TYPE.containing_type = _CHAINCODESPEC -_CHAINCODEDEPLOYMENTSPEC.fields_by_name['chaincode_spec'].message_type = _CHAINCODESPEC -_CHAINCODEDEPLOYMENTSPEC.fields_by_name['effective_date'].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP -_CHAINCODEDEPLOYMENTSPEC.fields_by_name['exec_env'].enum_type = _CHAINCODEDEPLOYMENTSPEC_EXECUTIONENVIRONMENT -_CHAINCODEDEPLOYMENTSPEC_EXECUTIONENVIRONMENT.containing_type = _CHAINCODEDEPLOYMENTSPEC -_CHAINCODEINVOCATIONSPEC.fields_by_name['chaincode_spec'].message_type = _CHAINCODESPEC -DESCRIPTOR.message_types_by_name['ChaincodeID'] = _CHAINCODEID -DESCRIPTOR.message_types_by_name['ChaincodeInput'] = _CHAINCODEINPUT 
-DESCRIPTOR.message_types_by_name['ChaincodeSpec'] = _CHAINCODESPEC -DESCRIPTOR.message_types_by_name['ChaincodeDeploymentSpec'] = _CHAINCODEDEPLOYMENTSPEC -DESCRIPTOR.message_types_by_name['ChaincodeInvocationSpec'] = _CHAINCODEINVOCATIONSPEC -DESCRIPTOR.enum_types_by_name['ConfidentialityLevel'] = _CONFIDENTIALITYLEVEL - -ChaincodeID = _reflection.GeneratedProtocolMessageType('ChaincodeID', (_message.Message,), dict( - DESCRIPTOR = _CHAINCODEID, - __module__ = 'peer.chaincode_pb2' - # @@protoc_insertion_point(class_scope:protos.ChaincodeID) - )) -_sym_db.RegisterMessage(ChaincodeID) - -ChaincodeInput = _reflection.GeneratedProtocolMessageType('ChaincodeInput', (_message.Message,), dict( - DESCRIPTOR = _CHAINCODEINPUT, - __module__ = 'peer.chaincode_pb2' - # @@protoc_insertion_point(class_scope:protos.ChaincodeInput) - )) -_sym_db.RegisterMessage(ChaincodeInput) - -ChaincodeSpec = _reflection.GeneratedProtocolMessageType('ChaincodeSpec', (_message.Message,), dict( - DESCRIPTOR = _CHAINCODESPEC, - __module__ = 'peer.chaincode_pb2' - # @@protoc_insertion_point(class_scope:protos.ChaincodeSpec) - )) -_sym_db.RegisterMessage(ChaincodeSpec) - -ChaincodeDeploymentSpec = _reflection.GeneratedProtocolMessageType('ChaincodeDeploymentSpec', (_message.Message,), dict( - DESCRIPTOR = _CHAINCODEDEPLOYMENTSPEC, - __module__ = 'peer.chaincode_pb2' - # @@protoc_insertion_point(class_scope:protos.ChaincodeDeploymentSpec) - )) -_sym_db.RegisterMessage(ChaincodeDeploymentSpec) - -ChaincodeInvocationSpec = _reflection.GeneratedProtocolMessageType('ChaincodeInvocationSpec', (_message.Message,), dict( - DESCRIPTOR = _CHAINCODEINVOCATIONSPEC, - __module__ = 'peer.chaincode_pb2' - # @@protoc_insertion_point(class_scope:protos.ChaincodeInvocationSpec) - )) -_sym_db.RegisterMessage(ChaincodeInvocationSpec) - - -DESCRIPTOR.has_options = True -DESCRIPTOR._options = _descriptor._ParseOptions(descriptor_pb2.FileOptions(), 
_b('\n\"org.hyperledger.fabric.protos.peerZ)github.com/hyperledger/fabric/protos/peer')) -try: - # THESE ELEMENTS WILL BE DEPRECATED. - # Please use the generated *_pb2_grpc.py files instead. - import grpc - from grpc.framework.common import cardinality - from grpc.framework.interfaces.face import utilities as face_utilities - from grpc.beta import implementations as beta_implementations - from grpc.beta import interfaces as beta_interfaces -except ImportError: - pass -# @@protoc_insertion_point(module_scope) diff --git a/app/platform/fabric/e2e-test/feature/peer/chaincode_pb2_grpc.py b/app/platform/fabric/e2e-test/feature/peer/chaincode_pb2_grpc.py deleted file mode 100644 index d5557c123..000000000 --- a/app/platform/fabric/e2e-test/feature/peer/chaincode_pb2_grpc.py +++ /dev/null @@ -1,5 +0,0 @@ -# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT! -import grpc -from grpc.framework.common import cardinality -from grpc.framework.interfaces.face import utilities as face_utilities - diff --git a/app/platform/fabric/e2e-test/feature/peer/chaincode_shim_pb2.py b/app/platform/fabric/e2e-test/feature/peer/chaincode_shim_pb2.py deleted file mode 100644 index 7126da2cb..000000000 --- a/app/platform/fabric/e2e-test/feature/peer/chaincode_shim_pb2.py +++ /dev/null @@ -1,670 +0,0 @@ -# Generated by the protocol buffer compiler. DO NOT EDIT! 
-# source: peer/chaincode_shim.proto - -import sys -_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1')) -from google.protobuf import descriptor as _descriptor -from google.protobuf import message as _message -from google.protobuf import reflection as _reflection -from google.protobuf import symbol_database as _symbol_database -from google.protobuf import descriptor_pb2 -# @@protoc_insertion_point(imports) - -_sym_db = _symbol_database.Default() - - -from peer import chaincode_event_pb2 as peer_dot_chaincode__event__pb2 -from peer import proposal_pb2 as peer_dot_proposal__pb2 -from google.protobuf import timestamp_pb2 as google_dot_protobuf_dot_timestamp__pb2 - - -DESCRIPTOR = _descriptor.FileDescriptor( - name='peer/chaincode_shim.proto', - package='protos', - syntax='proto3', - serialized_pb=_b('\n\x19peer/chaincode_shim.proto\x12\x06protos\x1a\x1apeer/chaincode_event.proto\x1a\x13peer/proposal.proto\x1a\x1fgoogle/protobuf/timestamp.proto\"\xac\x04\n\x10\x43haincodeMessage\x12+\n\x04type\x18\x01 \x01(\x0e\x32\x1d.protos.ChaincodeMessage.Type\x12-\n\ttimestamp\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x0f\n\x07payload\x18\x03 \x01(\x0c\x12\x0c\n\x04txid\x18\x04 \x01(\t\x12\"\n\x08proposal\x18\x05 \x01(\x0b\x32\x10.protos.Proposal\x12/\n\x0f\x63haincode_event\x18\x06 
\x01(\x0b\x32\x16.protos.ChaincodeEvent\"\xc7\x02\n\x04Type\x12\r\n\tUNDEFINED\x10\x00\x12\x0c\n\x08REGISTER\x10\x01\x12\x0e\n\nREGISTERED\x10\x02\x12\x08\n\x04INIT\x10\x03\x12\t\n\x05READY\x10\x04\x12\x0f\n\x0bTRANSACTION\x10\x05\x12\r\n\tCOMPLETED\x10\x06\x12\t\n\x05\x45RROR\x10\x07\x12\r\n\tGET_STATE\x10\x08\x12\r\n\tPUT_STATE\x10\t\x12\r\n\tDEL_STATE\x10\n\x12\x14\n\x10INVOKE_CHAINCODE\x10\x0b\x12\x0c\n\x08RESPONSE\x10\r\x12\x16\n\x12GET_STATE_BY_RANGE\x10\x0e\x12\x14\n\x10GET_QUERY_RESULT\x10\x0f\x12\x14\n\x10QUERY_STATE_NEXT\x10\x10\x12\x15\n\x11QUERY_STATE_CLOSE\x10\x11\x12\r\n\tKEEPALIVE\x10\x12\x12\x17\n\x13GET_HISTORY_FOR_KEY\x10\x13\"*\n\x0cPutStateInfo\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\x0c\"3\n\x0fGetStateByRange\x12\x10\n\x08startKey\x18\x01 \x01(\t\x12\x0e\n\x06\x65ndKey\x18\x02 \x01(\t\"\x1f\n\x0eGetQueryResult\x12\r\n\x05query\x18\x01 \x01(\t\"\x1f\n\x10GetHistoryForKey\x12\x0b\n\x03key\x18\x01 \x01(\t\"\x1c\n\x0eQueryStateNext\x12\n\n\x02id\x18\x01 \x01(\t\"\x1d\n\x0fQueryStateClose\x12\n\n\x02id\x18\x01 \x01(\t\"0\n\x12QueryStateKeyValue\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\x0c\"g\n\x12QueryStateResponse\x12\x33\n\x0fkeys_and_values\x18\x01 \x03(\x0b\x32\x1a.protos.QueryStateKeyValue\x12\x10\n\x08has_more\x18\x02 \x01(\x08\x12\n\n\x02id\x18\x03 \x01(\t2X\n\x10\x43haincodeSupport\x12\x44\n\x08Register\x12\x18.protos.ChaincodeMessage\x1a\x18.protos.ChaincodeMessage\"\x00(\x01\x30\x01\x42O\n\"org.hyperledger.fabric.protos.peerZ)github.com/hyperledger/fabric/protos/peerb\x06proto3') - , - dependencies=[peer_dot_chaincode__event__pb2.DESCRIPTOR,peer_dot_proposal__pb2.DESCRIPTOR,google_dot_protobuf_dot_timestamp__pb2.DESCRIPTOR,]) -_sym_db.RegisterFileDescriptor(DESCRIPTOR) - - - -_CHAINCODEMESSAGE_TYPE = _descriptor.EnumDescriptor( - name='Type', - full_name='protos.ChaincodeMessage.Type', - filename=None, - file=DESCRIPTOR, - values=[ - _descriptor.EnumValueDescriptor( - name='UNDEFINED', 
index=0, number=0, - options=None, - type=None), - _descriptor.EnumValueDescriptor( - name='REGISTER', index=1, number=1, - options=None, - type=None), - _descriptor.EnumValueDescriptor( - name='REGISTERED', index=2, number=2, - options=None, - type=None), - _descriptor.EnumValueDescriptor( - name='INIT', index=3, number=3, - options=None, - type=None), - _descriptor.EnumValueDescriptor( - name='READY', index=4, number=4, - options=None, - type=None), - _descriptor.EnumValueDescriptor( - name='TRANSACTION', index=5, number=5, - options=None, - type=None), - _descriptor.EnumValueDescriptor( - name='COMPLETED', index=6, number=6, - options=None, - type=None), - _descriptor.EnumValueDescriptor( - name='ERROR', index=7, number=7, - options=None, - type=None), - _descriptor.EnumValueDescriptor( - name='GET_STATE', index=8, number=8, - options=None, - type=None), - _descriptor.EnumValueDescriptor( - name='PUT_STATE', index=9, number=9, - options=None, - type=None), - _descriptor.EnumValueDescriptor( - name='DEL_STATE', index=10, number=10, - options=None, - type=None), - _descriptor.EnumValueDescriptor( - name='INVOKE_CHAINCODE', index=11, number=11, - options=None, - type=None), - _descriptor.EnumValueDescriptor( - name='RESPONSE', index=12, number=13, - options=None, - type=None), - _descriptor.EnumValueDescriptor( - name='GET_STATE_BY_RANGE', index=13, number=14, - options=None, - type=None), - _descriptor.EnumValueDescriptor( - name='GET_QUERY_RESULT', index=14, number=15, - options=None, - type=None), - _descriptor.EnumValueDescriptor( - name='QUERY_STATE_NEXT', index=15, number=16, - options=None, - type=None), - _descriptor.EnumValueDescriptor( - name='QUERY_STATE_CLOSE', index=16, number=17, - options=None, - type=None), - _descriptor.EnumValueDescriptor( - name='KEEPALIVE', index=17, number=18, - options=None, - type=None), - _descriptor.EnumValueDescriptor( - name='GET_HISTORY_FOR_KEY', index=18, number=19, - options=None, - type=None), - ], - 
containing_type=None, - options=None, - serialized_start=349, - serialized_end=676, -) -_sym_db.RegisterEnumDescriptor(_CHAINCODEMESSAGE_TYPE) - - -_CHAINCODEMESSAGE = _descriptor.Descriptor( - name='ChaincodeMessage', - full_name='protos.ChaincodeMessage', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='type', full_name='protos.ChaincodeMessage.type', index=0, - number=1, type=14, cpp_type=8, label=1, - has_default_value=False, default_value=0, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='timestamp', full_name='protos.ChaincodeMessage.timestamp', index=1, - number=2, type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='payload', full_name='protos.ChaincodeMessage.payload', index=2, - number=3, type=12, cpp_type=9, label=1, - has_default_value=False, default_value=_b(""), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='txid', full_name='protos.ChaincodeMessage.txid', index=3, - number=4, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=_b("").decode('utf-8'), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='proposal', full_name='protos.ChaincodeMessage.proposal', index=4, - number=5, type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='chaincode_event', 
full_name='protos.ChaincodeMessage.chaincode_event', index=5, - number=6, type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - _CHAINCODEMESSAGE_TYPE, - ], - options=None, - is_extendable=False, - syntax='proto3', - extension_ranges=[], - oneofs=[ - ], - serialized_start=120, - serialized_end=676, -) - - -_PUTSTATEINFO = _descriptor.Descriptor( - name='PutStateInfo', - full_name='protos.PutStateInfo', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='key', full_name='protos.PutStateInfo.key', index=0, - number=1, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=_b("").decode('utf-8'), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='value', full_name='protos.PutStateInfo.value', index=1, - number=2, type=12, cpp_type=9, label=1, - has_default_value=False, default_value=_b(""), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - options=None, - is_extendable=False, - syntax='proto3', - extension_ranges=[], - oneofs=[ - ], - serialized_start=678, - serialized_end=720, -) - - -_GETSTATEBYRANGE = _descriptor.Descriptor( - name='GetStateByRange', - full_name='protos.GetStateByRange', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='startKey', full_name='protos.GetStateByRange.startKey', index=0, - number=1, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=_b("").decode('utf-8'), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, 
extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='endKey', full_name='protos.GetStateByRange.endKey', index=1, - number=2, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=_b("").decode('utf-8'), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - options=None, - is_extendable=False, - syntax='proto3', - extension_ranges=[], - oneofs=[ - ], - serialized_start=722, - serialized_end=773, -) - - -_GETQUERYRESULT = _descriptor.Descriptor( - name='GetQueryResult', - full_name='protos.GetQueryResult', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='query', full_name='protos.GetQueryResult.query', index=0, - number=1, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=_b("").decode('utf-8'), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - options=None, - is_extendable=False, - syntax='proto3', - extension_ranges=[], - oneofs=[ - ], - serialized_start=775, - serialized_end=806, -) - - -_GETHISTORYFORKEY = _descriptor.Descriptor( - name='GetHistoryForKey', - full_name='protos.GetHistoryForKey', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='key', full_name='protos.GetHistoryForKey.key', index=0, - number=1, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=_b("").decode('utf-8'), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - options=None, - is_extendable=False, - syntax='proto3', - extension_ranges=[], - oneofs=[ - ], - serialized_start=808, - 
serialized_end=839, -) - - -_QUERYSTATENEXT = _descriptor.Descriptor( - name='QueryStateNext', - full_name='protos.QueryStateNext', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='id', full_name='protos.QueryStateNext.id', index=0, - number=1, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=_b("").decode('utf-8'), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - options=None, - is_extendable=False, - syntax='proto3', - extension_ranges=[], - oneofs=[ - ], - serialized_start=841, - serialized_end=869, -) - - -_QUERYSTATECLOSE = _descriptor.Descriptor( - name='QueryStateClose', - full_name='protos.QueryStateClose', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='id', full_name='protos.QueryStateClose.id', index=0, - number=1, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=_b("").decode('utf-8'), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - options=None, - is_extendable=False, - syntax='proto3', - extension_ranges=[], - oneofs=[ - ], - serialized_start=871, - serialized_end=900, -) - - -_QUERYSTATEKEYVALUE = _descriptor.Descriptor( - name='QueryStateKeyValue', - full_name='protos.QueryStateKeyValue', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='key', full_name='protos.QueryStateKeyValue.key', index=0, - number=1, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=_b("").decode('utf-8'), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - 
name='value', full_name='protos.QueryStateKeyValue.value', index=1, - number=2, type=12, cpp_type=9, label=1, - has_default_value=False, default_value=_b(""), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - options=None, - is_extendable=False, - syntax='proto3', - extension_ranges=[], - oneofs=[ - ], - serialized_start=902, - serialized_end=950, -) - - -_QUERYSTATERESPONSE = _descriptor.Descriptor( - name='QueryStateResponse', - full_name='protos.QueryStateResponse', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='keys_and_values', full_name='protos.QueryStateResponse.keys_and_values', index=0, - number=1, type=11, cpp_type=10, label=3, - has_default_value=False, default_value=[], - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='has_more', full_name='protos.QueryStateResponse.has_more', index=1, - number=2, type=8, cpp_type=7, label=1, - has_default_value=False, default_value=False, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='id', full_name='protos.QueryStateResponse.id', index=2, - number=3, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=_b("").decode('utf-8'), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - options=None, - is_extendable=False, - syntax='proto3', - extension_ranges=[], - oneofs=[ - ], - serialized_start=952, - serialized_end=1055, -) - -_CHAINCODEMESSAGE.fields_by_name['type'].enum_type = _CHAINCODEMESSAGE_TYPE -_CHAINCODEMESSAGE.fields_by_name['timestamp'].message_type = 
google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP -_CHAINCODEMESSAGE.fields_by_name['proposal'].message_type = peer_dot_proposal__pb2._PROPOSAL -_CHAINCODEMESSAGE.fields_by_name['chaincode_event'].message_type = peer_dot_chaincode__event__pb2._CHAINCODEEVENT -_CHAINCODEMESSAGE_TYPE.containing_type = _CHAINCODEMESSAGE -_QUERYSTATERESPONSE.fields_by_name['keys_and_values'].message_type = _QUERYSTATEKEYVALUE -DESCRIPTOR.message_types_by_name['ChaincodeMessage'] = _CHAINCODEMESSAGE -DESCRIPTOR.message_types_by_name['PutStateInfo'] = _PUTSTATEINFO -DESCRIPTOR.message_types_by_name['GetStateByRange'] = _GETSTATEBYRANGE -DESCRIPTOR.message_types_by_name['GetQueryResult'] = _GETQUERYRESULT -DESCRIPTOR.message_types_by_name['GetHistoryForKey'] = _GETHISTORYFORKEY -DESCRIPTOR.message_types_by_name['QueryStateNext'] = _QUERYSTATENEXT -DESCRIPTOR.message_types_by_name['QueryStateClose'] = _QUERYSTATECLOSE -DESCRIPTOR.message_types_by_name['QueryStateKeyValue'] = _QUERYSTATEKEYVALUE -DESCRIPTOR.message_types_by_name['QueryStateResponse'] = _QUERYSTATERESPONSE - -ChaincodeMessage = _reflection.GeneratedProtocolMessageType('ChaincodeMessage', (_message.Message,), dict( - DESCRIPTOR = _CHAINCODEMESSAGE, - __module__ = 'peer.chaincode_shim_pb2' - # @@protoc_insertion_point(class_scope:protos.ChaincodeMessage) - )) -_sym_db.RegisterMessage(ChaincodeMessage) - -PutStateInfo = _reflection.GeneratedProtocolMessageType('PutStateInfo', (_message.Message,), dict( - DESCRIPTOR = _PUTSTATEINFO, - __module__ = 'peer.chaincode_shim_pb2' - # @@protoc_insertion_point(class_scope:protos.PutStateInfo) - )) -_sym_db.RegisterMessage(PutStateInfo) - -GetStateByRange = _reflection.GeneratedProtocolMessageType('GetStateByRange', (_message.Message,), dict( - DESCRIPTOR = _GETSTATEBYRANGE, - __module__ = 'peer.chaincode_shim_pb2' - # @@protoc_insertion_point(class_scope:protos.GetStateByRange) - )) -_sym_db.RegisterMessage(GetStateByRange) - -GetQueryResult = 
_reflection.GeneratedProtocolMessageType('GetQueryResult', (_message.Message,), dict( - DESCRIPTOR = _GETQUERYRESULT, - __module__ = 'peer.chaincode_shim_pb2' - # @@protoc_insertion_point(class_scope:protos.GetQueryResult) - )) -_sym_db.RegisterMessage(GetQueryResult) - -GetHistoryForKey = _reflection.GeneratedProtocolMessageType('GetHistoryForKey', (_message.Message,), dict( - DESCRIPTOR = _GETHISTORYFORKEY, - __module__ = 'peer.chaincode_shim_pb2' - # @@protoc_insertion_point(class_scope:protos.GetHistoryForKey) - )) -_sym_db.RegisterMessage(GetHistoryForKey) - -QueryStateNext = _reflection.GeneratedProtocolMessageType('QueryStateNext', (_message.Message,), dict( - DESCRIPTOR = _QUERYSTATENEXT, - __module__ = 'peer.chaincode_shim_pb2' - # @@protoc_insertion_point(class_scope:protos.QueryStateNext) - )) -_sym_db.RegisterMessage(QueryStateNext) - -QueryStateClose = _reflection.GeneratedProtocolMessageType('QueryStateClose', (_message.Message,), dict( - DESCRIPTOR = _QUERYSTATECLOSE, - __module__ = 'peer.chaincode_shim_pb2' - # @@protoc_insertion_point(class_scope:protos.QueryStateClose) - )) -_sym_db.RegisterMessage(QueryStateClose) - -QueryStateKeyValue = _reflection.GeneratedProtocolMessageType('QueryStateKeyValue', (_message.Message,), dict( - DESCRIPTOR = _QUERYSTATEKEYVALUE, - __module__ = 'peer.chaincode_shim_pb2' - # @@protoc_insertion_point(class_scope:protos.QueryStateKeyValue) - )) -_sym_db.RegisterMessage(QueryStateKeyValue) - -QueryStateResponse = _reflection.GeneratedProtocolMessageType('QueryStateResponse', (_message.Message,), dict( - DESCRIPTOR = _QUERYSTATERESPONSE, - __module__ = 'peer.chaincode_shim_pb2' - # @@protoc_insertion_point(class_scope:protos.QueryStateResponse) - )) -_sym_db.RegisterMessage(QueryStateResponse) - - -DESCRIPTOR.has_options = True -DESCRIPTOR._options = _descriptor._ParseOptions(descriptor_pb2.FileOptions(), _b('\n\"org.hyperledger.fabric.protos.peerZ)github.com/hyperledger/fabric/protos/peer')) -try: - # THESE ELEMENTS 
WILL BE DEPRECATED. - # Please use the generated *_pb2_grpc.py files instead. - import grpc - from grpc.framework.common import cardinality - from grpc.framework.interfaces.face import utilities as face_utilities - from grpc.beta import implementations as beta_implementations - from grpc.beta import interfaces as beta_interfaces - - - class ChaincodeSupportStub(object): - """Interface that provides support to chaincode execution. ChaincodeContext - provides the context necessary for the server to respond appropriately. - """ - - def __init__(self, channel): - """Constructor. - - Args: - channel: A grpc.Channel. - """ - self.Register = channel.stream_stream( - '/protos.ChaincodeSupport/Register', - request_serializer=ChaincodeMessage.SerializeToString, - response_deserializer=ChaincodeMessage.FromString, - ) - - - class ChaincodeSupportServicer(object): - """Interface that provides support to chaincode execution. ChaincodeContext - provides the context necessary for the server to respond appropriately. - """ - - def Register(self, request_iterator, context): - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details('Method not implemented!') - raise NotImplementedError('Method not implemented!') - - - def add_ChaincodeSupportServicer_to_server(servicer, server): - rpc_method_handlers = { - 'Register': grpc.stream_stream_rpc_method_handler( - servicer.Register, - request_deserializer=ChaincodeMessage.FromString, - response_serializer=ChaincodeMessage.SerializeToString, - ), - } - generic_handler = grpc.method_handlers_generic_handler( - 'protos.ChaincodeSupport', rpc_method_handlers) - server.add_generic_rpc_handlers((generic_handler,)) - - - class BetaChaincodeSupportServicer(object): - """The Beta API is deprecated for 0.15.0 and later. - - It is recommended to use the GA API (classes and functions in this - file not marked beta) for all further purposes. 
This class was generated - only to ease transition from grpcio<0.15.0 to grpcio>=0.15.0.""" - """Interface that provides support to chaincode execution. ChaincodeContext - provides the context necessary for the server to respond appropriately. - """ - def Register(self, request_iterator, context): - context.code(beta_interfaces.StatusCode.UNIMPLEMENTED) - - - class BetaChaincodeSupportStub(object): - """The Beta API is deprecated for 0.15.0 and later. - - It is recommended to use the GA API (classes and functions in this - file not marked beta) for all further purposes. This class was generated - only to ease transition from grpcio<0.15.0 to grpcio>=0.15.0.""" - """Interface that provides support to chaincode execution. ChaincodeContext - provides the context necessary for the server to respond appropriately. - """ - def Register(self, request_iterator, timeout, metadata=None, with_call=False, protocol_options=None): - raise NotImplementedError() - - - def beta_create_ChaincodeSupport_server(servicer, pool=None, pool_size=None, default_timeout=None, maximum_timeout=None): - """The Beta API is deprecated for 0.15.0 and later. - - It is recommended to use the GA API (classes and functions in this - file not marked beta) for all further purposes. 
This function was - generated only to ease transition from grpcio<0.15.0 to grpcio>=0.15.0""" - request_deserializers = { - ('protos.ChaincodeSupport', 'Register'): ChaincodeMessage.FromString, - } - response_serializers = { - ('protos.ChaincodeSupport', 'Register'): ChaincodeMessage.SerializeToString, - } - method_implementations = { - ('protos.ChaincodeSupport', 'Register'): face_utilities.stream_stream_inline(servicer.Register), - } - server_options = beta_implementations.server_options(request_deserializers=request_deserializers, response_serializers=response_serializers, thread_pool=pool, thread_pool_size=pool_size, default_timeout=default_timeout, maximum_timeout=maximum_timeout) - return beta_implementations.server(method_implementations, options=server_options) - - - def beta_create_ChaincodeSupport_stub(channel, host=None, metadata_transformer=None, pool=None, pool_size=None): - """The Beta API is deprecated for 0.15.0 and later. - - It is recommended to use the GA API (classes and functions in this - file not marked beta) for all further purposes. 
This function was - generated only to ease transition from grpcio<0.15.0 to grpcio>=0.15.0""" - request_serializers = { - ('protos.ChaincodeSupport', 'Register'): ChaincodeMessage.SerializeToString, - } - response_deserializers = { - ('protos.ChaincodeSupport', 'Register'): ChaincodeMessage.FromString, - } - cardinalities = { - 'Register': cardinality.Cardinality.STREAM_STREAM, - } - stub_options = beta_implementations.stub_options(host=host, metadata_transformer=metadata_transformer, request_serializers=request_serializers, response_deserializers=response_deserializers, thread_pool=pool, thread_pool_size=pool_size) - return beta_implementations.dynamic_stub(channel, 'protos.ChaincodeSupport', cardinalities, options=stub_options) -except ImportError: - pass -# @@protoc_insertion_point(module_scope) diff --git a/app/platform/fabric/e2e-test/feature/peer/chaincode_shim_pb2_grpc.py b/app/platform/fabric/e2e-test/feature/peer/chaincode_shim_pb2_grpc.py deleted file mode 100644 index 49f2357f2..000000000 --- a/app/platform/fabric/e2e-test/feature/peer/chaincode_shim_pb2_grpc.py +++ /dev/null @@ -1,48 +0,0 @@ -# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT! -import grpc -from grpc.framework.common import cardinality -from grpc.framework.interfaces.face import utilities as face_utilities - -import peer.chaincode_shim_pb2 as peer_dot_chaincode__shim__pb2 - - -class ChaincodeSupportStub(object): - """Interface that provides support to chaincode execution. ChaincodeContext - provides the context necessary for the server to respond appropriately. - """ - - def __init__(self, channel): - """Constructor. - - Args: - channel: A grpc.Channel. 
- """ - self.Register = channel.stream_stream( - '/protos.ChaincodeSupport/Register', - request_serializer=peer_dot_chaincode__shim__pb2.ChaincodeMessage.SerializeToString, - response_deserializer=peer_dot_chaincode__shim__pb2.ChaincodeMessage.FromString, - ) - - -class ChaincodeSupportServicer(object): - """Interface that provides support to chaincode execution. ChaincodeContext - provides the context necessary for the server to respond appropriately. - """ - - def Register(self, request_iterator, context): - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details('Method not implemented!') - raise NotImplementedError('Method not implemented!') - - -def add_ChaincodeSupportServicer_to_server(servicer, server): - rpc_method_handlers = { - 'Register': grpc.stream_stream_rpc_method_handler( - servicer.Register, - request_deserializer=peer_dot_chaincode__shim__pb2.ChaincodeMessage.FromString, - response_serializer=peer_dot_chaincode__shim__pb2.ChaincodeMessage.SerializeToString, - ), - } - generic_handler = grpc.method_handlers_generic_handler( - 'protos.ChaincodeSupport', rpc_method_handlers) - server.add_generic_rpc_handlers((generic_handler,)) diff --git a/app/platform/fabric/e2e-test/feature/peer/configuration_pb2.py b/app/platform/fabric/e2e-test/feature/peer/configuration_pb2.py deleted file mode 100644 index 2aba1a786..000000000 --- a/app/platform/fabric/e2e-test/feature/peer/configuration_pb2.py +++ /dev/null @@ -1,128 +0,0 @@ -# Generated by the protocol buffer compiler. DO NOT EDIT! 
-# source: peer/configuration.proto - -import sys -_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1')) -from google.protobuf import descriptor as _descriptor -from google.protobuf import message as _message -from google.protobuf import reflection as _reflection -from google.protobuf import symbol_database as _symbol_database -from google.protobuf import descriptor_pb2 -# @@protoc_insertion_point(imports) - -_sym_db = _symbol_database.Default() - - - - -DESCRIPTOR = _descriptor.FileDescriptor( - name='peer/configuration.proto', - package='protos', - syntax='proto3', - serialized_pb=_b('\n\x18peer/configuration.proto\x12\x06protos\"7\n\x0b\x41nchorPeers\x12(\n\x0c\x61nchor_peers\x18\x01 \x03(\x0b\x32\x12.protos.AnchorPeer\"(\n\nAnchorPeer\x12\x0c\n\x04host\x18\x01 \x01(\t\x12\x0c\n\x04port\x18\x02 \x01(\x05\x42O\n\"org.hyperledger.fabric.protos.peerZ)github.com/hyperledger/fabric/protos/peerb\x06proto3') -) -_sym_db.RegisterFileDescriptor(DESCRIPTOR) - - - - -_ANCHORPEERS = _descriptor.Descriptor( - name='AnchorPeers', - full_name='protos.AnchorPeers', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='anchor_peers', full_name='protos.AnchorPeers.anchor_peers', index=0, - number=1, type=11, cpp_type=10, label=3, - has_default_value=False, default_value=[], - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - options=None, - is_extendable=False, - syntax='proto3', - extension_ranges=[], - oneofs=[ - ], - serialized_start=36, - serialized_end=91, -) - - -_ANCHORPEER = _descriptor.Descriptor( - name='AnchorPeer', - full_name='protos.AnchorPeer', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='host', full_name='protos.AnchorPeer.host', index=0, - number=1, type=9, cpp_type=9, label=1, - 
has_default_value=False, default_value=_b("").decode('utf-8'), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='port', full_name='protos.AnchorPeer.port', index=1, - number=2, type=5, cpp_type=1, label=1, - has_default_value=False, default_value=0, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - options=None, - is_extendable=False, - syntax='proto3', - extension_ranges=[], - oneofs=[ - ], - serialized_start=93, - serialized_end=133, -) - -_ANCHORPEERS.fields_by_name['anchor_peers'].message_type = _ANCHORPEER -DESCRIPTOR.message_types_by_name['AnchorPeers'] = _ANCHORPEERS -DESCRIPTOR.message_types_by_name['AnchorPeer'] = _ANCHORPEER - -AnchorPeers = _reflection.GeneratedProtocolMessageType('AnchorPeers', (_message.Message,), dict( - DESCRIPTOR = _ANCHORPEERS, - __module__ = 'peer.configuration_pb2' - # @@protoc_insertion_point(class_scope:protos.AnchorPeers) - )) -_sym_db.RegisterMessage(AnchorPeers) - -AnchorPeer = _reflection.GeneratedProtocolMessageType('AnchorPeer', (_message.Message,), dict( - DESCRIPTOR = _ANCHORPEER, - __module__ = 'peer.configuration_pb2' - # @@protoc_insertion_point(class_scope:protos.AnchorPeer) - )) -_sym_db.RegisterMessage(AnchorPeer) - - -DESCRIPTOR.has_options = True -DESCRIPTOR._options = _descriptor._ParseOptions(descriptor_pb2.FileOptions(), _b('\n\"org.hyperledger.fabric.protos.peerZ)github.com/hyperledger/fabric/protos/peer')) -try: - # THESE ELEMENTS WILL BE DEPRECATED. - # Please use the generated *_pb2_grpc.py files instead. 
- import grpc - from grpc.framework.common import cardinality - from grpc.framework.interfaces.face import utilities as face_utilities - from grpc.beta import implementations as beta_implementations - from grpc.beta import interfaces as beta_interfaces -except ImportError: - pass -# @@protoc_insertion_point(module_scope) diff --git a/app/platform/fabric/e2e-test/feature/peer/configuration_pb2_grpc.py b/app/platform/fabric/e2e-test/feature/peer/configuration_pb2_grpc.py deleted file mode 100644 index d5557c123..000000000 --- a/app/platform/fabric/e2e-test/feature/peer/configuration_pb2_grpc.py +++ /dev/null @@ -1,5 +0,0 @@ -# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT! -import grpc -from grpc.framework.common import cardinality -from grpc.framework.interfaces.face import utilities as face_utilities - diff --git a/app/platform/fabric/e2e-test/feature/peer/events_pb2.py b/app/platform/fabric/e2e-test/feature/peer/events_pb2.py deleted file mode 100644 index 720911e49..000000000 --- a/app/platform/fabric/e2e-test/feature/peer/events_pb2.py +++ /dev/null @@ -1,568 +0,0 @@ -# Generated by the protocol buffer compiler. DO NOT EDIT! 
-# source: peer/events.proto - -import sys -_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1')) -from google.protobuf.internal import enum_type_wrapper -from google.protobuf import descriptor as _descriptor -from google.protobuf import message as _message -from google.protobuf import reflection as _reflection -from google.protobuf import symbol_database as _symbol_database -from google.protobuf import descriptor_pb2 -# @@protoc_insertion_point(imports) - -_sym_db = _symbol_database.Default() - - -from common import common_pb2 as common_dot_common__pb2 -from peer import chaincode_event_pb2 as peer_dot_chaincode__event__pb2 -from peer import transaction_pb2 as peer_dot_transaction__pb2 - - -DESCRIPTOR = _descriptor.FileDescriptor( - name='peer/events.proto', - package='protos', - syntax='proto3', - serialized_pb=_b('\n\x11peer/events.proto\x12\x06protos\x1a\x13\x63ommon/common.proto\x1a\x1apeer/chaincode_event.proto\x1a\x16peer/transaction.proto\"8\n\x0c\x43haincodeReg\x12\x14\n\x0c\x63haincode_id\x18\x01 \x01(\t\x12\x12\n\nevent_name\x18\x02 \x01(\t\"\x81\x01\n\x08Interest\x12%\n\nevent_type\x18\x01 \x01(\x0e\x32\x11.protos.EventType\x12\x32\n\x12\x63haincode_reg_info\x18\x02 \x01(\x0b\x32\x14.protos.ChaincodeRegH\x00\x12\x0f\n\x07\x63hainID\x18\x03 \x01(\tB\t\n\x07RegInfo\",\n\x08Register\x12 \n\x06\x65vents\x18\x01 \x03(\x0b\x32\x10.protos.Interest\"?\n\tRejection\x12\x1f\n\x02tx\x18\x01 \x01(\x0b\x32\x13.protos.Transaction\x12\x11\n\terror_msg\x18\x02 \x01(\t\".\n\nUnregister\x12 \n\x06\x65vents\x18\x01 \x03(\x0b\x32\x10.protos.Interest\"4\n\x0bSignedEvent\x12\x11\n\tsignature\x18\x01 \x01(\x0c\x12\x12\n\neventBytes\x18\x02 \x01(\x0c\"\xec\x01\n\x05\x45vent\x12$\n\x08register\x18\x01 \x01(\x0b\x32\x10.protos.RegisterH\x00\x12\x1e\n\x05\x62lock\x18\x02 \x01(\x0b\x32\r.common.BlockH\x00\x12\x31\n\x0f\x63haincode_event\x18\x03 \x01(\x0b\x32\x16.protos.ChaincodeEventH\x00\x12&\n\trejection\x18\x04 
\x01(\x0b\x32\x11.protos.RejectionH\x00\x12(\n\nunregister\x18\x05 \x01(\x0b\x32\x12.protos.UnregisterH\x00\x12\x0f\n\x07\x63reator\x18\x06 \x01(\x0c\x42\x07\n\x05\x45vent*B\n\tEventType\x12\x0c\n\x08REGISTER\x10\x00\x12\t\n\x05\x42LOCK\x10\x01\x12\r\n\tCHAINCODE\x10\x02\x12\r\n\tREJECTION\x10\x03\x32\x34\n\x06\x45vents\x12*\n\x04\x43hat\x12\r.protos.Event\x1a\r.protos.Event\"\x00(\x01\x30\x01\x42^\n\"org.hyperledger.fabric.protos.peerB\rEventsPackageZ)github.com/hyperledger/fabric/protos/peerb\x06proto3') - , - dependencies=[common_dot_common__pb2.DESCRIPTOR,peer_dot_chaincode__event__pb2.DESCRIPTOR,peer_dot_transaction__pb2.DESCRIPTOR,]) -_sym_db.RegisterFileDescriptor(DESCRIPTOR) - -_EVENTTYPE = _descriptor.EnumDescriptor( - name='EventType', - full_name='protos.EventType', - filename=None, - file=DESCRIPTOR, - values=[ - _descriptor.EnumValueDescriptor( - name='REGISTER', index=0, number=0, - options=None, - type=None), - _descriptor.EnumValueDescriptor( - name='BLOCK', index=1, number=1, - options=None, - type=None), - _descriptor.EnumValueDescriptor( - name='CHAINCODE', index=2, number=2, - options=None, - type=None), - _descriptor.EnumValueDescriptor( - name='REJECTION', index=3, number=3, - options=None, - type=None), - ], - containing_type=None, - options=None, - serialized_start=744, - serialized_end=810, -) -_sym_db.RegisterEnumDescriptor(_EVENTTYPE) - -EventType = enum_type_wrapper.EnumTypeWrapper(_EVENTTYPE) -REGISTER = 0 -BLOCK = 1 -CHAINCODE = 2 -REJECTION = 3 - - - -_CHAINCODEREG = _descriptor.Descriptor( - name='ChaincodeReg', - full_name='protos.ChaincodeReg', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='chaincode_id', full_name='protos.ChaincodeReg.chaincode_id', index=0, - number=1, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=_b("").decode('utf-8'), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - 
options=None), - _descriptor.FieldDescriptor( - name='event_name', full_name='protos.ChaincodeReg.event_name', index=1, - number=2, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=_b("").decode('utf-8'), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - options=None, - is_extendable=False, - syntax='proto3', - extension_ranges=[], - oneofs=[ - ], - serialized_start=102, - serialized_end=158, -) - - -_INTEREST = _descriptor.Descriptor( - name='Interest', - full_name='protos.Interest', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='event_type', full_name='protos.Interest.event_type', index=0, - number=1, type=14, cpp_type=8, label=1, - has_default_value=False, default_value=0, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='chaincode_reg_info', full_name='protos.Interest.chaincode_reg_info', index=1, - number=2, type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='chainID', full_name='protos.Interest.chainID', index=2, - number=3, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=_b("").decode('utf-8'), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - options=None, - is_extendable=False, - syntax='proto3', - extension_ranges=[], - oneofs=[ - _descriptor.OneofDescriptor( - name='RegInfo', full_name='protos.Interest.RegInfo', - index=0, containing_type=None, fields=[]), - ], - serialized_start=161, - 
serialized_end=290, -) - - -_REGISTER = _descriptor.Descriptor( - name='Register', - full_name='protos.Register', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='events', full_name='protos.Register.events', index=0, - number=1, type=11, cpp_type=10, label=3, - has_default_value=False, default_value=[], - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - options=None, - is_extendable=False, - syntax='proto3', - extension_ranges=[], - oneofs=[ - ], - serialized_start=292, - serialized_end=336, -) - - -_REJECTION = _descriptor.Descriptor( - name='Rejection', - full_name='protos.Rejection', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='tx', full_name='protos.Rejection.tx', index=0, - number=1, type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='error_msg', full_name='protos.Rejection.error_msg', index=1, - number=2, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=_b("").decode('utf-8'), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - options=None, - is_extendable=False, - syntax='proto3', - extension_ranges=[], - oneofs=[ - ], - serialized_start=338, - serialized_end=401, -) - - -_UNREGISTER = _descriptor.Descriptor( - name='Unregister', - full_name='protos.Unregister', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='events', full_name='protos.Unregister.events', index=0, - number=1, type=11, cpp_type=10, label=3, 
- has_default_value=False, default_value=[], - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - options=None, - is_extendable=False, - syntax='proto3', - extension_ranges=[], - oneofs=[ - ], - serialized_start=403, - serialized_end=449, -) - - -_SIGNEDEVENT = _descriptor.Descriptor( - name='SignedEvent', - full_name='protos.SignedEvent', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='signature', full_name='protos.SignedEvent.signature', index=0, - number=1, type=12, cpp_type=9, label=1, - has_default_value=False, default_value=_b(""), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='eventBytes', full_name='protos.SignedEvent.eventBytes', index=1, - number=2, type=12, cpp_type=9, label=1, - has_default_value=False, default_value=_b(""), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - options=None, - is_extendable=False, - syntax='proto3', - extension_ranges=[], - oneofs=[ - ], - serialized_start=451, - serialized_end=503, -) - - -_EVENT = _descriptor.Descriptor( - name='Event', - full_name='protos.Event', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='register', full_name='protos.Event.register', index=0, - number=1, type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='block', full_name='protos.Event.block', index=1, - number=2, type=11, cpp_type=10, label=1, - has_default_value=False, 
default_value=None, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='chaincode_event', full_name='protos.Event.chaincode_event', index=2, - number=3, type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='rejection', full_name='protos.Event.rejection', index=3, - number=4, type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='unregister', full_name='protos.Event.unregister', index=4, - number=5, type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='creator', full_name='protos.Event.creator', index=5, - number=6, type=12, cpp_type=9, label=1, - has_default_value=False, default_value=_b(""), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - options=None, - is_extendable=False, - syntax='proto3', - extension_ranges=[], - oneofs=[ - _descriptor.OneofDescriptor( - name='Event', full_name='protos.Event.Event', - index=0, containing_type=None, fields=[]), - ], - serialized_start=506, - serialized_end=742, -) - -_INTEREST.fields_by_name['event_type'].enum_type = _EVENTTYPE -_INTEREST.fields_by_name['chaincode_reg_info'].message_type = _CHAINCODEREG -_INTEREST.oneofs_by_name['RegInfo'].fields.append( - _INTEREST.fields_by_name['chaincode_reg_info']) 
-_INTEREST.fields_by_name['chaincode_reg_info'].containing_oneof = _INTEREST.oneofs_by_name['RegInfo'] -_REGISTER.fields_by_name['events'].message_type = _INTEREST -_REJECTION.fields_by_name['tx'].message_type = peer_dot_transaction__pb2._TRANSACTION -_UNREGISTER.fields_by_name['events'].message_type = _INTEREST -_EVENT.fields_by_name['register'].message_type = _REGISTER -_EVENT.fields_by_name['block'].message_type = common_dot_common__pb2._BLOCK -_EVENT.fields_by_name['chaincode_event'].message_type = peer_dot_chaincode__event__pb2._CHAINCODEEVENT -_EVENT.fields_by_name['rejection'].message_type = _REJECTION -_EVENT.fields_by_name['unregister'].message_type = _UNREGISTER -_EVENT.oneofs_by_name['Event'].fields.append( - _EVENT.fields_by_name['register']) -_EVENT.fields_by_name['register'].containing_oneof = _EVENT.oneofs_by_name['Event'] -_EVENT.oneofs_by_name['Event'].fields.append( - _EVENT.fields_by_name['block']) -_EVENT.fields_by_name['block'].containing_oneof = _EVENT.oneofs_by_name['Event'] -_EVENT.oneofs_by_name['Event'].fields.append( - _EVENT.fields_by_name['chaincode_event']) -_EVENT.fields_by_name['chaincode_event'].containing_oneof = _EVENT.oneofs_by_name['Event'] -_EVENT.oneofs_by_name['Event'].fields.append( - _EVENT.fields_by_name['rejection']) -_EVENT.fields_by_name['rejection'].containing_oneof = _EVENT.oneofs_by_name['Event'] -_EVENT.oneofs_by_name['Event'].fields.append( - _EVENT.fields_by_name['unregister']) -_EVENT.fields_by_name['unregister'].containing_oneof = _EVENT.oneofs_by_name['Event'] -DESCRIPTOR.message_types_by_name['ChaincodeReg'] = _CHAINCODEREG -DESCRIPTOR.message_types_by_name['Interest'] = _INTEREST -DESCRIPTOR.message_types_by_name['Register'] = _REGISTER -DESCRIPTOR.message_types_by_name['Rejection'] = _REJECTION -DESCRIPTOR.message_types_by_name['Unregister'] = _UNREGISTER -DESCRIPTOR.message_types_by_name['SignedEvent'] = _SIGNEDEVENT -DESCRIPTOR.message_types_by_name['Event'] = _EVENT 
-DESCRIPTOR.enum_types_by_name['EventType'] = _EVENTTYPE - -ChaincodeReg = _reflection.GeneratedProtocolMessageType('ChaincodeReg', (_message.Message,), dict( - DESCRIPTOR = _CHAINCODEREG, - __module__ = 'peer.events_pb2' - # @@protoc_insertion_point(class_scope:protos.ChaincodeReg) - )) -_sym_db.RegisterMessage(ChaincodeReg) - -Interest = _reflection.GeneratedProtocolMessageType('Interest', (_message.Message,), dict( - DESCRIPTOR = _INTEREST, - __module__ = 'peer.events_pb2' - # @@protoc_insertion_point(class_scope:protos.Interest) - )) -_sym_db.RegisterMessage(Interest) - -Register = _reflection.GeneratedProtocolMessageType('Register', (_message.Message,), dict( - DESCRIPTOR = _REGISTER, - __module__ = 'peer.events_pb2' - # @@protoc_insertion_point(class_scope:protos.Register) - )) -_sym_db.RegisterMessage(Register) - -Rejection = _reflection.GeneratedProtocolMessageType('Rejection', (_message.Message,), dict( - DESCRIPTOR = _REJECTION, - __module__ = 'peer.events_pb2' - # @@protoc_insertion_point(class_scope:protos.Rejection) - )) -_sym_db.RegisterMessage(Rejection) - -Unregister = _reflection.GeneratedProtocolMessageType('Unregister', (_message.Message,), dict( - DESCRIPTOR = _UNREGISTER, - __module__ = 'peer.events_pb2' - # @@protoc_insertion_point(class_scope:protos.Unregister) - )) -_sym_db.RegisterMessage(Unregister) - -SignedEvent = _reflection.GeneratedProtocolMessageType('SignedEvent', (_message.Message,), dict( - DESCRIPTOR = _SIGNEDEVENT, - __module__ = 'peer.events_pb2' - # @@protoc_insertion_point(class_scope:protos.SignedEvent) - )) -_sym_db.RegisterMessage(SignedEvent) - -Event = _reflection.GeneratedProtocolMessageType('Event', (_message.Message,), dict( - DESCRIPTOR = _EVENT, - __module__ = 'peer.events_pb2' - # @@protoc_insertion_point(class_scope:protos.Event) - )) -_sym_db.RegisterMessage(Event) - - -DESCRIPTOR.has_options = True -DESCRIPTOR._options = _descriptor._ParseOptions(descriptor_pb2.FileOptions(), 
_b('\n\"org.hyperledger.fabric.protos.peerB\rEventsPackageZ)github.com/hyperledger/fabric/protos/peer')) -try: - # THESE ELEMENTS WILL BE DEPRECATED. - # Please use the generated *_pb2_grpc.py files instead. - import grpc - from grpc.framework.common import cardinality - from grpc.framework.interfaces.face import utilities as face_utilities - from grpc.beta import implementations as beta_implementations - from grpc.beta import interfaces as beta_interfaces - - - class EventsStub(object): - """Interface exported by the events server - """ - - def __init__(self, channel): - """Constructor. - - Args: - channel: A grpc.Channel. - """ - self.Chat = channel.stream_stream( - '/protos.Events/Chat', - request_serializer=Event.SerializeToString, - response_deserializer=Event.FromString, - ) - - - class EventsServicer(object): - """Interface exported by the events server - """ - - def Chat(self, request_iterator, context): - """event chatting using Event - """ - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details('Method not implemented!') - raise NotImplementedError('Method not implemented!') - - - def add_EventsServicer_to_server(servicer, server): - rpc_method_handlers = { - 'Chat': grpc.stream_stream_rpc_method_handler( - servicer.Chat, - request_deserializer=Event.FromString, - response_serializer=Event.SerializeToString, - ), - } - generic_handler = grpc.method_handlers_generic_handler( - 'protos.Events', rpc_method_handlers) - server.add_generic_rpc_handlers((generic_handler,)) - - - class BetaEventsServicer(object): - """The Beta API is deprecated for 0.15.0 and later. - - It is recommended to use the GA API (classes and functions in this - file not marked beta) for all further purposes. 
This class was generated - only to ease transition from grpcio<0.15.0 to grpcio>=0.15.0.""" - """Interface exported by the events server - """ - def Chat(self, request_iterator, context): - """event chatting using Event - """ - context.code(beta_interfaces.StatusCode.UNIMPLEMENTED) - - - class BetaEventsStub(object): - """The Beta API is deprecated for 0.15.0 and later. - - It is recommended to use the GA API (classes and functions in this - file not marked beta) for all further purposes. This class was generated - only to ease transition from grpcio<0.15.0 to grpcio>=0.15.0.""" - """Interface exported by the events server - """ - def Chat(self, request_iterator, timeout, metadata=None, with_call=False, protocol_options=None): - """event chatting using Event - """ - raise NotImplementedError() - - - def beta_create_Events_server(servicer, pool=None, pool_size=None, default_timeout=None, maximum_timeout=None): - """The Beta API is deprecated for 0.15.0 and later. - - It is recommended to use the GA API (classes and functions in this - file not marked beta) for all further purposes. This function was - generated only to ease transition from grpcio<0.15.0 to grpcio>=0.15.0""" - request_deserializers = { - ('protos.Events', 'Chat'): Event.FromString, - } - response_serializers = { - ('protos.Events', 'Chat'): Event.SerializeToString, - } - method_implementations = { - ('protos.Events', 'Chat'): face_utilities.stream_stream_inline(servicer.Chat), - } - server_options = beta_implementations.server_options(request_deserializers=request_deserializers, response_serializers=response_serializers, thread_pool=pool, thread_pool_size=pool_size, default_timeout=default_timeout, maximum_timeout=maximum_timeout) - return beta_implementations.server(method_implementations, options=server_options) - - - def beta_create_Events_stub(channel, host=None, metadata_transformer=None, pool=None, pool_size=None): - """The Beta API is deprecated for 0.15.0 and later. 
- - It is recommended to use the GA API (classes and functions in this - file not marked beta) for all further purposes. This function was - generated only to ease transition from grpcio<0.15.0 to grpcio>=0.15.0""" - request_serializers = { - ('protos.Events', 'Chat'): Event.SerializeToString, - } - response_deserializers = { - ('protos.Events', 'Chat'): Event.FromString, - } - cardinalities = { - 'Chat': cardinality.Cardinality.STREAM_STREAM, - } - stub_options = beta_implementations.stub_options(host=host, metadata_transformer=metadata_transformer, request_serializers=request_serializers, response_deserializers=response_deserializers, thread_pool=pool, thread_pool_size=pool_size) - return beta_implementations.dynamic_stub(channel, 'protos.Events', cardinalities, options=stub_options) -except ImportError: - pass -# @@protoc_insertion_point(module_scope) diff --git a/app/platform/fabric/e2e-test/feature/peer/events_pb2_grpc.py b/app/platform/fabric/e2e-test/feature/peer/events_pb2_grpc.py deleted file mode 100644 index 8331caf53..000000000 --- a/app/platform/fabric/e2e-test/feature/peer/events_pb2_grpc.py +++ /dev/null @@ -1,48 +0,0 @@ -# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT! -import grpc -from grpc.framework.common import cardinality -from grpc.framework.interfaces.face import utilities as face_utilities - -import peer.events_pb2 as peer_dot_events__pb2 - - -class EventsStub(object): - """Interface exported by the events server - """ - - def __init__(self, channel): - """Constructor. - - Args: - channel: A grpc.Channel. 
- """ - self.Chat = channel.stream_stream( - '/protos.Events/Chat', - request_serializer=peer_dot_events__pb2.Event.SerializeToString, - response_deserializer=peer_dot_events__pb2.Event.FromString, - ) - - -class EventsServicer(object): - """Interface exported by the events server - """ - - def Chat(self, request_iterator, context): - """event chatting using Event - """ - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details('Method not implemented!') - raise NotImplementedError('Method not implemented!') - - -def add_EventsServicer_to_server(servicer, server): - rpc_method_handlers = { - 'Chat': grpc.stream_stream_rpc_method_handler( - servicer.Chat, - request_deserializer=peer_dot_events__pb2.Event.FromString, - response_serializer=peer_dot_events__pb2.Event.SerializeToString, - ), - } - generic_handler = grpc.method_handlers_generic_handler( - 'protos.Events', rpc_method_handlers) - server.add_generic_rpc_handlers((generic_handler,)) diff --git a/app/platform/fabric/e2e-test/feature/peer/peer_pb2.py b/app/platform/fabric/e2e-test/feature/peer/peer_pb2.py deleted file mode 100644 index 6177b5974..000000000 --- a/app/platform/fabric/e2e-test/feature/peer/peer_pb2.py +++ /dev/null @@ -1,226 +0,0 @@ -# Generated by the protocol buffer compiler. DO NOT EDIT! 
-# source: peer/peer.proto - -import sys -_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1')) -from google.protobuf import descriptor as _descriptor -from google.protobuf import message as _message -from google.protobuf import reflection as _reflection -from google.protobuf import symbol_database as _symbol_database -from google.protobuf import descriptor_pb2 -# @@protoc_insertion_point(imports) - -_sym_db = _symbol_database.Default() - - -from peer import proposal_pb2 as peer_dot_proposal__pb2 -from peer import proposal_response_pb2 as peer_dot_proposal__response__pb2 - - -DESCRIPTOR = _descriptor.FileDescriptor( - name='peer/peer.proto', - package='protos', - syntax='proto3', - serialized_pb=_b('\n\x0fpeer/peer.proto\x12\x06protos\x1a\x13peer/proposal.proto\x1a\x1cpeer/proposal_response.proto\"\x16\n\x06PeerID\x12\x0c\n\x04name\x18\x01 \x01(\t\";\n\x0cPeerEndpoint\x12\x1a\n\x02id\x18\x01 \x01(\x0b\x32\x0e.protos.PeerID\x12\x0f\n\x07\x61\x64\x64ress\x18\x02 \x01(\t2Q\n\x08\x45ndorser\x12\x45\n\x0fProcessProposal\x12\x16.protos.SignedProposal\x1a\x18.protos.ProposalResponse\"\x00\x42O\n\"org.hyperledger.fabric.protos.peerZ)github.com/hyperledger/fabric/protos/peerb\x06proto3') - , - dependencies=[peer_dot_proposal__pb2.DESCRIPTOR,peer_dot_proposal__response__pb2.DESCRIPTOR,]) -_sym_db.RegisterFileDescriptor(DESCRIPTOR) - - - - -_PEERID = _descriptor.Descriptor( - name='PeerID', - full_name='protos.PeerID', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='name', full_name='protos.PeerID.name', index=0, - number=1, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=_b("").decode('utf-8'), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - options=None, - is_extendable=False, - syntax='proto3', - extension_ranges=[], - oneofs=[ - ], - 
serialized_start=78, - serialized_end=100, -) - - -_PEERENDPOINT = _descriptor.Descriptor( - name='PeerEndpoint', - full_name='protos.PeerEndpoint', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='id', full_name='protos.PeerEndpoint.id', index=0, - number=1, type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='address', full_name='protos.PeerEndpoint.address', index=1, - number=2, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=_b("").decode('utf-8'), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - options=None, - is_extendable=False, - syntax='proto3', - extension_ranges=[], - oneofs=[ - ], - serialized_start=102, - serialized_end=161, -) - -_PEERENDPOINT.fields_by_name['id'].message_type = _PEERID -DESCRIPTOR.message_types_by_name['PeerID'] = _PEERID -DESCRIPTOR.message_types_by_name['PeerEndpoint'] = _PEERENDPOINT - -PeerID = _reflection.GeneratedProtocolMessageType('PeerID', (_message.Message,), dict( - DESCRIPTOR = _PEERID, - __module__ = 'peer.peer_pb2' - # @@protoc_insertion_point(class_scope:protos.PeerID) - )) -_sym_db.RegisterMessage(PeerID) - -PeerEndpoint = _reflection.GeneratedProtocolMessageType('PeerEndpoint', (_message.Message,), dict( - DESCRIPTOR = _PEERENDPOINT, - __module__ = 'peer.peer_pb2' - # @@protoc_insertion_point(class_scope:protos.PeerEndpoint) - )) -_sym_db.RegisterMessage(PeerEndpoint) - - -DESCRIPTOR.has_options = True -DESCRIPTOR._options = _descriptor._ParseOptions(descriptor_pb2.FileOptions(), _b('\n\"org.hyperledger.fabric.protos.peerZ)github.com/hyperledger/fabric/protos/peer')) -try: - # THESE ELEMENTS WILL BE DEPRECATED. 
- # Please use the generated *_pb2_grpc.py files instead. - import grpc - from grpc.framework.common import cardinality - from grpc.framework.interfaces.face import utilities as face_utilities - from grpc.beta import implementations as beta_implementations - from grpc.beta import interfaces as beta_interfaces - - - class EndorserStub(object): - - def __init__(self, channel): - """Constructor. - - Args: - channel: A grpc.Channel. - """ - self.ProcessProposal = channel.unary_unary( - '/protos.Endorser/ProcessProposal', - request_serializer=peer_dot_proposal__pb2.SignedProposal.SerializeToString, - response_deserializer=peer_dot_proposal__response__pb2.ProposalResponse.FromString, - ) - - - class EndorserServicer(object): - - def ProcessProposal(self, request, context): - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details('Method not implemented!') - raise NotImplementedError('Method not implemented!') - - - def add_EndorserServicer_to_server(servicer, server): - rpc_method_handlers = { - 'ProcessProposal': grpc.unary_unary_rpc_method_handler( - servicer.ProcessProposal, - request_deserializer=peer_dot_proposal__pb2.SignedProposal.FromString, - response_serializer=peer_dot_proposal__response__pb2.ProposalResponse.SerializeToString, - ), - } - generic_handler = grpc.method_handlers_generic_handler( - 'protos.Endorser', rpc_method_handlers) - server.add_generic_rpc_handlers((generic_handler,)) - - - class BetaEndorserServicer(object): - """The Beta API is deprecated for 0.15.0 and later. - - It is recommended to use the GA API (classes and functions in this - file not marked beta) for all further purposes. This class was generated - only to ease transition from grpcio<0.15.0 to grpcio>=0.15.0.""" - def ProcessProposal(self, request, context): - context.code(beta_interfaces.StatusCode.UNIMPLEMENTED) - - - class BetaEndorserStub(object): - """The Beta API is deprecated for 0.15.0 and later. 
- - It is recommended to use the GA API (classes and functions in this - file not marked beta) for all further purposes. This class was generated - only to ease transition from grpcio<0.15.0 to grpcio>=0.15.0.""" - def ProcessProposal(self, request, timeout, metadata=None, with_call=False, protocol_options=None): - raise NotImplementedError() - ProcessProposal.future = None - - - def beta_create_Endorser_server(servicer, pool=None, pool_size=None, default_timeout=None, maximum_timeout=None): - """The Beta API is deprecated for 0.15.0 and later. - - It is recommended to use the GA API (classes and functions in this - file not marked beta) for all further purposes. This function was - generated only to ease transition from grpcio<0.15.0 to grpcio>=0.15.0""" - request_deserializers = { - ('protos.Endorser', 'ProcessProposal'): peer_dot_proposal__pb2.SignedProposal.FromString, - } - response_serializers = { - ('protos.Endorser', 'ProcessProposal'): peer_dot_proposal__response__pb2.ProposalResponse.SerializeToString, - } - method_implementations = { - ('protos.Endorser', 'ProcessProposal'): face_utilities.unary_unary_inline(servicer.ProcessProposal), - } - server_options = beta_implementations.server_options(request_deserializers=request_deserializers, response_serializers=response_serializers, thread_pool=pool, thread_pool_size=pool_size, default_timeout=default_timeout, maximum_timeout=maximum_timeout) - return beta_implementations.server(method_implementations, options=server_options) - - - def beta_create_Endorser_stub(channel, host=None, metadata_transformer=None, pool=None, pool_size=None): - """The Beta API is deprecated for 0.15.0 and later. - - It is recommended to use the GA API (classes and functions in this - file not marked beta) for all further purposes. 
This function was - generated only to ease transition from grpcio<0.15.0 to grpcio>=0.15.0""" - request_serializers = { - ('protos.Endorser', 'ProcessProposal'): peer_dot_proposal__pb2.SignedProposal.SerializeToString, - } - response_deserializers = { - ('protos.Endorser', 'ProcessProposal'): peer_dot_proposal__response__pb2.ProposalResponse.FromString, - } - cardinalities = { - 'ProcessProposal': cardinality.Cardinality.UNARY_UNARY, - } - stub_options = beta_implementations.stub_options(host=host, metadata_transformer=metadata_transformer, request_serializers=request_serializers, response_deserializers=response_deserializers, thread_pool=pool, thread_pool_size=pool_size) - return beta_implementations.dynamic_stub(channel, 'protos.Endorser', cardinalities, options=stub_options) -except ImportError: - pass -# @@protoc_insertion_point(module_scope) diff --git a/app/platform/fabric/e2e-test/feature/peer/peer_pb2_grpc.py b/app/platform/fabric/e2e-test/feature/peer/peer_pb2_grpc.py deleted file mode 100644 index 116fc050e..000000000 --- a/app/platform/fabric/e2e-test/feature/peer/peer_pb2_grpc.py +++ /dev/null @@ -1,43 +0,0 @@ -# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT! -import grpc -from grpc.framework.common import cardinality -from grpc.framework.interfaces.face import utilities as face_utilities - -import peer.proposal_pb2 as peer_dot_proposal__pb2 -import peer.proposal_response_pb2 as peer_dot_proposal__response__pb2 - - -class EndorserStub(object): - - def __init__(self, channel): - """Constructor. - - Args: - channel: A grpc.Channel. 
- """ - self.ProcessProposal = channel.unary_unary( - '/protos.Endorser/ProcessProposal', - request_serializer=peer_dot_proposal__pb2.SignedProposal.SerializeToString, - response_deserializer=peer_dot_proposal__response__pb2.ProposalResponse.FromString, - ) - - -class EndorserServicer(object): - - def ProcessProposal(self, request, context): - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details('Method not implemented!') - raise NotImplementedError('Method not implemented!') - - -def add_EndorserServicer_to_server(servicer, server): - rpc_method_handlers = { - 'ProcessProposal': grpc.unary_unary_rpc_method_handler( - servicer.ProcessProposal, - request_deserializer=peer_dot_proposal__pb2.SignedProposal.FromString, - response_serializer=peer_dot_proposal__response__pb2.ProposalResponse.SerializeToString, - ), - } - generic_handler = grpc.method_handlers_generic_handler( - 'protos.Endorser', rpc_method_handlers) - server.add_generic_rpc_handlers((generic_handler,)) diff --git a/app/platform/fabric/e2e-test/feature/peer/proposal_pb2.py b/app/platform/fabric/e2e-test/feature/peer/proposal_pb2.py deleted file mode 100644 index 57daac9ca..000000000 --- a/app/platform/fabric/e2e-test/feature/peer/proposal_pb2.py +++ /dev/null @@ -1,340 +0,0 @@ -# Generated by the protocol buffer compiler. DO NOT EDIT! 
-# source: peer/proposal.proto - -import sys -_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1')) -from google.protobuf import descriptor as _descriptor -from google.protobuf import message as _message -from google.protobuf import reflection as _reflection -from google.protobuf import symbol_database as _symbol_database -from google.protobuf import descriptor_pb2 -# @@protoc_insertion_point(imports) - -_sym_db = _symbol_database.Default() - - -from peer import chaincode_pb2 as peer_dot_chaincode__pb2 -from peer import proposal_response_pb2 as peer_dot_proposal__response__pb2 - - -DESCRIPTOR = _descriptor.FileDescriptor( - name='peer/proposal.proto', - package='protos', - syntax='proto3', - serialized_pb=_b('\n\x13peer/proposal.proto\x12\x06protos\x1a\x14peer/chaincode.proto\x1a\x1cpeer/proposal_response.proto\";\n\x0eSignedProposal\x12\x16\n\x0eproposal_bytes\x18\x01 \x01(\x0c\x12\x11\n\tsignature\x18\x02 \x01(\x0c\">\n\x08Proposal\x12\x0e\n\x06header\x18\x01 \x01(\x0c\x12\x0f\n\x07payload\x18\x02 \x01(\x0c\x12\x11\n\textension\x18\x03 \x01(\x0c\"a\n\x18\x43haincodeHeaderExtension\x12\x1a\n\x12payload_visibility\x18\x01 \x01(\x0c\x12)\n\x0c\x63haincode_id\x18\x02 \x01(\x0b\x32\x13.protos.ChaincodeID\"\xa8\x01\n\x18\x43haincodeProposalPayload\x12\r\n\x05input\x18\x01 \x01(\x0c\x12H\n\x0cTransientMap\x18\x02 \x03(\x0b\x32\x32.protos.ChaincodeProposalPayload.TransientMapEntry\x1a\x33\n\x11TransientMapEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\x0c:\x02\x38\x01\"V\n\x0f\x43haincodeAction\x12\x0f\n\x07results\x18\x01 \x01(\x0c\x12\x0e\n\x06\x65vents\x18\x02 \x01(\x0c\x12\"\n\x08response\x18\x03 \x01(\x0b\x32\x10.protos.ResponseB`\n\"org.hyperledger.fabric.protos.peerB\x0fProposalPackageZ)github.com/hyperledger/fabric/protos/peerb\x06proto3') - , - dependencies=[peer_dot_chaincode__pb2.DESCRIPTOR,peer_dot_proposal__response__pb2.DESCRIPTOR,]) -_sym_db.RegisterFileDescriptor(DESCRIPTOR) - - - - -_SIGNEDPROPOSAL = 
_descriptor.Descriptor( - name='SignedProposal', - full_name='protos.SignedProposal', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='proposal_bytes', full_name='protos.SignedProposal.proposal_bytes', index=0, - number=1, type=12, cpp_type=9, label=1, - has_default_value=False, default_value=_b(""), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='signature', full_name='protos.SignedProposal.signature', index=1, - number=2, type=12, cpp_type=9, label=1, - has_default_value=False, default_value=_b(""), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - options=None, - is_extendable=False, - syntax='proto3', - extension_ranges=[], - oneofs=[ - ], - serialized_start=83, - serialized_end=142, -) - - -_PROPOSAL = _descriptor.Descriptor( - name='Proposal', - full_name='protos.Proposal', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='header', full_name='protos.Proposal.header', index=0, - number=1, type=12, cpp_type=9, label=1, - has_default_value=False, default_value=_b(""), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='payload', full_name='protos.Proposal.payload', index=1, - number=2, type=12, cpp_type=9, label=1, - has_default_value=False, default_value=_b(""), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='extension', full_name='protos.Proposal.extension', index=2, - number=3, type=12, cpp_type=9, label=1, - has_default_value=False, default_value=_b(""), - message_type=None, 
enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - options=None, - is_extendable=False, - syntax='proto3', - extension_ranges=[], - oneofs=[ - ], - serialized_start=144, - serialized_end=206, -) - - -_CHAINCODEHEADEREXTENSION = _descriptor.Descriptor( - name='ChaincodeHeaderExtension', - full_name='protos.ChaincodeHeaderExtension', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='payload_visibility', full_name='protos.ChaincodeHeaderExtension.payload_visibility', index=0, - number=1, type=12, cpp_type=9, label=1, - has_default_value=False, default_value=_b(""), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='chaincode_id', full_name='protos.ChaincodeHeaderExtension.chaincode_id', index=1, - number=2, type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - options=None, - is_extendable=False, - syntax='proto3', - extension_ranges=[], - oneofs=[ - ], - serialized_start=208, - serialized_end=305, -) - - -_CHAINCODEPROPOSALPAYLOAD_TRANSIENTMAPENTRY = _descriptor.Descriptor( - name='TransientMapEntry', - full_name='protos.ChaincodeProposalPayload.TransientMapEntry', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='key', full_name='protos.ChaincodeProposalPayload.TransientMapEntry.key', index=0, - number=1, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=_b("").decode('utf-8'), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - 
_descriptor.FieldDescriptor( - name='value', full_name='protos.ChaincodeProposalPayload.TransientMapEntry.value', index=1, - number=2, type=12, cpp_type=9, label=1, - has_default_value=False, default_value=_b(""), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - options=_descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001')), - is_extendable=False, - syntax='proto3', - extension_ranges=[], - oneofs=[ - ], - serialized_start=425, - serialized_end=476, -) - -_CHAINCODEPROPOSALPAYLOAD = _descriptor.Descriptor( - name='ChaincodeProposalPayload', - full_name='protos.ChaincodeProposalPayload', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='input', full_name='protos.ChaincodeProposalPayload.input', index=0, - number=1, type=12, cpp_type=9, label=1, - has_default_value=False, default_value=_b(""), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='TransientMap', full_name='protos.ChaincodeProposalPayload.TransientMap', index=1, - number=2, type=11, cpp_type=10, label=3, - has_default_value=False, default_value=[], - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - ], - extensions=[ - ], - nested_types=[_CHAINCODEPROPOSALPAYLOAD_TRANSIENTMAPENTRY, ], - enum_types=[ - ], - options=None, - is_extendable=False, - syntax='proto3', - extension_ranges=[], - oneofs=[ - ], - serialized_start=308, - serialized_end=476, -) - - -_CHAINCODEACTION = _descriptor.Descriptor( - name='ChaincodeAction', - full_name='protos.ChaincodeAction', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='results', full_name='protos.ChaincodeAction.results', 
index=0, - number=1, type=12, cpp_type=9, label=1, - has_default_value=False, default_value=_b(""), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='events', full_name='protos.ChaincodeAction.events', index=1, - number=2, type=12, cpp_type=9, label=1, - has_default_value=False, default_value=_b(""), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='response', full_name='protos.ChaincodeAction.response', index=2, - number=3, type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - options=None, - is_extendable=False, - syntax='proto3', - extension_ranges=[], - oneofs=[ - ], - serialized_start=478, - serialized_end=564, -) - -_CHAINCODEHEADEREXTENSION.fields_by_name['chaincode_id'].message_type = peer_dot_chaincode__pb2._CHAINCODEID -_CHAINCODEPROPOSALPAYLOAD_TRANSIENTMAPENTRY.containing_type = _CHAINCODEPROPOSALPAYLOAD -_CHAINCODEPROPOSALPAYLOAD.fields_by_name['TransientMap'].message_type = _CHAINCODEPROPOSALPAYLOAD_TRANSIENTMAPENTRY -_CHAINCODEACTION.fields_by_name['response'].message_type = peer_dot_proposal__response__pb2._RESPONSE -DESCRIPTOR.message_types_by_name['SignedProposal'] = _SIGNEDPROPOSAL -DESCRIPTOR.message_types_by_name['Proposal'] = _PROPOSAL -DESCRIPTOR.message_types_by_name['ChaincodeHeaderExtension'] = _CHAINCODEHEADEREXTENSION -DESCRIPTOR.message_types_by_name['ChaincodeProposalPayload'] = _CHAINCODEPROPOSALPAYLOAD -DESCRIPTOR.message_types_by_name['ChaincodeAction'] = _CHAINCODEACTION - -SignedProposal = _reflection.GeneratedProtocolMessageType('SignedProposal', (_message.Message,), dict( - DESCRIPTOR = _SIGNEDPROPOSAL, - 
__module__ = 'peer.proposal_pb2' - # @@protoc_insertion_point(class_scope:protos.SignedProposal) - )) -_sym_db.RegisterMessage(SignedProposal) - -Proposal = _reflection.GeneratedProtocolMessageType('Proposal', (_message.Message,), dict( - DESCRIPTOR = _PROPOSAL, - __module__ = 'peer.proposal_pb2' - # @@protoc_insertion_point(class_scope:protos.Proposal) - )) -_sym_db.RegisterMessage(Proposal) - -ChaincodeHeaderExtension = _reflection.GeneratedProtocolMessageType('ChaincodeHeaderExtension', (_message.Message,), dict( - DESCRIPTOR = _CHAINCODEHEADEREXTENSION, - __module__ = 'peer.proposal_pb2' - # @@protoc_insertion_point(class_scope:protos.ChaincodeHeaderExtension) - )) -_sym_db.RegisterMessage(ChaincodeHeaderExtension) - -ChaincodeProposalPayload = _reflection.GeneratedProtocolMessageType('ChaincodeProposalPayload', (_message.Message,), dict( - - TransientMapEntry = _reflection.GeneratedProtocolMessageType('TransientMapEntry', (_message.Message,), dict( - DESCRIPTOR = _CHAINCODEPROPOSALPAYLOAD_TRANSIENTMAPENTRY, - __module__ = 'peer.proposal_pb2' - # @@protoc_insertion_point(class_scope:protos.ChaincodeProposalPayload.TransientMapEntry) - )) - , - DESCRIPTOR = _CHAINCODEPROPOSALPAYLOAD, - __module__ = 'peer.proposal_pb2' - # @@protoc_insertion_point(class_scope:protos.ChaincodeProposalPayload) - )) -_sym_db.RegisterMessage(ChaincodeProposalPayload) -_sym_db.RegisterMessage(ChaincodeProposalPayload.TransientMapEntry) - -ChaincodeAction = _reflection.GeneratedProtocolMessageType('ChaincodeAction', (_message.Message,), dict( - DESCRIPTOR = _CHAINCODEACTION, - __module__ = 'peer.proposal_pb2' - # @@protoc_insertion_point(class_scope:protos.ChaincodeAction) - )) -_sym_db.RegisterMessage(ChaincodeAction) - - -DESCRIPTOR.has_options = True -DESCRIPTOR._options = _descriptor._ParseOptions(descriptor_pb2.FileOptions(), _b('\n\"org.hyperledger.fabric.protos.peerB\017ProposalPackageZ)github.com/hyperledger/fabric/protos/peer')) 
-_CHAINCODEPROPOSALPAYLOAD_TRANSIENTMAPENTRY.has_options = True -_CHAINCODEPROPOSALPAYLOAD_TRANSIENTMAPENTRY._options = _descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001')) -try: - # THESE ELEMENTS WILL BE DEPRECATED. - # Please use the generated *_pb2_grpc.py files instead. - import grpc - from grpc.framework.common import cardinality - from grpc.framework.interfaces.face import utilities as face_utilities - from grpc.beta import implementations as beta_implementations - from grpc.beta import interfaces as beta_interfaces -except ImportError: - pass -# @@protoc_insertion_point(module_scope) diff --git a/app/platform/fabric/e2e-test/feature/peer/proposal_pb2_grpc.py b/app/platform/fabric/e2e-test/feature/peer/proposal_pb2_grpc.py deleted file mode 100644 index d5557c123..000000000 --- a/app/platform/fabric/e2e-test/feature/peer/proposal_pb2_grpc.py +++ /dev/null @@ -1,5 +0,0 @@ -# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT! -import grpc -from grpc.framework.common import cardinality -from grpc.framework.interfaces.face import utilities as face_utilities - diff --git a/app/platform/fabric/e2e-test/feature/peer/proposal_response_pb2.py b/app/platform/fabric/e2e-test/feature/peer/proposal_response_pb2.py deleted file mode 100644 index 09ef33152..000000000 --- a/app/platform/fabric/e2e-test/feature/peer/proposal_response_pb2.py +++ /dev/null @@ -1,259 +0,0 @@ -# Generated by the protocol buffer compiler. DO NOT EDIT! 
-# source: peer/proposal_response.proto - -import sys -_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1')) -from google.protobuf import descriptor as _descriptor -from google.protobuf import message as _message -from google.protobuf import reflection as _reflection -from google.protobuf import symbol_database as _symbol_database -from google.protobuf import descriptor_pb2 -# @@protoc_insertion_point(imports) - -_sym_db = _symbol_database.Default() - - -from google.protobuf import timestamp_pb2 as google_dot_protobuf_dot_timestamp__pb2 - - -DESCRIPTOR = _descriptor.FileDescriptor( - name='peer/proposal_response.proto', - package='protos', - syntax='proto3', - serialized_pb=_b('\n\x1cpeer/proposal_response.proto\x12\x06protos\x1a\x1fgoogle/protobuf/timestamp.proto\"\xb1\x01\n\x10ProposalResponse\x12\x0f\n\x07version\x18\x01 \x01(\x05\x12-\n\ttimestamp\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\"\n\x08response\x18\x04 \x01(\x0b\x32\x10.protos.Response\x12\x0f\n\x07payload\x18\x05 \x01(\x0c\x12(\n\x0b\x65ndorsement\x18\x06 \x01(\x0b\x32\x13.protos.Endorsement\"<\n\x08Response\x12\x0e\n\x06status\x18\x01 \x01(\x05\x12\x0f\n\x07message\x18\x02 \x01(\t\x12\x0f\n\x07payload\x18\x03 \x01(\x0c\"C\n\x17ProposalResponsePayload\x12\x15\n\rproposal_hash\x18\x01 \x01(\x0c\x12\x11\n\textension\x18\x02 \x01(\x0c\"2\n\x0b\x45ndorsement\x12\x10\n\x08\x65ndorser\x18\x01 \x01(\x0c\x12\x11\n\tsignature\x18\x02 \x01(\x0c\x42h\n\"org.hyperledger.fabric.protos.peerB\x17ProposalResponsePackageZ)github.com/hyperledger/fabric/protos/peerb\x06proto3') - , - dependencies=[google_dot_protobuf_dot_timestamp__pb2.DESCRIPTOR,]) -_sym_db.RegisterFileDescriptor(DESCRIPTOR) - - - - -_PROPOSALRESPONSE = _descriptor.Descriptor( - name='ProposalResponse', - full_name='protos.ProposalResponse', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='version', full_name='protos.ProposalResponse.version', index=0, - 
number=1, type=5, cpp_type=1, label=1, - has_default_value=False, default_value=0, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='timestamp', full_name='protos.ProposalResponse.timestamp', index=1, - number=2, type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='response', full_name='protos.ProposalResponse.response', index=2, - number=4, type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='payload', full_name='protos.ProposalResponse.payload', index=3, - number=5, type=12, cpp_type=9, label=1, - has_default_value=False, default_value=_b(""), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='endorsement', full_name='protos.ProposalResponse.endorsement', index=4, - number=6, type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - options=None, - is_extendable=False, - syntax='proto3', - extension_ranges=[], - oneofs=[ - ], - serialized_start=74, - serialized_end=251, -) - - -_RESPONSE = _descriptor.Descriptor( - name='Response', - full_name='protos.Response', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='status', full_name='protos.Response.status', index=0, - number=1, type=5, cpp_type=1, label=1, - 
has_default_value=False, default_value=0, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='message', full_name='protos.Response.message', index=1, - number=2, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=_b("").decode('utf-8'), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='payload', full_name='protos.Response.payload', index=2, - number=3, type=12, cpp_type=9, label=1, - has_default_value=False, default_value=_b(""), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - options=None, - is_extendable=False, - syntax='proto3', - extension_ranges=[], - oneofs=[ - ], - serialized_start=253, - serialized_end=313, -) - - -_PROPOSALRESPONSEPAYLOAD = _descriptor.Descriptor( - name='ProposalResponsePayload', - full_name='protos.ProposalResponsePayload', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='proposal_hash', full_name='protos.ProposalResponsePayload.proposal_hash', index=0, - number=1, type=12, cpp_type=9, label=1, - has_default_value=False, default_value=_b(""), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='extension', full_name='protos.ProposalResponsePayload.extension', index=1, - number=2, type=12, cpp_type=9, label=1, - has_default_value=False, default_value=_b(""), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - options=None, - is_extendable=False, - syntax='proto3', - 
extension_ranges=[], - oneofs=[ - ], - serialized_start=315, - serialized_end=382, -) - - -_ENDORSEMENT = _descriptor.Descriptor( - name='Endorsement', - full_name='protos.Endorsement', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='endorser', full_name='protos.Endorsement.endorser', index=0, - number=1, type=12, cpp_type=9, label=1, - has_default_value=False, default_value=_b(""), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='signature', full_name='protos.Endorsement.signature', index=1, - number=2, type=12, cpp_type=9, label=1, - has_default_value=False, default_value=_b(""), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - options=None, - is_extendable=False, - syntax='proto3', - extension_ranges=[], - oneofs=[ - ], - serialized_start=384, - serialized_end=434, -) - -_PROPOSALRESPONSE.fields_by_name['timestamp'].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP -_PROPOSALRESPONSE.fields_by_name['response'].message_type = _RESPONSE -_PROPOSALRESPONSE.fields_by_name['endorsement'].message_type = _ENDORSEMENT -DESCRIPTOR.message_types_by_name['ProposalResponse'] = _PROPOSALRESPONSE -DESCRIPTOR.message_types_by_name['Response'] = _RESPONSE -DESCRIPTOR.message_types_by_name['ProposalResponsePayload'] = _PROPOSALRESPONSEPAYLOAD -DESCRIPTOR.message_types_by_name['Endorsement'] = _ENDORSEMENT - -ProposalResponse = _reflection.GeneratedProtocolMessageType('ProposalResponse', (_message.Message,), dict( - DESCRIPTOR = _PROPOSALRESPONSE, - __module__ = 'peer.proposal_response_pb2' - # @@protoc_insertion_point(class_scope:protos.ProposalResponse) - )) -_sym_db.RegisterMessage(ProposalResponse) - -Response = 
_reflection.GeneratedProtocolMessageType('Response', (_message.Message,), dict( - DESCRIPTOR = _RESPONSE, - __module__ = 'peer.proposal_response_pb2' - # @@protoc_insertion_point(class_scope:protos.Response) - )) -_sym_db.RegisterMessage(Response) - -ProposalResponsePayload = _reflection.GeneratedProtocolMessageType('ProposalResponsePayload', (_message.Message,), dict( - DESCRIPTOR = _PROPOSALRESPONSEPAYLOAD, - __module__ = 'peer.proposal_response_pb2' - # @@protoc_insertion_point(class_scope:protos.ProposalResponsePayload) - )) -_sym_db.RegisterMessage(ProposalResponsePayload) - -Endorsement = _reflection.GeneratedProtocolMessageType('Endorsement', (_message.Message,), dict( - DESCRIPTOR = _ENDORSEMENT, - __module__ = 'peer.proposal_response_pb2' - # @@protoc_insertion_point(class_scope:protos.Endorsement) - )) -_sym_db.RegisterMessage(Endorsement) - - -DESCRIPTOR.has_options = True -DESCRIPTOR._options = _descriptor._ParseOptions(descriptor_pb2.FileOptions(), _b('\n\"org.hyperledger.fabric.protos.peerB\027ProposalResponsePackageZ)github.com/hyperledger/fabric/protos/peer')) -try: - # THESE ELEMENTS WILL BE DEPRECATED. - # Please use the generated *_pb2_grpc.py files instead. - import grpc - from grpc.framework.common import cardinality - from grpc.framework.interfaces.face import utilities as face_utilities - from grpc.beta import implementations as beta_implementations - from grpc.beta import interfaces as beta_interfaces -except ImportError: - pass -# @@protoc_insertion_point(module_scope) diff --git a/app/platform/fabric/e2e-test/feature/peer/proposal_response_pb2_grpc.py b/app/platform/fabric/e2e-test/feature/peer/proposal_response_pb2_grpc.py deleted file mode 100644 index d5557c123..000000000 --- a/app/platform/fabric/e2e-test/feature/peer/proposal_response_pb2_grpc.py +++ /dev/null @@ -1,5 +0,0 @@ -# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT! 
-import grpc -from grpc.framework.common import cardinality -from grpc.framework.interfaces.face import utilities as face_utilities - diff --git a/app/platform/fabric/e2e-test/feature/peer/query_pb2.py b/app/platform/fabric/e2e-test/feature/peer/query_pb2.py deleted file mode 100644 index e9a3631fb..000000000 --- a/app/platform/fabric/e2e-test/feature/peer/query_pb2.py +++ /dev/null @@ -1,235 +0,0 @@ -# Generated by the protocol buffer compiler. DO NOT EDIT! -# source: peer/query.proto - -import sys -_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1')) -from google.protobuf import descriptor as _descriptor -from google.protobuf import message as _message -from google.protobuf import reflection as _reflection -from google.protobuf import symbol_database as _symbol_database -from google.protobuf import descriptor_pb2 -# @@protoc_insertion_point(imports) - -_sym_db = _symbol_database.Default() - - - - -DESCRIPTOR = _descriptor.FileDescriptor( - name='peer/query.proto', - package='protos', - syntax='proto3', - serialized_pb=_b('\n\x10peer/query.proto\x12\x06protos\"C\n\x16\x43haincodeQueryResponse\x12)\n\nchaincodes\x18\x01 \x03(\x0b\x32\x15.protos.ChaincodeInfo\"g\n\rChaincodeInfo\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x0f\n\x07version\x18\x02 \x01(\t\x12\x0c\n\x04path\x18\x03 \x01(\t\x12\r\n\x05input\x18\x04 \x01(\t\x12\x0c\n\x04\x65scc\x18\x05 \x01(\t\x12\x0c\n\x04vscc\x18\x06 \x01(\t\"=\n\x14\x43hannelQueryResponse\x12%\n\x08\x63hannels\x18\x01 \x03(\x0b\x32\x13.protos.ChannelInfo\"!\n\x0b\x43hannelInfo\x12\x12\n\nchannel_id\x18\x01 \x01(\tBO\n\"org.hyperledger.fabric.protos.peerZ)github.com/hyperledger/fabric/protos/peerb\x06proto3') -) -_sym_db.RegisterFileDescriptor(DESCRIPTOR) - - - - -_CHAINCODEQUERYRESPONSE = _descriptor.Descriptor( - name='ChaincodeQueryResponse', - full_name='protos.ChaincodeQueryResponse', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='chaincodes', 
full_name='protos.ChaincodeQueryResponse.chaincodes', index=0, - number=1, type=11, cpp_type=10, label=3, - has_default_value=False, default_value=[], - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - options=None, - is_extendable=False, - syntax='proto3', - extension_ranges=[], - oneofs=[ - ], - serialized_start=28, - serialized_end=95, -) - - -_CHAINCODEINFO = _descriptor.Descriptor( - name='ChaincodeInfo', - full_name='protos.ChaincodeInfo', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='name', full_name='protos.ChaincodeInfo.name', index=0, - number=1, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=_b("").decode('utf-8'), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='version', full_name='protos.ChaincodeInfo.version', index=1, - number=2, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=_b("").decode('utf-8'), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='path', full_name='protos.ChaincodeInfo.path', index=2, - number=3, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=_b("").decode('utf-8'), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='input', full_name='protos.ChaincodeInfo.input', index=3, - number=4, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=_b("").decode('utf-8'), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='escc', 
full_name='protos.ChaincodeInfo.escc', index=4, - number=5, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=_b("").decode('utf-8'), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='vscc', full_name='protos.ChaincodeInfo.vscc', index=5, - number=6, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=_b("").decode('utf-8'), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - options=None, - is_extendable=False, - syntax='proto3', - extension_ranges=[], - oneofs=[ - ], - serialized_start=97, - serialized_end=200, -) - - -_CHANNELQUERYRESPONSE = _descriptor.Descriptor( - name='ChannelQueryResponse', - full_name='protos.ChannelQueryResponse', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='channels', full_name='protos.ChannelQueryResponse.channels', index=0, - number=1, type=11, cpp_type=10, label=3, - has_default_value=False, default_value=[], - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - options=None, - is_extendable=False, - syntax='proto3', - extension_ranges=[], - oneofs=[ - ], - serialized_start=202, - serialized_end=263, -) - - -_CHANNELINFO = _descriptor.Descriptor( - name='ChannelInfo', - full_name='protos.ChannelInfo', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='channel_id', full_name='protos.ChannelInfo.channel_id', index=0, - number=1, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=_b("").decode('utf-8'), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, 
extension_scope=None, - options=None), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - options=None, - is_extendable=False, - syntax='proto3', - extension_ranges=[], - oneofs=[ - ], - serialized_start=265, - serialized_end=298, -) - -_CHAINCODEQUERYRESPONSE.fields_by_name['chaincodes'].message_type = _CHAINCODEINFO -_CHANNELQUERYRESPONSE.fields_by_name['channels'].message_type = _CHANNELINFO -DESCRIPTOR.message_types_by_name['ChaincodeQueryResponse'] = _CHAINCODEQUERYRESPONSE -DESCRIPTOR.message_types_by_name['ChaincodeInfo'] = _CHAINCODEINFO -DESCRIPTOR.message_types_by_name['ChannelQueryResponse'] = _CHANNELQUERYRESPONSE -DESCRIPTOR.message_types_by_name['ChannelInfo'] = _CHANNELINFO - -ChaincodeQueryResponse = _reflection.GeneratedProtocolMessageType('ChaincodeQueryResponse', (_message.Message,), dict( - DESCRIPTOR = _CHAINCODEQUERYRESPONSE, - __module__ = 'peer.query_pb2' - # @@protoc_insertion_point(class_scope:protos.ChaincodeQueryResponse) - )) -_sym_db.RegisterMessage(ChaincodeQueryResponse) - -ChaincodeInfo = _reflection.GeneratedProtocolMessageType('ChaincodeInfo', (_message.Message,), dict( - DESCRIPTOR = _CHAINCODEINFO, - __module__ = 'peer.query_pb2' - # @@protoc_insertion_point(class_scope:protos.ChaincodeInfo) - )) -_sym_db.RegisterMessage(ChaincodeInfo) - -ChannelQueryResponse = _reflection.GeneratedProtocolMessageType('ChannelQueryResponse', (_message.Message,), dict( - DESCRIPTOR = _CHANNELQUERYRESPONSE, - __module__ = 'peer.query_pb2' - # @@protoc_insertion_point(class_scope:protos.ChannelQueryResponse) - )) -_sym_db.RegisterMessage(ChannelQueryResponse) - -ChannelInfo = _reflection.GeneratedProtocolMessageType('ChannelInfo', (_message.Message,), dict( - DESCRIPTOR = _CHANNELINFO, - __module__ = 'peer.query_pb2' - # @@protoc_insertion_point(class_scope:protos.ChannelInfo) - )) -_sym_db.RegisterMessage(ChannelInfo) - - -DESCRIPTOR.has_options = True -DESCRIPTOR._options = _descriptor._ParseOptions(descriptor_pb2.FileOptions(), 
_b('\n\"org.hyperledger.fabric.protos.peerZ)github.com/hyperledger/fabric/protos/peer')) -try: - # THESE ELEMENTS WILL BE DEPRECATED. - # Please use the generated *_pb2_grpc.py files instead. - import grpc - from grpc.framework.common import cardinality - from grpc.framework.interfaces.face import utilities as face_utilities - from grpc.beta import implementations as beta_implementations - from grpc.beta import interfaces as beta_interfaces -except ImportError: - pass -# @@protoc_insertion_point(module_scope) diff --git a/app/platform/fabric/e2e-test/feature/peer/query_pb2_grpc.py b/app/platform/fabric/e2e-test/feature/peer/query_pb2_grpc.py deleted file mode 100644 index d5557c123..000000000 --- a/app/platform/fabric/e2e-test/feature/peer/query_pb2_grpc.py +++ /dev/null @@ -1,5 +0,0 @@ -# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT! -import grpc -from grpc.framework.common import cardinality -from grpc.framework.interfaces.face import utilities as face_utilities - diff --git a/app/platform/fabric/e2e-test/feature/peer/resources_pb2.py b/app/platform/fabric/e2e-test/feature/peer/resources_pb2.py deleted file mode 100644 index 27702668a..000000000 --- a/app/platform/fabric/e2e-test/feature/peer/resources_pb2.py +++ /dev/null @@ -1,301 +0,0 @@ -# Generated by the protocol buffer compiler. DO NOT EDIT! 
-# source: peer/resources.proto - -import sys -_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1')) -from google.protobuf import descriptor as _descriptor -from google.protobuf import message as _message -from google.protobuf import reflection as _reflection -from google.protobuf import symbol_database as _symbol_database -from google.protobuf import descriptor_pb2 -# @@protoc_insertion_point(imports) - -_sym_db = _symbol_database.Default() - - -from common import configtx_pb2 as common_dot_configtx__pb2 - - -DESCRIPTOR = _descriptor.FileDescriptor( - name='peer/resources.proto', - package='protos', - syntax='proto3', - serialized_pb=_b('\n\x14peer/resources.proto\x12\x06protos\x1a\x15\x63ommon/configtx.proto\"!\n\x0b\x41PIResource\x12\x12\n\npolicy_ref\x18\x01 \x01(\t\"4\n\x13\x43haincodeIdentifier\x12\x0c\n\x04hash\x18\x01 \x01(\x0c\x12\x0f\n\x07version\x18\x02 \x01(\t\"5\n\x13\x43haincodeValidation\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x10\n\x08\x61rgument\x18\x02 \x01(\x0c\"*\n\x08VSCCArgs\x12\x1e\n\x16\x65ndorsement_policy_ref\x18\x01 \x01(\t\"$\n\x14\x43haincodeEndorsement\x12\x0c\n\x04name\x18\x01 \x01(\t\"^\n\nConfigTree\x12&\n\x0e\x63hannel_config\x18\x01 \x01(\x0b\x32\x0e.common.Config\x12(\n\x10resources_config\x18\x02 \x01(\x0b\x32\x0e.common.ConfigBO\n\"org.hyperledger.fabric.protos.peerZ)github.com/hyperledger/fabric/protos/peerb\x06proto3') - , - dependencies=[common_dot_configtx__pb2.DESCRIPTOR,]) - - - - -_APIRESOURCE = _descriptor.Descriptor( - name='APIResource', - full_name='protos.APIResource', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='policy_ref', full_name='protos.APIResource.policy_ref', index=0, - number=1, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=_b("").decode('utf-8'), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - ], - extensions=[ - ], - 
nested_types=[], - enum_types=[ - ], - options=None, - is_extendable=False, - syntax='proto3', - extension_ranges=[], - oneofs=[ - ], - serialized_start=55, - serialized_end=88, -) - - -_CHAINCODEIDENTIFIER = _descriptor.Descriptor( - name='ChaincodeIdentifier', - full_name='protos.ChaincodeIdentifier', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='hash', full_name='protos.ChaincodeIdentifier.hash', index=0, - number=1, type=12, cpp_type=9, label=1, - has_default_value=False, default_value=_b(""), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='version', full_name='protos.ChaincodeIdentifier.version', index=1, - number=2, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=_b("").decode('utf-8'), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - options=None, - is_extendable=False, - syntax='proto3', - extension_ranges=[], - oneofs=[ - ], - serialized_start=90, - serialized_end=142, -) - - -_CHAINCODEVALIDATION = _descriptor.Descriptor( - name='ChaincodeValidation', - full_name='protos.ChaincodeValidation', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='name', full_name='protos.ChaincodeValidation.name', index=0, - number=1, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=_b("").decode('utf-8'), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='argument', full_name='protos.ChaincodeValidation.argument', index=1, - number=2, type=12, cpp_type=9, label=1, - has_default_value=False, default_value=_b(""), - message_type=None, enum_type=None, 
containing_type=None, - is_extension=False, extension_scope=None, - options=None), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - options=None, - is_extendable=False, - syntax='proto3', - extension_ranges=[], - oneofs=[ - ], - serialized_start=144, - serialized_end=197, -) - - -_VSCCARGS = _descriptor.Descriptor( - name='VSCCArgs', - full_name='protos.VSCCArgs', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='endorsement_policy_ref', full_name='protos.VSCCArgs.endorsement_policy_ref', index=0, - number=1, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=_b("").decode('utf-8'), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - options=None, - is_extendable=False, - syntax='proto3', - extension_ranges=[], - oneofs=[ - ], - serialized_start=199, - serialized_end=241, -) - - -_CHAINCODEENDORSEMENT = _descriptor.Descriptor( - name='ChaincodeEndorsement', - full_name='protos.ChaincodeEndorsement', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='name', full_name='protos.ChaincodeEndorsement.name', index=0, - number=1, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=_b("").decode('utf-8'), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - options=None, - is_extendable=False, - syntax='proto3', - extension_ranges=[], - oneofs=[ - ], - serialized_start=243, - serialized_end=279, -) - - -_CONFIGTREE = _descriptor.Descriptor( - name='ConfigTree', - full_name='protos.ConfigTree', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='channel_config', 
full_name='protos.ConfigTree.channel_config', index=0, - number=1, type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='resources_config', full_name='protos.ConfigTree.resources_config', index=1, - number=2, type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - options=None, - is_extendable=False, - syntax='proto3', - extension_ranges=[], - oneofs=[ - ], - serialized_start=281, - serialized_end=375, -) - -_CONFIGTREE.fields_by_name['channel_config'].message_type = common_dot_configtx__pb2._CONFIG -_CONFIGTREE.fields_by_name['resources_config'].message_type = common_dot_configtx__pb2._CONFIG -DESCRIPTOR.message_types_by_name['APIResource'] = _APIRESOURCE -DESCRIPTOR.message_types_by_name['ChaincodeIdentifier'] = _CHAINCODEIDENTIFIER -DESCRIPTOR.message_types_by_name['ChaincodeValidation'] = _CHAINCODEVALIDATION -DESCRIPTOR.message_types_by_name['VSCCArgs'] = _VSCCARGS -DESCRIPTOR.message_types_by_name['ChaincodeEndorsement'] = _CHAINCODEENDORSEMENT -DESCRIPTOR.message_types_by_name['ConfigTree'] = _CONFIGTREE -_sym_db.RegisterFileDescriptor(DESCRIPTOR) - -APIResource = _reflection.GeneratedProtocolMessageType('APIResource', (_message.Message,), dict( - DESCRIPTOR = _APIRESOURCE, - __module__ = 'peer.resources_pb2' - # @@protoc_insertion_point(class_scope:protos.APIResource) - )) -_sym_db.RegisterMessage(APIResource) - -ChaincodeIdentifier = _reflection.GeneratedProtocolMessageType('ChaincodeIdentifier', (_message.Message,), dict( - DESCRIPTOR = _CHAINCODEIDENTIFIER, - __module__ = 'peer.resources_pb2' - # @@protoc_insertion_point(class_scope:protos.ChaincodeIdentifier) - )) 
-_sym_db.RegisterMessage(ChaincodeIdentifier) - -ChaincodeValidation = _reflection.GeneratedProtocolMessageType('ChaincodeValidation', (_message.Message,), dict( - DESCRIPTOR = _CHAINCODEVALIDATION, - __module__ = 'peer.resources_pb2' - # @@protoc_insertion_point(class_scope:protos.ChaincodeValidation) - )) -_sym_db.RegisterMessage(ChaincodeValidation) - -VSCCArgs = _reflection.GeneratedProtocolMessageType('VSCCArgs', (_message.Message,), dict( - DESCRIPTOR = _VSCCARGS, - __module__ = 'peer.resources_pb2' - # @@protoc_insertion_point(class_scope:protos.VSCCArgs) - )) -_sym_db.RegisterMessage(VSCCArgs) - -ChaincodeEndorsement = _reflection.GeneratedProtocolMessageType('ChaincodeEndorsement', (_message.Message,), dict( - DESCRIPTOR = _CHAINCODEENDORSEMENT, - __module__ = 'peer.resources_pb2' - # @@protoc_insertion_point(class_scope:protos.ChaincodeEndorsement) - )) -_sym_db.RegisterMessage(ChaincodeEndorsement) - -ConfigTree = _reflection.GeneratedProtocolMessageType('ConfigTree', (_message.Message,), dict( - DESCRIPTOR = _CONFIGTREE, - __module__ = 'peer.resources_pb2' - # @@protoc_insertion_point(class_scope:protos.ConfigTree) - )) -_sym_db.RegisterMessage(ConfigTree) - - -DESCRIPTOR.has_options = True -DESCRIPTOR._options = _descriptor._ParseOptions(descriptor_pb2.FileOptions(), _b('\n\"org.hyperledger.fabric.protos.peerZ)github.com/hyperledger/fabric/protos/peer')) -try: - # THESE ELEMENTS WILL BE DEPRECATED. - # Please use the generated *_pb2_grpc.py files instead. 
- import grpc - from grpc.beta import implementations as beta_implementations - from grpc.beta import interfaces as beta_interfaces - from grpc.framework.common import cardinality - from grpc.framework.interfaces.face import utilities as face_utilities -except ImportError: - pass -# @@protoc_insertion_point(module_scope) diff --git a/app/platform/fabric/e2e-test/feature/peer/resources_pb2_grpc.py b/app/platform/fabric/e2e-test/feature/peer/resources_pb2_grpc.py deleted file mode 100644 index a89435267..000000000 --- a/app/platform/fabric/e2e-test/feature/peer/resources_pb2_grpc.py +++ /dev/null @@ -1,3 +0,0 @@ -# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT! -import grpc - diff --git a/app/platform/fabric/e2e-test/feature/peer/transaction_pb2.py b/app/platform/fabric/e2e-test/feature/peer/transaction_pb2.py deleted file mode 100644 index b2bee443d..000000000 --- a/app/platform/fabric/e2e-test/feature/peer/transaction_pb2.py +++ /dev/null @@ -1,426 +0,0 @@ -# Generated by the protocol buffer compiler. DO NOT EDIT! 
-# source: peer/transaction.proto - -import sys -_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1')) -from google.protobuf.internal import enum_type_wrapper -from google.protobuf import descriptor as _descriptor -from google.protobuf import message as _message -from google.protobuf import reflection as _reflection -from google.protobuf import symbol_database as _symbol_database -from google.protobuf import descriptor_pb2 -# @@protoc_insertion_point(imports) - -_sym_db = _symbol_database.Default() - - -from google.protobuf import timestamp_pb2 as google_dot_protobuf_dot_timestamp__pb2 -from peer import proposal_response_pb2 as peer_dot_proposal__response__pb2 -from common import common_pb2 as common_dot_common__pb2 - - -DESCRIPTOR = _descriptor.FileDescriptor( - name='peer/transaction.proto', - package='protos', - syntax='proto3', - serialized_pb=_b('\n\x16peer/transaction.proto\x12\x06protos\x1a\x1fgoogle/protobuf/timestamp.proto\x1a\x1cpeer/proposal_response.proto\x1a\x13\x63ommon/common.proto\"A\n\x11SignedTransaction\x12\x19\n\x11transaction_bytes\x18\x01 \x01(\x0c\x12\x11\n\tsignature\x18\x02 \x01(\x0c\"]\n\x14ProcessedTransaction\x12-\n\x13transactionEnvelope\x18\x01 \x01(\x0b\x32\x10.common.Envelope\x12\x16\n\x0evalidationCode\x18\x02 \x01(\x05\"9\n\x0bTransaction\x12*\n\x07\x61\x63tions\x18\x01 \x03(\x0b\x32\x19.protos.TransactionAction\"4\n\x11TransactionAction\x12\x0e\n\x06header\x18\x01 \x01(\x0c\x12\x0f\n\x07payload\x18\x02 \x01(\x0c\"m\n\x16\x43haincodeActionPayload\x12\"\n\x1a\x63haincode_proposal_payload\x18\x01 \x01(\x0c\x12/\n\x06\x61\x63tion\x18\x02 \x01(\x0b\x32\x1f.protos.ChaincodeEndorsedAction\"g\n\x17\x43haincodeEndorsedAction\x12!\n\x19proposal_response_payload\x18\x01 \x01(\x0c\x12)\n\x0c\x65ndorsements\x18\x02 
\x03(\x0b\x32\x13.protos.Endorsement*\xc2\x03\n\x10TxValidationCode\x12\t\n\x05VALID\x10\x00\x12\x10\n\x0cNIL_ENVELOPE\x10\x01\x12\x0f\n\x0b\x42\x41\x44_PAYLOAD\x10\x02\x12\x15\n\x11\x42\x41\x44_COMMON_HEADER\x10\x03\x12\x19\n\x15\x42\x41\x44_CREATOR_SIGNATURE\x10\x04\x12 \n\x1cINVALID_ENDORSER_TRANSACTION\x10\x05\x12\x1e\n\x1aINVALID_CONFIG_TRANSACTION\x10\x06\x12\x1a\n\x16UNSUPPORTED_TX_PAYLOAD\x10\x07\x12\x15\n\x11\x42\x41\x44_PROPOSAL_TXID\x10\x08\x12\x12\n\x0e\x44UPLICATE_TXID\x10\t\x12\x1e\n\x1a\x45NDORSEMENT_POLICY_FAILURE\x10\n\x12\x16\n\x12MVCC_READ_CONFLICT\x10\x0b\x12\x19\n\x15PHANTOM_READ_CONFLICT\x10\x0c\x12\x13\n\x0fUNKNOWN_TX_TYPE\x10\r\x12\x1a\n\x16TARGET_CHAIN_NOT_FOUND\x10\x0e\x12\x14\n\x10MARSHAL_TX_ERROR\x10\x0f\x12\x10\n\x0cNIL_TXACTION\x10\x10\x12\x19\n\x14INVALID_OTHER_REASON\x10\xff\x01\x42\x63\n\"org.hyperledger.fabric.protos.peerB\x12TransactionPackageZ)github.com/hyperledger/fabric/protos/peerb\x06proto3') - , - dependencies=[google_dot_protobuf_dot_timestamp__pb2.DESCRIPTOR,peer_dot_proposal__response__pb2.DESCRIPTOR,common_dot_common__pb2.DESCRIPTOR,]) -_sym_db.RegisterFileDescriptor(DESCRIPTOR) - -_TXVALIDATIONCODE = _descriptor.EnumDescriptor( - name='TxValidationCode', - full_name='protos.TxValidationCode', - filename=None, - file=DESCRIPTOR, - values=[ - _descriptor.EnumValueDescriptor( - name='VALID', index=0, number=0, - options=None, - type=None), - _descriptor.EnumValueDescriptor( - name='NIL_ENVELOPE', index=1, number=1, - options=None, - type=None), - _descriptor.EnumValueDescriptor( - name='BAD_PAYLOAD', index=2, number=2, - options=None, - type=None), - _descriptor.EnumValueDescriptor( - name='BAD_COMMON_HEADER', index=3, number=3, - options=None, - type=None), - _descriptor.EnumValueDescriptor( - name='BAD_CREATOR_SIGNATURE', index=4, number=4, - options=None, - type=None), - _descriptor.EnumValueDescriptor( - name='INVALID_ENDORSER_TRANSACTION', index=5, number=5, - options=None, - type=None), - 
_descriptor.EnumValueDescriptor( - name='INVALID_CONFIG_TRANSACTION', index=6, number=6, - options=None, - type=None), - _descriptor.EnumValueDescriptor( - name='UNSUPPORTED_TX_PAYLOAD', index=7, number=7, - options=None, - type=None), - _descriptor.EnumValueDescriptor( - name='BAD_PROPOSAL_TXID', index=8, number=8, - options=None, - type=None), - _descriptor.EnumValueDescriptor( - name='DUPLICATE_TXID', index=9, number=9, - options=None, - type=None), - _descriptor.EnumValueDescriptor( - name='ENDORSEMENT_POLICY_FAILURE', index=10, number=10, - options=None, - type=None), - _descriptor.EnumValueDescriptor( - name='MVCC_READ_CONFLICT', index=11, number=11, - options=None, - type=None), - _descriptor.EnumValueDescriptor( - name='PHANTOM_READ_CONFLICT', index=12, number=12, - options=None, - type=None), - _descriptor.EnumValueDescriptor( - name='UNKNOWN_TX_TYPE', index=13, number=13, - options=None, - type=None), - _descriptor.EnumValueDescriptor( - name='TARGET_CHAIN_NOT_FOUND', index=14, number=14, - options=None, - type=None), - _descriptor.EnumValueDescriptor( - name='MARSHAL_TX_ERROR', index=15, number=15, - options=None, - type=None), - _descriptor.EnumValueDescriptor( - name='NIL_TXACTION', index=16, number=16, - options=None, - type=None), - _descriptor.EnumValueDescriptor( - name='INVALID_OTHER_REASON', index=17, number=255, - options=None, - type=None), - ], - containing_type=None, - options=None, - serialized_start=610, - serialized_end=1060, -) -_sym_db.RegisterEnumDescriptor(_TXVALIDATIONCODE) - -TxValidationCode = enum_type_wrapper.EnumTypeWrapper(_TXVALIDATIONCODE) -VALID = 0 -NIL_ENVELOPE = 1 -BAD_PAYLOAD = 2 -BAD_COMMON_HEADER = 3 -BAD_CREATOR_SIGNATURE = 4 -INVALID_ENDORSER_TRANSACTION = 5 -INVALID_CONFIG_TRANSACTION = 6 -UNSUPPORTED_TX_PAYLOAD = 7 -BAD_PROPOSAL_TXID = 8 -DUPLICATE_TXID = 9 -ENDORSEMENT_POLICY_FAILURE = 10 -MVCC_READ_CONFLICT = 11 -PHANTOM_READ_CONFLICT = 12 -UNKNOWN_TX_TYPE = 13 -TARGET_CHAIN_NOT_FOUND = 14 -MARSHAL_TX_ERROR = 15 
-NIL_TXACTION = 16 -INVALID_OTHER_REASON = 255 - - - -_SIGNEDTRANSACTION = _descriptor.Descriptor( - name='SignedTransaction', - full_name='protos.SignedTransaction', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='transaction_bytes', full_name='protos.SignedTransaction.transaction_bytes', index=0, - number=1, type=12, cpp_type=9, label=1, - has_default_value=False, default_value=_b(""), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='signature', full_name='protos.SignedTransaction.signature', index=1, - number=2, type=12, cpp_type=9, label=1, - has_default_value=False, default_value=_b(""), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - options=None, - is_extendable=False, - syntax='proto3', - extension_ranges=[], - oneofs=[ - ], - serialized_start=118, - serialized_end=183, -) - - -_PROCESSEDTRANSACTION = _descriptor.Descriptor( - name='ProcessedTransaction', - full_name='protos.ProcessedTransaction', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='transactionEnvelope', full_name='protos.ProcessedTransaction.transactionEnvelope', index=0, - number=1, type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='validationCode', full_name='protos.ProcessedTransaction.validationCode', index=1, - number=2, type=5, cpp_type=1, label=1, - has_default_value=False, default_value=0, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - ], - extensions=[ - ], - 
nested_types=[], - enum_types=[ - ], - options=None, - is_extendable=False, - syntax='proto3', - extension_ranges=[], - oneofs=[ - ], - serialized_start=185, - serialized_end=278, -) - - -_TRANSACTION = _descriptor.Descriptor( - name='Transaction', - full_name='protos.Transaction', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='actions', full_name='protos.Transaction.actions', index=0, - number=1, type=11, cpp_type=10, label=3, - has_default_value=False, default_value=[], - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - options=None, - is_extendable=False, - syntax='proto3', - extension_ranges=[], - oneofs=[ - ], - serialized_start=280, - serialized_end=337, -) - - -_TRANSACTIONACTION = _descriptor.Descriptor( - name='TransactionAction', - full_name='protos.TransactionAction', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='header', full_name='protos.TransactionAction.header', index=0, - number=1, type=12, cpp_type=9, label=1, - has_default_value=False, default_value=_b(""), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='payload', full_name='protos.TransactionAction.payload', index=1, - number=2, type=12, cpp_type=9, label=1, - has_default_value=False, default_value=_b(""), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - options=None, - is_extendable=False, - syntax='proto3', - extension_ranges=[], - oneofs=[ - ], - serialized_start=339, - serialized_end=391, -) - - -_CHAINCODEACTIONPAYLOAD = _descriptor.Descriptor( - name='ChaincodeActionPayload', - 
full_name='protos.ChaincodeActionPayload', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='chaincode_proposal_payload', full_name='protos.ChaincodeActionPayload.chaincode_proposal_payload', index=0, - number=1, type=12, cpp_type=9, label=1, - has_default_value=False, default_value=_b(""), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='action', full_name='protos.ChaincodeActionPayload.action', index=1, - number=2, type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - options=None, - is_extendable=False, - syntax='proto3', - extension_ranges=[], - oneofs=[ - ], - serialized_start=393, - serialized_end=502, -) - - -_CHAINCODEENDORSEDACTION = _descriptor.Descriptor( - name='ChaincodeEndorsedAction', - full_name='protos.ChaincodeEndorsedAction', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='proposal_response_payload', full_name='protos.ChaincodeEndorsedAction.proposal_response_payload', index=0, - number=1, type=12, cpp_type=9, label=1, - has_default_value=False, default_value=_b(""), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='endorsements', full_name='protos.ChaincodeEndorsedAction.endorsements', index=1, - number=2, type=11, cpp_type=10, label=3, - has_default_value=False, default_value=[], - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - options=None, - is_extendable=False, - 
syntax='proto3', - extension_ranges=[], - oneofs=[ - ], - serialized_start=504, - serialized_end=607, -) - -_PROCESSEDTRANSACTION.fields_by_name['transactionEnvelope'].message_type = common_dot_common__pb2._ENVELOPE -_TRANSACTION.fields_by_name['actions'].message_type = _TRANSACTIONACTION -_CHAINCODEACTIONPAYLOAD.fields_by_name['action'].message_type = _CHAINCODEENDORSEDACTION -_CHAINCODEENDORSEDACTION.fields_by_name['endorsements'].message_type = peer_dot_proposal__response__pb2._ENDORSEMENT -DESCRIPTOR.message_types_by_name['SignedTransaction'] = _SIGNEDTRANSACTION -DESCRIPTOR.message_types_by_name['ProcessedTransaction'] = _PROCESSEDTRANSACTION -DESCRIPTOR.message_types_by_name['Transaction'] = _TRANSACTION -DESCRIPTOR.message_types_by_name['TransactionAction'] = _TRANSACTIONACTION -DESCRIPTOR.message_types_by_name['ChaincodeActionPayload'] = _CHAINCODEACTIONPAYLOAD -DESCRIPTOR.message_types_by_name['ChaincodeEndorsedAction'] = _CHAINCODEENDORSEDACTION -DESCRIPTOR.enum_types_by_name['TxValidationCode'] = _TXVALIDATIONCODE - -SignedTransaction = _reflection.GeneratedProtocolMessageType('SignedTransaction', (_message.Message,), dict( - DESCRIPTOR = _SIGNEDTRANSACTION, - __module__ = 'peer.transaction_pb2' - # @@protoc_insertion_point(class_scope:protos.SignedTransaction) - )) -_sym_db.RegisterMessage(SignedTransaction) - -ProcessedTransaction = _reflection.GeneratedProtocolMessageType('ProcessedTransaction', (_message.Message,), dict( - DESCRIPTOR = _PROCESSEDTRANSACTION, - __module__ = 'peer.transaction_pb2' - # @@protoc_insertion_point(class_scope:protos.ProcessedTransaction) - )) -_sym_db.RegisterMessage(ProcessedTransaction) - -Transaction = _reflection.GeneratedProtocolMessageType('Transaction', (_message.Message,), dict( - DESCRIPTOR = _TRANSACTION, - __module__ = 'peer.transaction_pb2' - # @@protoc_insertion_point(class_scope:protos.Transaction) - )) -_sym_db.RegisterMessage(Transaction) - -TransactionAction = 
_reflection.GeneratedProtocolMessageType('TransactionAction', (_message.Message,), dict( - DESCRIPTOR = _TRANSACTIONACTION, - __module__ = 'peer.transaction_pb2' - # @@protoc_insertion_point(class_scope:protos.TransactionAction) - )) -_sym_db.RegisterMessage(TransactionAction) - -ChaincodeActionPayload = _reflection.GeneratedProtocolMessageType('ChaincodeActionPayload', (_message.Message,), dict( - DESCRIPTOR = _CHAINCODEACTIONPAYLOAD, - __module__ = 'peer.transaction_pb2' - # @@protoc_insertion_point(class_scope:protos.ChaincodeActionPayload) - )) -_sym_db.RegisterMessage(ChaincodeActionPayload) - -ChaincodeEndorsedAction = _reflection.GeneratedProtocolMessageType('ChaincodeEndorsedAction', (_message.Message,), dict( - DESCRIPTOR = _CHAINCODEENDORSEDACTION, - __module__ = 'peer.transaction_pb2' - # @@protoc_insertion_point(class_scope:protos.ChaincodeEndorsedAction) - )) -_sym_db.RegisterMessage(ChaincodeEndorsedAction) - - -DESCRIPTOR.has_options = True -DESCRIPTOR._options = _descriptor._ParseOptions(descriptor_pb2.FileOptions(), _b('\n\"org.hyperledger.fabric.protos.peerB\022TransactionPackageZ)github.com/hyperledger/fabric/protos/peer')) -try: - # THESE ELEMENTS WILL BE DEPRECATED. - # Please use the generated *_pb2_grpc.py files instead. - import grpc - from grpc.framework.common import cardinality - from grpc.framework.interfaces.face import utilities as face_utilities - from grpc.beta import implementations as beta_implementations - from grpc.beta import interfaces as beta_interfaces -except ImportError: - pass -# @@protoc_insertion_point(module_scope) diff --git a/app/platform/fabric/e2e-test/feature/peer/transaction_pb2_grpc.py b/app/platform/fabric/e2e-test/feature/peer/transaction_pb2_grpc.py deleted file mode 100644 index d5557c123..000000000 --- a/app/platform/fabric/e2e-test/feature/peer/transaction_pb2_grpc.py +++ /dev/null @@ -1,5 +0,0 @@ -# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT! 
-import grpc -from grpc.framework.common import cardinality -from grpc.framework.interfaces.face import utilities as face_utilities - diff --git a/app/platform/fabric/e2e-test/feature/requirement.txt b/app/platform/fabric/e2e-test/feature/requirement.txt deleted file mode 100644 index c92026626..000000000 --- a/app/platform/fabric/e2e-test/feature/requirement.txt +++ /dev/null @@ -1,27 +0,0 @@ -# -# SPDX-License-Identifier: Apache-2.0 -# -behave -nose -flask==1.0.3 -python-dateutil==2.2 -pytz==2014.3 -pyyaml==5.1 -couchdb==1.0 -flask-cors==2.0.1 -requests -pyOpenSSL==19.0.0 -pysha3==1.0b1 -six -grpcio -ecdsa -python-slugify -b3j0f.aop -google -protobuf -pykafka -pyexecjs -cython -psutil -assertpy -behave_rest diff --git a/app/platform/fabric/e2e-test/feature/sdk/java/jar-with-dependencies-exclude-resources.xml b/app/platform/fabric/e2e-test/feature/sdk/java/jar-with-dependencies-exclude-resources.xml deleted file mode 100644 index 607c251d9..000000000 --- a/app/platform/fabric/e2e-test/feature/sdk/java/jar-with-dependencies-exclude-resources.xml +++ /dev/null @@ -1,28 +0,0 @@ - - - jar-with-dependencies-exclude-resources - - jar - - false - - - / - false - true - runtime - - - - - / - ${project.build.outputDirectory} - - out/** - - - - diff --git a/app/platform/fabric/e2e-test/feature/sdk/java/package.sh b/app/platform/fabric/e2e-test/feature/sdk/java/package.sh deleted file mode 100755 index e78783e1e..000000000 --- a/app/platform/fabric/e2e-test/feature/sdk/java/package.sh +++ /dev/null @@ -1,9 +0,0 @@ -#!/bin/bash -e - -WD=$GOPATH/src/github.com/hyperledger/fabric-test/feature/sdk/java -cd $WD - -echo "======== Build Java SDK wrapper ======" -mvn package -cp target/peer-javasdk-test-jar-with-dependencies-exclude-resources.jar peer-javasdk.jar -echo "jar file located in $WD ======" diff --git a/app/platform/fabric/e2e-test/feature/sdk/java/pom.xml b/app/platform/fabric/e2e-test/feature/sdk/java/pom.xml deleted file mode 100644 index c9bbc4e0b..000000000 --- 
a/app/platform/fabric/e2e-test/feature/sdk/java/pom.xml +++ /dev/null @@ -1,81 +0,0 @@ - - - 4.0.0 - - org.hyperledger.fabric_test - peer-javasdk - test - - - - org.apache.maven.plugins - maven-compiler-plugin - - 1.8 - 1.8 - - - - org.apache.maven.plugins - maven-assembly-plugin - - - - true - org.hyperledger.fabric_test.operations.PeerOperations - - - - jar-with-dependencies-exclude-resources.xml - - - - - only - package - - single - - - - - - - - - snapshots-repo - https://oss.sonatype.org/content/repositories/snapshots - - false - - - true - - - - - - - org.hyperledger.fabric-sdk-java - fabric-sdk-java - 1.3.0-SNAPSHOT - - - - com.beust - jcommander - 1.72 - - - commons-lang - commons-lang - 2.6 - - - diff --git a/app/platform/fabric/e2e-test/feature/sdk/java/src/META-INF/MANIFEST.MF b/app/platform/fabric/e2e-test/feature/sdk/java/src/META-INF/MANIFEST.MF deleted file mode 100644 index 6f849ec07..000000000 --- a/app/platform/fabric/e2e-test/feature/sdk/java/src/META-INF/MANIFEST.MF +++ /dev/null @@ -1,3 +0,0 @@ -Manifest-Version: 1.0 -Main-Class: org.hyperledger.fabric_test.operations.PeerOperations - diff --git a/app/platform/fabric/e2e-test/feature/sdk/java/src/main/java/org/hyperledger/fabric_test/operations/CreateAppUser.java b/app/platform/fabric/e2e-test/feature/sdk/java/src/main/java/org/hyperledger/fabric_test/operations/CreateAppUser.java deleted file mode 100644 index 415aef229..000000000 --- a/app/platform/fabric/e2e-test/feature/sdk/java/src/main/java/org/hyperledger/fabric_test/operations/CreateAppUser.java +++ /dev/null @@ -1,6 +0,0 @@ -package org.hyperledger.fabric_test.operations; - -public class CreateAppUser { - public static void main(String[] args) { - } -} diff --git a/app/platform/fabric/e2e-test/feature/sdk/java/src/main/java/org/hyperledger/fabric_test/operations/PeerOperations.java b/app/platform/fabric/e2e-test/feature/sdk/java/src/main/java/org/hyperledger/fabric_test/operations/PeerOperations.java deleted file mode 100644 index 
cb7934aae..000000000 --- a/app/platform/fabric/e2e-test/feature/sdk/java/src/main/java/org/hyperledger/fabric_test/operations/PeerOperations.java +++ /dev/null @@ -1,375 +0,0 @@ -package org.hyperledger.fabric_test.operations; - -import com.beust.jcommander.JCommander; -import com.beust.jcommander.Parameter; -import com.google.gson.*; -import org.hyperledger.fabric.sdk.exception.*; -import org.hyperledger.fabric_ca.sdk.exception.EnrollmentException; -import org.hyperledger.fabric_ca.sdk.exception.InfoException; -import org.hyperledger.fabric_test.structures.AppUser; -import org.apache.log4j.BasicConfigurator; -import org.apache.commons.lang.WordUtils; -import org.hyperledger.fabric.sdk.*; -import org.hyperledger.fabric.sdk.security.CryptoSuite; -import org.hyperledger.fabric.sdk.security.CryptoPrimitives; - -import java.io.*; -import java.lang.reflect.InvocationTargetException; -import java.nio.charset.Charset; -import java.nio.file.Files; -import java.nio.file.Path; -import java.nio.file.Paths; -import java.security.NoSuchAlgorithmException; -import java.security.PrivateKey; -import java.security.spec.InvalidKeySpecException; -import java.security.cert.CertificateException; -import java.util.*; -import java.util.AbstractMap.SimpleEntry; -import java.util.concurrent.TimeUnit; -import java.util.stream.Collectors; -import java.util.stream.Stream; - -public class PeerOperations { - - // Globals - // Configuration Path - @Parameter(names={"--configpath", "-c"}) - private static String configPath = "../../configs"; - - // Peer name - @Parameter(names={"--peername", "-n"}) - private static String peerName = "peer0.org1.example.com"; - // Peer IP address - @Parameter(names={"--peerip", "-i"}) - private static String peerIp; - // Peer port - @Parameter(names={"--peerport", "-p"}) - private static String peerPort; - // Command - @Parameter(names={"--operation", "-o"}) - private static String operationStr; - // Organization Name - @Parameter(names={"--mspid", "-r"}) - 
private static String mspId = "org1.example.com"; - - // Orderer - @Parameter(names={"--orderer", "-d"}) - private static String orderer = "orderer0.example.com"; - // Network ID - @Parameter(names={"--networkid", "-e"}) - private static String networkID = ""; - // CA Certificate Path - @Parameter(names={"--cacertpath", "-a"}) - private static String cacertPath; - // Server CA Certificate Path - @Parameter(names={"--srvcertpath", "-s"}) - private static String srvcertPath; - - // Channel Name - @Parameter(names={"--channelname", "-h"}) - private static String channelName; - // Chaincode Name - @Parameter(names={"--ccname", "-m"}) - private static String ccName; - // Chaincode version - @Parameter(names={"--ccversion", "-v"}) - private static String ccVersion; - // Chaincode Path - @Parameter(names={"--ccpath", "-t"}) - private static String ccPath; - // Chaincode Func - @Parameter(names={"--ccfunc", "-f"}) - private static String ccFunc; - // Chaincode Args - @Parameter(names={"--ccargs", "-g"}) - private static String ccargs; - - // UserName - @Parameter(names={"--user", "-u"}) - private static String userName; - // User Password - @Parameter(names={"--userpasswd", "-w"}) - private static String userPassword; - - private static Map operationMap() { - return Collections.unmodifiableMap(Stream.of( - new SimpleEntry<>("join", Operation.CHANNEL_JOIN), - new SimpleEntry<>("install", Operation.CC_INSTALL), - new SimpleEntry<>("instantiate", Operation.CC_INSTANTIATE), - new SimpleEntry<>("upgrade", Operation.CC_UPGRADE), - new SimpleEntry<>("query", Operation.CC_QUERY), - new SimpleEntry<>("invoke", Operation.CC_INVOKE) - ).collect(Collectors.toMap(SimpleEntry::getKey, SimpleEntry::getValue))); - } - private static JsonObject connectionProfile; - - public static void main(String ... 
argv) throws Exception { - PeerOperations main = new PeerOperations(); - JCommander.newBuilder() - .addObject(main) - .build() - .parse(argv); - main.run(); - } - - public void run() throws IOException, InvalidArgumentException, ProposalException, TransactionException, IllegalAccessException, InstantiationException, ClassNotFoundException, NoSuchMethodException, InvocationTargetException, CryptoException, InfoException, org.hyperledger.fabric_ca.sdk.exception.InvalidArgumentException, EnrollmentException, InvalidKeySpecException, NoSuchAlgorithmException, NetworkConfigurationException, CertificateException { - BasicConfigurator.configure(); - - // Using enums and putting this check up here instead of just using a switch-case statement directly - // so as to avoid unnecessary overhead by setting up client and peer - Operation operation = operationMap().getOrDefault(operationStr, Operation.INVALID); - if (operation == Operation.INVALID) { - System.out.println("Unknown command."); - System.exit(1); - } - - String orgName = WordUtils.capitalize(mspId.replace(".", " ")).replace(" ", ""); - connectionProfile = getConnectionProfile(); - String enrollId = String.format("%s@%s", userName, mspId); - - HFClient _client = HFClient.createNewInstance(); - _client.setCryptoSuite(CryptoSuite.Factory.getCryptoSuite()); - - AppUser appUser = new AppUser(enrollId, orgName, mspId); - - JsonObject peerInfo = connectionProfile.getAsJsonObject("peers").getAsJsonObject(peerName); - String peerLocation = peerInfo.get("url").getAsString(); - - Properties peerProperties = new Properties(); - peerProperties.put("pemBytes", - peerInfo.getAsJsonObject("tlsCACerts").get("pem").getAsString().getBytes()); - peerProperties.put("grpc.NettyChannelBuilderOption.keepAliveTime", new Object[] {8L, TimeUnit.MINUTES}); - peerProperties.put("grpc.NettyChannelBuilderOption.keepAliveTimeout", new Object[] {8L, TimeUnit.SECONDS}); - peerProperties.put("grpc.NettyChannelBuilderOption.keepAliveWithoutCalls", new 
Object[] {true}); - - appUser.setEnrollment(getEnrollment(connectionProfile, mspId, userName, userPassword)); - _client.setUserContext(appUser); - - Peer peer = _client.newPeer(peerName, peerLocation, peerProperties); - Channel channel = getChannel(channelName, _client); - - System.out.println("Perform operation..."); - if (operation == Operation.CC_INSTALL) { - installChaincode(ccName, ccVersion, ccPath, _client, Collections.singletonList(peer)); - System.exit(0); - } - - if (operation != Operation.CHANNEL_JOIN) { - channel.addPeer(peer); - channel.initialize(); - } - - // Cases CC_INSTALL and INVALID are unreachable at this point - switch (operation) { - case CHANNEL_JOIN: - joinChannel(channel, peer); - break; - case CC_INSTANTIATE: { - ArrayList ccArgs = arrayFromJsonString(ccargs); - instantiateChaincode(ccName, ccVersion, channel, ccArgs, _client, Collections.singletonList(peer)); - break; - } - case CC_UPGRADE: { - upgradeChaincode(ccName, ccVersion, channel, _client, Collections.singletonList(peer)); - break; - } - case CC_QUERY: { - System.out.println(ccargs); - ArrayList ccArgs = arrayFromJsonString(ccargs); - sendQuery(ccName, ccFunc, ccArgs, channel, _client); - break; - } - case CC_INVOKE: { - ArrayList ccArgs = arrayFromJsonString(ccargs); - invokeTransaction(ccName, ccFunc, ccArgs, channel, _client, Collections.singletonList(peer)); - break; - } - } - } - - private static Enrollment getEnrollment(JsonObject connectionProfile, String mspId, String userName, String password) throws NetworkConfigurationException, IOException, InvalidArgumentException, org.hyperledger.fabric_ca.sdk.exception.InvalidArgumentException, IllegalAccessException, InvocationTargetException, InstantiationException, NoSuchMethodException, CryptoException, ClassNotFoundException, EnrollmentException, InvalidKeySpecException, NoSuchAlgorithmException, CertificateException { - String enrollId = String.format("%s@%s", userName, mspId); - Path filePath = 
Paths.get(configPath,"peerOrganizations", mspId, "users", enrollId, "msp", "keystore"); - File keyDir = new File(filePath.toString()); - File[] listOfFiles = keyDir.listFiles(); - System.out.println(filePath.toString()); - assert listOfFiles != null : "There are no files in the filepath"; - System.out.println(listOfFiles[0].getName()); - Path keyPath = Paths.get(configPath,"peerOrganizations", mspId, "users", enrollId, "msp", "keystore", listOfFiles[0].getName()); - String keyString = new String(Files.readAllBytes(keyPath)); - PrivateKey keyPem = getPrivateKeyFromPEMString(keyString); - Path certPath = Paths.get(configPath,"peerOrganizations", mspId, "users", enrollId, "msp", "signcerts", String.format("%s-cert.pem", enrollId)); - String certPem = new String(Files.readAllBytes(certPath)); - return new Enrollment() { - @Override - public PrivateKey getKey() { - return keyPem; - } - @Override - public String getCert() { - return certPem; - } - }; - } - - private static JsonObject getConnectionProfile() throws IOException { - String connectionProfileStr = - new String(Files.readAllBytes(Paths.get(configPath, "network-config.json"))); - return new JsonParser().parse(connectionProfileStr).getAsJsonObject(); - } - - private static ArrayList arrayFromJsonString(String jsonArrayStr) { - JsonArray jsonArray = new JsonParser().parse(jsonArrayStr).getAsJsonArray(); - System.out.println(jsonArrayStr); - System.out.println(jsonArray); - ArrayList myArray = new ArrayList<>(); - for (JsonElement item : jsonArray) { - myArray.add((T) item.getAsString()); - } - return myArray; - } - - private static PrivateKey getPrivateKeyFromPEMString(String privatePem) throws IOException, NoSuchAlgorithmException, InvalidKeySpecException, IllegalAccessException, InstantiationException, ClassNotFoundException, CryptoException, InvalidArgumentException, CertificateException { - CryptoPrimitives crypto = new CryptoPrimitives(); - crypto.init(); - return 
crypto.bytesToPrivateKey(privatePem.getBytes()); - } - - private static Channel getChannel(String channelName, HFClient client) - throws InvalidArgumentException, NetworkConfigurationException, IOException, TransactionException { - System.out.println("Fetching channel " + channelName); - - File configFile = new File(Paths.get(configPath, "network-config.json").toString()); - NetworkConfig config = NetworkConfig.fromJsonFile(configFile); - Channel channel = client.loadChannelFromConfig(channelName, config); - - Properties ordererProperties = new Properties(); - ordererProperties.put("pemBytes", - connectionProfile.getAsJsonObject("orderers") - .getAsJsonObject("orderer0.example.com") - .getAsJsonObject("tlsCACerts").get("pem").getAsString().getBytes()); - ordererProperties.put("grpc.NettyChannelBuilderOption.keepAliveTime", new Object[] {5L, TimeUnit.MINUTES}); - ordererProperties.put("grpc.NettyChannelBuilderOption.keepAliveTimeout", new Object[] {8L, TimeUnit.SECONDS}); - ordererProperties.put("grpc.NettyChannelBuilderOption.keepAliveWithoutCalls", new Object[] {true}); - ordererProperties.put("grpc.NettyChannelBuilderOption.forTarget", new Object[] {"example.com"}); - Orderer orderer = client.newOrderer( - "orderer0.example.com", - connectionProfile.getAsJsonObject("orderers") - .getAsJsonObject("orderer0.example.com").get("url").getAsString(), - ordererProperties - ); - channel.addOrderer(orderer); - - return channel; - } - - private static void joinChannel(Channel channel, Peer peer) { - System.out.println("Joining channel..."); - - try { - channel.joinPeer(peer); - System.out.println("Joined channel " + channel.getName()); - } catch (ProposalException ex) { - System.out.println("Channel join failed. 
Is the peer " + peer.getName() - + " already joined to channel " + channel.getName() + "?"); - ex.printStackTrace(); - } - } - - private static void installChaincode(String ccName, String ccVersion, String ccPath, - HFClient client, Collection peers) - throws InvalidArgumentException, ProposalException { - System.out.println("Installing chaincode " + ccName + ":" + ccVersion + " (located at $GOPATH/src/" + ccPath - + " on peers " + peers + "."); - InstallProposalRequest installProposalRequest = client.newInstallProposalRequest(); - String gopath = Paths.get(System.getenv("GOPATH")).toString(); - installProposalRequest.setChaincodeSourceLocation(new File(gopath)); - installProposalRequest.setChaincodeName(ccName); - installProposalRequest.setChaincodeVersion(ccVersion); - installProposalRequest.setChaincodePath(ccPath); - installProposalRequest.setArgs(new ArrayList<>()); - client.sendInstallProposal(installProposalRequest, peers); - } - - private static void instantiateChaincode(String ccName, String ccVersion, - Channel channel, ArrayList ccArgs, - HFClient client, Collection peers) - throws InvalidArgumentException, ProposalException { - System.out.println("Instantiating chaincode " + ccName + ":" + ccVersion - + " on channel " + channel.getName() + "."); - InstantiateProposalRequest instantiateRequest = client.newInstantiationProposalRequest(); - instantiateRequest.setChaincodeName(ccName); - instantiateRequest.setChaincodeVersion(ccVersion); - instantiateRequest.setArgs(ccArgs); - instantiateRequest.setTransientMap(Collections.emptyMap()); - - Collection responses = channel.sendInstantiationProposal(instantiateRequest, peers); - - System.out.println("Sending transaction to orderer to be committed in the ledger."); - channel.sendTransaction(responses, client.getUserContext()); - System.out.println("Transaction committed successfully."); - } - - private static void upgradeChaincode(String ccName, String ccVersion, - Channel channel, HFClient client, Collection 
peers) - throws InvalidArgumentException, ProposalException { - System.out.println("Upgrading chaincode " + ccName + " to version " + ccVersion - + " on channel " + channel.getName() + "."); - UpgradeProposalRequest upgradeRequest = client.newUpgradeProposalRequest(); - upgradeRequest.setChaincodeName(ccName); - upgradeRequest.setChaincodeVersion(ccVersion); - upgradeRequest.setArgs(new ArrayList<>()); - upgradeRequest.setTransientMap(Collections.emptyMap()); - - Collection responses = channel.sendUpgradeProposal(upgradeRequest, peers); - - System.out.println("Sending transaction to orderer to be committed in the ledger."); - channel.sendTransaction(responses, client.getUserContext()); - System.out.println("Transaction committed successfully."); - } - - private static void sendQuery(String ccName, String function, List args, - Channel channel, HFClient client) - throws InvalidArgumentException, ProposalException { - System.out.println("Querying " + channel.getName() + " with function " + function + " using " + ccName + "."); - QueryByChaincodeRequest query = client.newQueryProposalRequest(); - query.setChaincodeID(ChaincodeID.newBuilder() - .setName(ccName) - .build()); - query.setChaincodeName(ccName); - query.setFcn(function); - query.setArgs(new ArrayList<>(args)); - - ArrayList responses = new ArrayList<>(channel.queryByChaincode(query)); - System.out.println("Response:"); - Gson gson = new GsonBuilder().setPrettyPrinting().create(); - JsonParser parser = new JsonParser(); - JsonElement element = parser.parse(new String(responses.get(0).getChaincodeActionResponsePayload(), - Charset.defaultCharset())); - System.out.println(gson.toJson(element)); - } - - private static void invokeTransaction(String ccName, String function, List args, - Channel channel, HFClient client, Collection peers) - throws InvalidArgumentException, ProposalException { - System.out.println("Sending transaction proposal to " + channel.getName() + " with function " - + function + " using " + 
ccName + "."); - System.out.println("\t Args: " + args); - TransactionProposalRequest invokeRequest = client.newTransactionProposalRequest(); - invokeRequest.setChaincodeID(ChaincodeID.newBuilder() - .setName(ccName) - .build()); - invokeRequest.setChaincodeName(ccName); - invokeRequest.setFcn(function); - invokeRequest.setArgs(new ArrayList<>(args)); - - Collection responses = channel.sendTransactionProposal(invokeRequest, peers); - - System.out.println("Sending transaction to orderer to be committed in the ledger."); - channel.sendTransaction(responses, client.getUserContext()); - } - - private enum Operation { - CHANNEL_JOIN, CC_INSTALL, CC_INSTANTIATE, CC_UPGRADE, CC_QUERY, CC_INVOKE, INVALID - } -} diff --git a/app/platform/fabric/e2e-test/feature/sdk/java/src/main/java/org/hyperledger/fabric_test/structures/AppEnrollment.java b/app/platform/fabric/e2e-test/feature/sdk/java/src/main/java/org/hyperledger/fabric_test/structures/AppEnrollment.java deleted file mode 100644 index 813be618a..000000000 --- a/app/platform/fabric/e2e-test/feature/sdk/java/src/main/java/org/hyperledger/fabric_test/structures/AppEnrollment.java +++ /dev/null @@ -1,20 +0,0 @@ -package org.hyperledger.fabric_test.structures; - -import org.hyperledger.fabric.sdk.Enrollment; - -import java.security.PrivateKey; - -public class AppEnrollment implements Enrollment { - private PrivateKey _key; - private String _cert; - - @Override - public PrivateKey getKey() { - return _key; - } - - @Override - public String getCert() { - return _cert; - } -} diff --git a/app/platform/fabric/e2e-test/feature/sdk/java/src/main/java/org/hyperledger/fabric_test/structures/AppPrivateKey.java b/app/platform/fabric/e2e-test/feature/sdk/java/src/main/java/org/hyperledger/fabric_test/structures/AppPrivateKey.java deleted file mode 100644 index 4d1981b6b..000000000 --- a/app/platform/fabric/e2e-test/feature/sdk/java/src/main/java/org/hyperledger/fabric_test/structures/AppPrivateKey.java +++ /dev/null @@ -1,20 +0,0 @@ 
-package org.hyperledger.fabric_test.structures; - -import java.security.PrivateKey; - -public class AppPrivateKey implements PrivateKey { - @Override - public String getAlgorithm() { - return null; - } - - @Override - public String getFormat() { - return null; - } - - @Override - public byte[] getEncoded() { - return new byte[0]; - } -} diff --git a/app/platform/fabric/e2e-test/feature/sdk/java/src/main/java/org/hyperledger/fabric_test/structures/AppUser.java b/app/platform/fabric/e2e-test/feature/sdk/java/src/main/java/org/hyperledger/fabric_test/structures/AppUser.java deleted file mode 100644 index 6ca79b2b2..000000000 --- a/app/platform/fabric/e2e-test/feature/sdk/java/src/main/java/org/hyperledger/fabric_test/structures/AppUser.java +++ /dev/null @@ -1,107 +0,0 @@ -package org.hyperledger.fabric_test.structures; - -import org.hyperledger.fabric.sdk.Enrollment; -import org.hyperledger.fabric.sdk.User; - -import java.io.Serializable; -import java.util.Set; - -public class AppUser implements User, Serializable { - - private static final long serializationid = 1141L; - - private String _name, _account, _affiliation, _mspId; - private Set _roles; - private Enrollment _enrollment; - - public AppUser(String name, String affiliation, String mspId, Enrollment enrollment) { - setName(name); - setAffiliation(affiliation); - setMspId(mspId); - setEnrollment(enrollment); - } - - public AppUser(String name, String affiliation, String mspId) { - setName(name); - setAffiliation(affiliation); - setMspId(mspId); - } - - @Override - public String getName() { - return _name; - } - - public void setName(String name) { - _name = name; - } - - @Override - public Set getRoles() { - return _roles; - } - - public void setRoles(Set roles) { - _roles = roles; - } - - @Override - public String getAccount() { - return _account; - } - - public void setAccount(String account) { - _account = account; - } - - @Override - public String getAffiliation() { - return _affiliation; - } - - public 
void setAffiliation(String affiliation) { - _affiliation = affiliation; - } - - @Override - public Enrollment getEnrollment() { - return _enrollment; - } - - public void setEnrollment(Enrollment enrollment) { - _enrollment = enrollment; - } - - @Override - public String getMspId() { - return _mspId; - } - - public void setMspId(String mspId) { - _mspId = mspId; - } - - @Override - public String toString() { - StringBuilder builder = new StringBuilder(); - builder.append( - "AppUser { \n" - + " name: " + _name + ", \n" - + " roles: [ \n" - ); - if (_roles != null) { - for (String role : _roles) { - builder.append(" " + role + "\n"); - } - } - builder.append( - " ], \n" - + " account: " + _account + ", \n" - + " affiliation: " + _affiliation + ", \n" - + " mspId: " + _mspId + "\n" - + "}" - ); - - return builder.toString(); - } -} diff --git a/app/platform/fabric/e2e-test/feature/sdk/node/common.js b/app/platform/fabric/e2e-test/feature/sdk/node/common.js deleted file mode 100644 index 912762477..000000000 --- a/app/platform/fabric/e2e-test/feature/sdk/node/common.js +++ /dev/null @@ -1,137 +0,0 @@ -/** - * Copyright IBM Corp All Rights Reserved - * - * SPDX-License-Identifier: Apache-2.0 - */ -'use strict'; -const log4js = require('log4js'); -const logger = log4js.getLogger('SDK_INT'); - -const path = require('path'); -const util = require('util'); -const fs = require('fs-extra'); -const Client = require('fabric-client'); - -function setupPeers(peers, channel, org, client, network_config, tls) { - let nodes = network_config[org]['peers']; - for (let key in nodes) { - if (peers.indexOf(key) >= 0) { - let peer; - if (tls === true){ - let data = fs.readFileSync(nodes[key].tls_cacerts); - peer = client.newPeer( - nodes[key].requests, { - pem: Buffer.from(data).toString(), - 'ssl-target-name-override': key - } - ); - } else { - peer = client.newPeer(nodes[key].requests); - } - peer.setName(key); - channel.addPeer(peer); - } - } -} - -var newPeers = function(names, org, 
network_config, client) { - return newRemotes(names, true, org, network_config, client); -}; - -var newEventHubs = function(names, org, network_config, client) { - return newRemotes(names, false, org, network_config, client); -}; - -function readAllFiles(dir) { - var files = fs.readdirSync(dir); - var certs = []; - files.forEach((file_name) => { - let file_path = path.join(dir,file_name); - let data = fs.readFileSync(file_path); - certs.push(data); - }); - return certs; -} - -function getKeyStoreForOrg(org) { - // console.info("???" + Client.getConfigSetting('keyValueStore') + '_' + org); - return Client.getConfigSetting('keyValueStore') + '_' + org; -} - -function newRemotes(names, forPeers, userOrg, network_config, client) { - let targets = []; - // find the peer that match the names - for (let idx in names) { - let peerName = names[idx]; - let nodes = network_config[userOrg]['peers']; - if (nodes[peerName]) { - // found a peer matching the name - let data = fs.readFileSync(nodes[peerName].tls_cacerts); - let grpcOpts = { - pem: Buffer.from(data).toString(), - 'ssl-target-name-override': ''+peerName - }; - if (forPeers) { - targets.push(client.newPeer(nodes[peerName].requests, grpcOpts)); - } else { - let eh = client.newEventHub(); - eh.setPeerAddr(nodes[peerName].events, grpcOpts); - targets.push(eh); - } - } - } - - if (targets.length === 0) { - logger.error(util.format('Failed to find peers matching the names %s', names)); - } - return targets; -} - -function newOrderer(client, network_config, orderer, tls) { - let url = network_config.orderer.url; - if (tls === true){ - let data = fs.readFileSync(network_config.orderer.tls_cacerts); - let pem = Buffer.from(data).toString(); - return client.newOrderer(url, { - 'pem': pem, - 'ssl-target-name-override': network_config.orderer['server-hostname'] - }); - } else { - return client.newOrderer(url); - } -} - -var getRegisteredUsers = function(client, username, org, networkID, mspID) { - var keyPath = 
util.format('./configs/%s/peerOrganizations/%s/users/%s/msp/keystore/', networkID, org, username); - var keyPEM = Buffer.from(readAllFiles(keyPath)[0]).toString(); - var certPath = util.format('./configs/%s/peerOrganizations/%s/users/%s/msp/signcerts/', networkID, org, username); - var certPEM = readAllFiles(certPath)[0].toString(); - - var cryptoSuite = Client.newCryptoSuite(); - cryptoSuite.setCryptoKeyStore(Client.newCryptoKeyStore({path: '/tmp/fabric-client-kvs_'+org.split('.')[0]})); - client.setCryptoSuite(cryptoSuite); - - return Client.newDefaultKeyValueStore({ - path: getKeyStoreForOrg(org) - }).then((store) => { - client.setStateStore(store); - - return client.createUser({ - username: username.split('@')[0], - mspid: mspID, - cryptoContent: { - privateKeyPEM: keyPEM, - signedCertPEM: certPEM - } - }); - }); -} - -exports.newPeers = newPeers; -exports.newEventHubs = newEventHubs; -exports.setupPeers = setupPeers; -exports.newRemotes = newRemotes; -exports.newOrderer = newOrderer; -exports.getRegisteredUsers = getRegisteredUsers; -exports.getKeyStoreForOrg = getKeyStoreForOrg; -exports.readAllFiles = readAllFiles; diff --git a/app/platform/fabric/e2e-test/feature/sdk/node/invoke.js b/app/platform/fabric/e2e-test/feature/sdk/node/invoke.js deleted file mode 100644 index 60e826d16..000000000 --- a/app/platform/fabric/e2e-test/feature/sdk/node/invoke.js +++ /dev/null @@ -1,216 +0,0 @@ -/** - * Copyright IBM Corp All Rights Reserved - * - * SPDX-License-Identifier: Apache-2.0 - */ - -'use strict'; -const fs = require('fs'); -const path = require('path'); -const util = require('util'); -const hfc = require('fabric-client'); -const {Gateway, InMemoryWallet, X509WalletMixin} = require('fabric-network'); -const common = require('./common.js'); -const client = new hfc(); - -/** - * Perform an "invoke" action on installed/instantiated chaincode - * @param {String} inputFilePath the file path containing test run information as a JSON object containing - * { - * 
username: the test user name - * org: the organisation to use - * chaincode: object describing the chaincode parameters - * orderer: the orderer to use - * networkConfigFile: the network configuration file path - * opts: additional test parameters - * } - */ -function invoke(inputFilePath) { - - const filePath = path.join(__dirname, inputFilePath); - const inputData = JSON.parse(fs.readFileSync(filePath, {encoding: 'utf-8'})); - - const temptext = '\n\n Username : '+ inputData.user + - '\n\n Org: '+ inputData.org + - '\n\n OrgName: '+ inputData.orgName + - '\n\n chaincode : '+ util.format(inputData.chaincode) + - '\n\n peerNames : '+ inputData.peers + - '\n\n orderer: '+ inputData.orderer + - '\n\n network_config_path: '+ inputData.networkConfigFile + - '\n\n opts: '+ util.format(inputData.opts); - //console.log(temptext); - - // Read Network JSON PATH from behave - let network_config; - try { - network_config = JSON.parse(fs.readFileSync(inputData.networkConfigFile)); - } catch(err) { - console.error(err); - } - - // Node SDK implements transaction as well as invoke, disambiguate on the passed opts - if(inputData.opts && inputData.opts['network-model'] && inputData.opts['network-model'].localeCompare("true") === 0){ - return _submitTransaction(inputData.orgName, inputData.chaincode, network_config) - } else { - return _invoke(inputData.user, inputData.org, inputData.orgName, inputData.chaincode, inputData.peers, inputData.orderer, network_config) - } -}; - -/** - * Perform an invoke using the NodeSDK - * @param {Strinrg} username the user name to perform the action under - * @param {String} org the organisation to use - * @param {String} orgName the organisation name - * @param {JSON} chaincode the chaincode descriptor - * @param {[String]} peerNames string array of peers - * @param {String} orderer the orderer - * @param {JSON} network_config the network configuration - */ -function _invoke(username, org, orgName, chaincode, peerNames, orderer, network_config) { 
- let channel; - - let targets = (peerNames) ? common.newPeers(peerNames, orgName, network_config['network-config'], client) : undefined; - - const user = username.split('@')[1] ? username : username+'@'+org; - const userOrg = username.split('@')[1] ? username.split('@')[1] : org; - - let tx_id = null; - return common.getRegisteredUsers(client, user, userOrg, network_config['networkID'], network_config['network-config'][orgName]['mspid']).then((user) => { - tx_id = client.newTransactionID(); - - channel = client.newChannel(chaincode.channelId); - channel.addOrderer(common.newOrderer(client, network_config['network-config'], orderer, network_config['tls'])); - common.setupPeers(peerNames, channel, orgName, client, network_config['network-config'], network_config['tls']); - - // send proposal to endorser - let request = { - chaincodeId: chaincode.chaincodeId, - fcn: chaincode.fcn, - args: chaincode.args, - chainId: chaincode.channelId, - txId: tx_id - }; - - if (targets) { - request.targets = targets; - } - - console.info(JSON.stringify(["ok", "request is set"])); - return channel.sendTransactionProposal(request, 120000); - }, (err) => { - console.error('Failed to enroll user \'' + username + '\'. ' + err); - throw new Error('Failed to enroll user \'' + username + '\'. 
' + err); - }).then((results) => { - console.info(JSON.stringify(["ok", "proposal sent"])); - let proposalResponses = results[0]; - let proposal = results[1]; - let all_good = true; - for (var i in proposalResponses) { - let one_good = false; - if (proposalResponses && proposalResponses[i].response && - proposalResponses[i].response.status === 200) { - one_good = true; - } else { - console.error('transaction proposal was bad'); - } - all_good = all_good & one_good; - } - if (all_good) { - var request = { - proposalResponses: proposalResponses, - proposal: proposal - }; - // set the transaction listener and set a timeout of 30sec - // if the transaction did not get committed within the timeout period, - // fail the test - let eventPromises = []; - - if (!peerNames) { - peerNames = channel.getPeers().map(function(peer) { - return peer.getName(); - }); - } - - let sendPromise = channel.sendTransaction(request); - return Promise.all([sendPromise].concat(eventPromises)).then((results) => { - return results[0]; // the first returned value is from the 'sendPromise' which is from the 'sendTransaction()' call - }).catch((err) => { - console.error(JSON.stringify( - ["error", 'Failed to send transaction and get notifications within the timeout period.'] - ) - ); - return 'Failed to send transaction and get notifications within the timeout period.'; - }); - } else { - console.error( - 'Failed to send Proposal or receive valid response. Response null or status is not 200. exiting...' - ); - return 'Failed to send Proposal or receive valid response. Response null or status is not 200. exiting...'; - } - }, (err) => { - console.error('Failed to send proposal due to error: ' + err.stack ? err.stack : - err); - return 'Failed to send proposal due to error: ' + err.stack ? 
err.stack : - err; - }).then((response) => { - if (response.status === 'SUCCESS') { - var jsonResponse = ["ok", tx_id.getTransactionID().toString()]; - console.info(JSON.stringify(jsonResponse)); - return JSON.stringify(jsonResponse); - } else { - console.error(JSON.stringify(["ok", 'Failed to order the transaction. Error code: ' + response])); - return 'Failed to order the transaction. Error code: ' + response.status; - } - }, (err) => { - console.error('Failed to send transaction due to error: ' + err.stack ? err - .stack : err); - return 'Failed to send transaction due to error: ' + err.stack ? err.stack : - err; - }); -} - -/** - * Perform a transaction invoke using the network APIs - * @param {String} org the organisation to use - * @param {JSON} chaincode the chaincode descriptor - * @param {JSON} network_config the network configuration - */ -async function _submitTransaction(org, chaincode, network_config){ - const ccp = network_config['common-connection-profile']; - const orgConfig = ccp.organizations[org]; - const cert = common.readAllFiles(orgConfig.signedCertPEM)[0]; - const key = common.readAllFiles(orgConfig.adminPrivateKeyPEM)[0]; - const inMemoryWallet = new InMemoryWallet(); - - const gateway = new Gateway(); - - try { - await inMemoryWallet.import('admin', X509WalletMixin.createIdentity(orgConfig.mspid, cert, key)); - - const opts = { - wallet: inMemoryWallet, - identity: 'admin', - discovery: { enabled: false } - }; - - await gateway.connect(ccp, opts); - - const network = await gateway.getNetwork(chaincode.channelId) - const contract = await network.getContract(chaincode.chaincodeId); - - const args = [chaincode.fcn, ...chaincode.args]; - const result = await contract.submitTransaction(...args); - - gateway.disconnect(); - return result; - } catch(err) { - throw new Error(err); - }; -} - -exports.invoke = invoke; -require('make-runnable'); - -// Example test calls -// node invoke.js invoke User1@org1.example.com Org1ExampleCom '{"channelId": 
"behavesystest", "args": ["a", "b", "10"], "chaincodeId": "mycc", "name": "mycc", "fcn": "invoke"}' ['peer0.org1.example.com'] orderer0.example.com /Users/nkl/go/src/github.com/hyperledger/fabric-test/feature/configs/0be5908ac30011e88d70acbc32c08695/network-config.json '{"transaction": "true"}' -// node invoke.js invoke User1@org1.example.com Org1ExampleCom '{"channelId": "behavesystest", "args": ["a", "b", "10"], "chaincodeId": "mycc", "name": "mycc", "fcn": "invoke"}' ['peer0.org1.example.com'] orderer0.example.com /Users/nkl/go/src/github.com/hyperledger/fabric-test/feature/configs/4fe4f54cc62411e8977eacbc32c08695/network-config.json '{"transaction": "true"}' \ No newline at end of file diff --git a/app/platform/fabric/e2e-test/feature/sdk/node/query.js b/app/platform/fabric/e2e-test/feature/sdk/node/query.js deleted file mode 100644 index f3088f436..000000000 --- a/app/platform/fabric/e2e-test/feature/sdk/node/query.js +++ /dev/null @@ -1,184 +0,0 @@ -/** - * Copyright IBM Corp All Rights Reserved - * - * SPDX-License-Identifier: Apache-2.0 - */ - -const fs = require('fs'); -const path = require('path'); -const util = require('util'); -const common = require('./common.js'); -const {Gateway, InMemoryWallet, X509WalletMixin} = require('fabric-network'); -const Client = require('fabric-client'); -let client = new Client(); - -/** - * Perform a query using installed/instantiated chaincode - * @param {String} inputFilePath the file path containing test run information as a JSON object containing - * { - * username: the test user name - * org: the organisation to use - * chaincode: object describing the chaincode parameters - * peer: array of the peers to use - * networkConfigFile: the network configuration file path - * opts: additional test parameters - * } - */ -function query(inputFilePath) { - - const filePath = path.join(__dirname, inputFilePath); - const inputData = JSON.parse(fs.readFileSync(filePath, {encoding: 'utf-8'})); - - const temptext = '\n\n user : ' 
+ inputData.user + - '\n\n Org: ' + inputData.org + - '\n\n OrgName: ' + inputData.orgName + - '\n\n chaincode : ' + util.format(inputData.chaincode) + - '\n\n peerNames : ' + inputData.peers + - '\n\n network_config_path: ' + inputData.networkConfigFile; - '\n\n opts: '+ util.format(inputData.opts); - //console.log(temptext); - - let network_config; - try { - network_config = JSON.parse(fs.readFileSync(inputData.networkConfigFile)); - } catch(err) { - console.error(err); - } - - // Node SDK implements network and native options, disambiguate on the passed opts - if(inputData.opts && inputData.opts['network-model'] && inputData.opts['network-model'].localeCompare("true") === 0){ - console.log('evaluating transaction .... ') - return _evaluateTransaction(inputData.orgName, inputData.chaincode, network_config) - } else { - console.log('performing query .... ') - return _query(inputData.user, inputData.peers[0], inputData.org, inputData.orgName, inputData.chaincode, network_config) - } -} - -/** - * Perform a query using the NodeJS SDK - * @param {String} username the user - * @param {String} peer the peer to use - * @param {String} userOrg the organisation to use - * @param {String} orgName the organisation name - * @param {JSON} chaincode the chaincode descriptor - * @param {JSON} network_config_details the network configuration - */ -async function _query(username, peer, org, orgName, chaincode, network_config_details){ - const user = username.split('@')[1] ? username : username+'@'+org; - const userOrg = username.split('@')[1] ? username.split('@')[1] : org; - const target = buildTarget(peer, orgName, network_config_details['network-config']); - - Client.setConfigSetting('request-timeout', 60000); - - // this is a transaction, will just use org's identity to - // submit the request. 
intentionally we are using a different org - // than the one that submitted the "move" transaction, although either org - // should work properly - const channel = client.newChannel(chaincode.channelId); - - const tlsInfo = await common.getRegisteredUsers(client, user, userOrg, network_config_details['networkID'], network_config_details['network-config'][orgName]['mspid']); - client.setTlsClientCertAndKey(tlsInfo.certificate, tlsInfo.key); - - const store = await Client.newDefaultKeyValueStore({path: common.getKeyStoreForOrg(userOrg)}); - client.setStateStore(store); - - const admin = await common.getRegisteredUsers(client, user,userOrg, network_config_details['networkID'], network_config_details['network-config'][orgName]['mspid']); - - tx_id = client.newTransactionID(); - common.setupPeers(peer, channel, orgName, client, network_config_details['network-config'], network_config_details['tls']); - - let request = { - targets: [target], - txId: tx_id, - chaincodeId: chaincode.chaincodeId, - fcn: chaincode.fcn, - args: chaincode.args - }; - - try { - // send query - const response_payloads = await channel.queryByChaincode(request); - if (response_payloads) { - let stringRespose = formatString(response_payloads.toString()); - var jsonResponse = {'response': stringRespose}; - console.info('\n query jsonResponse: ', jsonResponse); - return JSON.stringify(jsonResponse); - } else { - console.error('response_payloads is null'); - return {'error': 'response_payloads is null'}; - } - } catch (err) { - console.error(['error', 'Failed to send query due to error:' + err.stack ? err.stack : err]); - return {'Error': 'Failed to send query due to error:' + err.stack ? 
err.stack : err}; - } -}; - -function buildTarget(peer, org, network_config) { - var target = null; - if (typeof peer !== 'undefined') { - let targets = common.newPeers([peer], org, network_config, client); - if (targets && targets.length > 0) target = targets[0]; - } - return target; -} - -/** - * Conditionally strip the leading/trailing double quotes - */ -function formatString(inputString){ - if ((inputString.charAt(0)=='"') && (inputString.charAt(inputString.length -1)=='"')) { - return inputString.slice(1, -1); - } else { - return inputString; - } -} - -/** - * Perform a query using the NodeJS Netowrk APIs - * @param {String} org the organisation to use - * @param {JSON} chaincode the chaincode descriptor - * @param {JSON} network_config the network configuration - */ -async function _evaluateTransaction(org, chaincode, network_config){ - const ccp = network_config['common-connection-profile']; - const orgConfig = ccp.organizations[org]; - const cert = common.readAllFiles(orgConfig.signedCertPEM)[0]; - const key = common.readAllFiles(orgConfig.adminPrivateKeyPEM)[0]; - const inMemoryWallet = new InMemoryWallet(); - - const gateway = new Gateway(); - - try { - await inMemoryWallet.import('admin', X509WalletMixin.createIdentity(orgConfig.mspid, cert, key)); - - const opts = { - wallet: inMemoryWallet, - identity: 'admin', - discovery: { enabled: false } - }; - - await gateway.connect(ccp, opts); - - const network = await gateway.getNetwork(chaincode.channelId) - const contract = await network.getContract(chaincode.chaincodeId); - - const args = [chaincode.fcn, ...chaincode.args]; - const result = await contract.evaluateTransaction(...args); - - gateway.disconnect(); - - let stringRespose = formatString(result.toString()); - let jsonResponse = {'response': stringRespose}; - return JSON.stringify(jsonResponse); - } catch(err) { - throw new Error(err); - }; -} - -exports.query = query; -require('make-runnable'); - -// Example test calls -// node query.js query 
User1@org2.example.com Org2ExampleCom' {"args": ["a"], "fcn":"query", "channelId": "behavesystest", "chaincodeId": "mycc"}' ["peer1.org2.example.com"] /opt/gopath/src/github.com/hyperledger/fabric-test/feature/configs/3f09636eb35811e79e510214683e8447/network-config.json; -// node query.js query User1@org1.example.com Org1ExampleCom '{"channelId": "behavesystest", "args": ["a"], "chaincodeId": "mycc", "name": "mycc", "fcn": "query"}' ['peer0.org1.example.com'] /Users/nkl/go/src/github.com/hyperledger/fabric-test/feature/configs/4fe4f54cc62411e8977eacbc32c08695/network-config.json '{"transaction": "true"}' \ No newline at end of file diff --git a/app/platform/fabric/e2e-test/feature/steps/__init__.py b/app/platform/fabric/e2e-test/feature/steps/__init__.py deleted file mode 100644 index 49cd7f3ac..000000000 --- a/app/platform/fabric/e2e-test/feature/steps/__init__.py +++ /dev/null @@ -1,5 +0,0 @@ -# -# Copyright IBM Corp. All Rights Reserved. -# -# SPDX-License-Identifier: Apache-2.0 -# diff --git a/app/platform/fabric/e2e-test/feature/steps/basic_impl.py b/app/platform/fabric/e2e-test/feature/steps/basic_impl.py deleted file mode 100644 index da1f43bc0..000000000 --- a/app/platform/fabric/e2e-test/feature/steps/basic_impl.py +++ /dev/null @@ -1,456 +0,0 @@ -# -# Copyright IBM Corp. All Rights Reserved. 
-# -# SPDX-License-Identifier: Apache-2.0 -# - -from behave import * -import time -import os -import uuid -import subprocess -from shutil import copyfile -import common_util -import compose_util -import orderer_util -import config_util -from endorser_util import CLIInterface, ToolInterface, SDKInterface -import database_util - - -@given(u'I wait "{seconds}" seconds') -@when(u'I wait "{seconds}" seconds') -@then(u'I wait "{seconds}" seconds') -def step_impl(context, seconds): - time.sleep(float(seconds)) - -@given(u'I use the {language} SDK interface') -def step_impl(context, language): - context.interface = SDKInterface(context, language) - -@given(u'I use the CLI interface') -def step_impl(context): - context.interface = CLIInterface() - -@given(u'I use the tool interface {toolCommand}') -def step_impl(context, toolCommand): - # The tool command is what is used to generate the network that will be setup for use in the tests - context.network = toolCommand - context.interface = ToolInterface(context) - -@given(u'I compose "{composeYamlFile}"') -def compose_impl(context, composeYamlFile, projectName=None, startContainers=True): - if not hasattr(context, "composition"): - context.composition = compose_util.Composition(context, composeYamlFile, - projectName=projectName, - startContainers=startContainers) - else: - context.composition.composeFilesYaml = composeYamlFile - context.composition.up() - context.compose_containers = context.composition.collectServiceNames() - -def getCompositionFiles(context, curpath, ordererType, database="leveldb", fca=False): - # Get the correct composition file - composeFiles = ["%s/docker-compose/docker-compose-%s.yml" % (curpath, ordererType)] - if database.lower() != "leveldb": - composeFiles.append("%s/docker-compose/docker-compose-%s.yml" % (curpath, database.lower())) - composeFiles.append("%s/docker-compose/docker-compose-cli.yml" % (curpath)) - - # If using fabric-ca insert the fabric-ca composition file to start first - if fca: 
- composeFiles.insert(0, "%s/docker-compose/docker-compose-fca.yml" % (curpath)) - - for composeFile in composeFiles: - assert os.path.exists(composeFile), "The docker compose file does not exist: {0}".format(composeFile) - return composeFiles - -def bootstrapped_impl(context, ordererType, database, tlsEnabled=False, timeout=300, ouEnabled=False, fca=False): - assert ordererType in config_util.ORDERER_TYPES, "Unknown network type '%s'" % ordererType - curpath = os.path.realpath('.') - - # Get the correct composition file - context.composeFile = getCompositionFiles(context, curpath, ordererType, database, fca) - - # Should TLS be enabled - context.tls = tlsEnabled - compose_util.enableTls(context, tlsEnabled) - - # Perform bootstrap process - context.ordererProfile = config_util.PROFILE_TYPES.get(ordererType, "SampleInsecureSolo") - channelID = context.interface.SYS_CHANNEL_ID - if hasattr(context, "composition"): - context.projectName = context.composition.projectName - elif not hasattr(context, "projectName"): - context.projectName = str(uuid.uuid1()).replace('-','') - - # Determine number of orderers - numOrderers = 1 - if ordererType == 'kafka': - numOrderers = 3 - - # Get Configs setup - if ouEnabled: - config_util.buildCryptoFile(context, 2, 2, numOrderers, 2, ouEnable=ouEnabled) - config_util.generateCrypto(context, "./configs/{0}/crypto.yaml".format(context.projectName)) - else: - config_util.generateCrypto(context) - config_util.generateConfig(context, channelID, config_util.CHANNEL_PROFILE, context.ordererProfile) - - compose_impl(context, context.composeFile, projectName=context.projectName) - wait_for_bootstrap_completion(context, timeout) - - -def wait_for_bootstrap_completion(context, timeout): - peers = context.interface.get_peers(context) - brokers = [] - try: - with common_util.Timeout(timeout): - common_util.wait_until_in_log(peers, "Starting profiling server with listenAddress = 0.0.0.0:6060") - - # Check Kafka logs - if "kafka0" in 
context.composition.collectServiceNames(): - kafkas = orderer_util.getKafkaBrokerList(context, "orderer0.example.com") - # Remove the ports from the list - for kafka in kafkas: - broker = kafka.split(":") - brokers.append(broker[0]) - common_util.wait_until_in_log(brokers, " Startup complete. ") - finally: - assert common_util.is_in_log(peers, "Starting profiling server with listenAddress = 0.0.0.0:6060"), "The peer containers are not ready in the allotted time ({} seconds)".format(timeout) - assert common_util.is_in_log(brokers, " Startup complete. "), "The kafka containers are not ready in the allotted time ({} seconds)".format(timeout) - - # A 5-second additional delay ensures ready state - time.sleep(5) - - -def bootstrap_fca_impl(context, tlsEnabled=False): - # Should TLS be enabled - context.tls = tlsEnabled - compose_util.enableTls(context, tlsEnabled) - context = config_util.setCAConfig(context) - compose_impl(context, ["docker-compose/docker-compose-preca.yml"], context.projectName) - -@given(u'I bootstrap a fabric-ca server without tls') -def step_impl(context): - bootstrap_fca_impl(context, False) - -@given(u'I bootstrap a fabric-ca server with tls') -def step_impl(context): - bootstrap_fca_impl(context, True) - -@given(u'I bootstrap a fabric-ca server') -def step_impl(context): - bootstrap_fca_impl(context, False) - - -@given(u'I have a fabric-ca bootstrapped fabric network of type {ordererType} using state-database {database} with tls') -def step_impl(context, ordererType, database): - config_util.setCAConfig(context) - bootstrapped_impl(context, ordererType, database, True, fca=True) - -@given(u'I have a fabric-ca bootstrapped fabric network of type {ordererType} using state-database {database} without tls') -def step_impl(context, ordererType, database): - config_util.setCAConfig(context) - bootstrapped_impl(context, ordererType, database, False, fca=True) - -@given(u'I have a fabric-ca bootstrapped fabric network of type {ordererType} using 
state-database {database}') -def step_impl(context, ordererType, database): - config_util.setCAConfig(context) - bootstrapped_impl(context, ordererType, database, True, fca=True) - -@given(u'I have a fabric-ca bootstrapped fabric network using state-database {database} with tls') -def step_impl(context, database): - config_util.setCAConfig(context) - bootstrapped_impl(context, "solo", database, True, fca=True) - -@given(u'I have a fabric-ca bootstrapped fabric network of type {ordererType} with tls') -def step_impl(context, ordererType): - config_util.setCAConfig(context) - bootstrapped_impl(context, ordererType, "leveldb", True, fca=True) - -@given(u'I have a fabric-ca bootstrapped fabric network with tls') -def step_impl(context): - config_util.setCAConfig(context) - bootstrapped_impl(context, "solo", "leveldb", True, fca=True) - -@given(u'I have a fabric-ca bootstrapped fabric network of type {ordererType}') -def step_impl(context, ordererType): - config_util.setCAConfig(context) - bootstrapped_impl(context, ordererType, "leveldb", True, fca=True) - -@given(u'I have a bootstrapped fabric network of type {ordererType} with tls with organizational units enabled on all nodes') -def step_impl(context, ordererType): - bootstrapped_impl(context, ordererType, "leveldb", True, ouEnabled=True) - -@given(u'I have a bootstrapped fabric network of type {ordererType} with tls with organizational units enabled on all {orgName} nodes') -def step_impl(context, ordererType, orgName): - bootstrapped_impl(context, ordererType, "leveldb", True, ouEnabled=orgName) - -@given(u'I have a bootstrapped fabric network of type {ordererType} using state-database {database} with tls') -def step_impl(context, ordererType, database): - bootstrapped_impl(context, ordererType, database, True) - -@given(u'I have a bootstrapped fabric network of type {ordererType} using state-database {database} without tls') -def step_impl(context, ordererType, database): - bootstrapped_impl(context, ordererType, 
database, False) - -@given(u'I have a bootstrapped fabric network of type {ordererType} using state-database {database}') -def step_impl(context, ordererType, database): - bootstrapped_impl(context, ordererType, database, False) - -@given(u'I have a bootstrapped fabric network using state-database {database} with tls') -def step_impl(context, database): - bootstrapped_impl(context, "solo", database, True) - -@when(u'a user defines a couchDB index named {indexName} with design document name "{docName}" containing the fields "{fields}" to the chaincode at path "{path}"') -def step_impl(context, indexName, docName, fields, path): - database_util.generateIndex(indexName, docName, fields, path) - -@given(u'I have a bootstrapped fabric network of type {ordererType} with tls') -def step_impl(context, ordererType): - bootstrapped_impl(context, ordererType, "leveldb", True) - -@given(u'I have a bootstrapped fabric network with tls') -def step_impl(context): - bootstrapped_impl(context, "solo", "leveldb", True) - -@given(u'I have a bootstrapped fabric network using state-database {database} without tls') -def step_impl(context, database): - bootstrapped_impl(context, "solo", database, False) - -@given(u'I have a bootstrapped fabric network using state-database {database}') -def step_impl(context, database): - bootstrapped_impl(context, "solo", database, False) - -@given(u'I have a bootstrapped fabric network of type {ordererType} without tls') -def step_impl(context, ordererType): - bootstrapped_impl(context, ordererType, "leveldb", False) - -@given(u'I have a bootstrapped fabric network of type {ordererType}') -def step_impl(context, ordererType): - bootstrapped_impl(context, ordererType, "leveldb", False) - -@given(u'I have a bootstrapped fabric network without tls') -def step_impl(context): - bootstrapped_impl(context, "solo", "leveldb", False) - -@given(u'I have a bootstrapped fabric network') -def step_impl(context): - bootstrapped_impl(context, "solo", "leveldb", 
False) - -@when(u'I vendor "{language}" packages for fabric-based chaincode at "{path}"') -def step_impl(context, language, path): - if language.upper() == "GOLANG": - print(subprocess.check_output(["govendor init && govendor add +external"], cwd=path, shell=True)) - elif language=="NODE": - print(subprocess.check_output(["npm install"], cwd=path, shell=True)) - else: - assert False, "undefined language: {}".format(context.language) - -@when(u'I vendor go packages for non-fabric-based chaincode at "{path}"') -def step_impl(context, path): - print(subprocess.check_output(["govendor init && govendor add +external && govendor fetch {}".format(path)], cwd=path, shell=True)) - -@when(u'the initial leader peer of "{org}" is taken down by doing a {takeDownType}') -def step_impl(context, org, takeDownType): - bringdown_impl(context, context.interface.get_initial_leader(context, org), takeDownType) - -@when(u'the initial leader peer of "{org}" is taken down') -def step_impl(context, org): - bringdown_impl(context, context.interface.get_initial_leader(context, org)) - -@when(u'the initial non-leader peer of "{org}" is taken down by doing a {takeDownType}') -def step_impl(context, org, takeDownType): - bringdown_impl(context, context.interface.get_initial_non_leader(context, org), takeDownType) - -@when(u'the initial non-leader peer of "{org}" is taken down') -def step_impl(context, org): - bringdown_impl(context, context.interface.get_initial_non_leader(context, org)) - -@when(u'"{component}" is taken down by doing a {takeDownType}') -def step_impl(context, component, takeDownType): - bringdown_impl(context, component, takeDownType) - -@when(u'"{component}" is taken down') -def bringdown_impl(context, component, takeDownType="stop"): - assert component in context.composition.collectServiceNames(), "Unknown component '{0}'".format(component) - if takeDownType=="stop": - context.composition.stop([component]) - elif takeDownType=="pause": - 
context.composition.pause([component]) - elif takeDownType=="disconnect": - context.composition.disconnect([component]) - else: - assert False, "takedown process undefined: {}".format(context.takeDownType) - -@when(u'the initial leader peer of "{org}" comes back up by doing a {bringUpType}') -def step_impl(context, org, bringUpType): - bringup_impl(context, context.interface.get_initial_leader(context, org), bringUpType) - -@when(u'the initial leader peer of "{org}" comes back up') -def step_impl(context, org): - bringup_impl(context, context.interface.get_initial_leader(context, org)) - -@when(u'the initial non-leader peer of "{org}" comes back up by doing a {bringUpType}') -def step_impl(context, org, bringUpType): - bringup_impl(context, context.interface.get_initial_non_leader(context, org), bringUpType) - -@when(u'the initial non-leader peer of "{org}" comes back up') -def step_impl(context, org): - bringup_impl(context, context.interface.get_initial_non_leader(context, org)) - -@when(u'"{component}" comes back up by doing a {bringUpType}') -def step_impl(context, component, bringUpType): - bringup_impl(context, component, bringUpType) - -@when(u'"{component}" comes back up') -def bringup_impl(context, component, bringUpType="start"): - assert component in context.composition.collectServiceNames(), "Unknown component '{0}'".format(component) - if bringUpType=="start": - context.composition.start([component]) - elif bringUpType=="unpause": - context.composition.unpause([component]) - elif bringUpType=="connect": - context.composition.connect([component]) - else: - assert False, "Bringing-up process undefined: {}".format(context.bringUpType) - -@when(u'I start a fabric network using a {ordererType} orderer service with tls') -def start_network_impl(context, ordererType, tlsEnabled=True): - assert ordererType in config_util.ORDERER_TYPES, "Unknown network type '%s'" % ordererType - curpath = os.path.realpath('.') - - context.composeFile = 
getCompositionFiles(context, curpath, ordererType) - - if not hasattr(context, "projectName"): - context.projectName = None - - # Should TLS be enabled - context.tls = tlsEnabled - compose_util.enableTls(context, tlsEnabled, projectName=context.projectName) - - compose_impl(context, context.composeFile, projectName=context.projectName) - -@when(u'I start a fabric network using a {ordererType} orderer service') -def step_impl(context, ordererType): - start_network_impl(context, ordererType, False) - -@when(u'I start a fabric network with TLS') -def step_impl(context): - start_network_impl(context, "solo", True) - -@when(u'I start a fabric network') -def step_impl(context): - start_network_impl(context, "solo", False) - -@when(u'I locally execute the command "{command}" saving the results as "{key}"') -def step_impl(context, command, key): - # This is a workaround to allow sending piped commands to behave without conflicting with the pipes in the table. - command = command.replace("!", "|") - if not hasattr(context, "command_result"): - context.command_result = {} - - if "|" in command: - context.command_result[key] = subprocess.check_output(command, shell=True).strip() - else: - cmd = command.split() - context.command_result[key] = subprocess.check_output(cmd, env=os.environ).strip() - print("command result: {}".format(context.command_result)) - -@when(u'an admin adds an organization to the {channelName} channel config') -def step_impl(context, channelName): - add_org_impl(context, "org3.example.com", channelName) - -@when(u'an admin adds an organization to the channel config') -def step_impl(context): - add_org_impl(context, "org3.example.com", context.interface.TEST_CHANNEL_ID) - -@when(u'an admin adds an organization named {orgMSP} to the {channelName} channel config') -def add_org_impl(context, orgMSP, channelName): - configDir = "./configs/{0}".format(context.projectName) - - #Save original crypto.yaml file - if 
os.path.exists("{0}/crypto.yaml".format(configDir)): - copyfile("{0}/crypto.yaml".format(configDir), - "{0}/crypto_orig.yaml".format(configDir)) - - # Add cryptogen info for 3rd org - config_util.buildCryptoFile(context, 1, 2, 0, 2, orgMSP=orgMSP) - config_util.generateCrypto(context, "{0}/crypto.yaml".format(configDir)) - config_util.generateCryptoDir(context, 1, 2, 0, 2, tlsExist=context.tls, orgMSP=orgMSP) - args = config_util.getNewOrg(context, orgMSP) - updated_config = config_util.addNewOrg(context, args, "Application", channelName) - - update_impl(context, 'peer', channelName, updated_config, userName='Admin') - -@when(u'an admin removes an organization named {orgMSP} from the channel config') -def step_impl(context, orgMSP): - del_org_impl(context, orgMSP, context.interface.TEST_CHANNEL_ID) - -@when(u'an admin removes an organization named {orgMSP} from the {channelName} channel config') -def del_org_impl(context, orgMSP, channelName): - configDir = "./configs/{0}".format(context.projectName) - - # Format the args for removing orgMSP - args = config_util.delNewOrg(context, "Application", orgMSP, channelName) - - update_impl(context, 'peer', channelName, args, userName='Admin') - -@when(u'an {component} admin updates the {channelName} channel config with {args}') -def step_impl(context, component, channelName, args): - update_impl(context, component, channelName, args, userName='Admin') - -@when(u'an admin updates the channel config with {args}') -def step_impl(context, args): - update_impl(context, 'peer', context.interface.TEST_CHANNEL_ID, args, userName='Admin') - -@when(u'an {component} admin with username {userName} updates the {channelName} channel config with {args}') -def update_impl(context, component, channelName, args, userName='Admin'): - assert component in ('peer', 'orderer'), "Error: the component type must be either 'peer' or 'orderer' instead of '{}'.".format(component) - if channelName == "system": - assert component == 'orderer', "Error: 
Only an orderer admin may update the system channel." - channelName = context.interface.SYS_CHANNEL_ID - - # fetch the block for the specified channel - peers = context.interface.get_peers(context) - assert len(peers) > 0, "Error: There are no peers on this fabric network." - context.interface.fetch_channel(context, peers, 'orderer0.example.com', channelName, user=userName) - - # Convert block file to json & Prep for update - context.block_filename = config_util.configUpdate(context, args, "Application", channelName) - -@when(u'the peers from the added organization are added to the network') -def step_impl(context): - curpath = os.path.realpath('.') - context.composeFile.append("%s/docker-compose/docker-compose-peer-org3.yml" % (curpath)) - context.composition.up(force_recreate=False, components=["peer0.org3.example.com", "peer1.org3.example.com"]) - -@then(u'the initial non-leader peer of "{org}" has become the leader') -def step_impl(context, org): - assert hasattr(context, 'initial_non_leader'), "Error: initial non-leader was not set previously. This statement works only with pre-set initial non-leader." - max_waittime=15 - waittime=5 - try: - with common_util.Timeout(max_waittime): - while not common_util.get_leadership_status(context.initial_non_leader[org]): - time.sleep(waittime) - finally: - assert common_util.get_leadership_status(context.initial_non_leader[org]), "Error: The initial non-leader peer has not become leader, after "+str(max_waittime)+" seconds." - -@then(u'the initial non-leader peer of "{org}" has not become the leader') -def step_impl(context, org): - assert hasattr(context, 'initial_non_leader'), "Error: initial non-leader was not set previously. This statement works only with pre-set initial non-leader." - assert not common_util.get_leadership_status(context.initial_non_leader[org]), "Error: initial non-leader peer has already become leader." 
- -@then(u'the logs on {component} contains "{data}" within {timeout:d} seconds') -def step_impl(context, component, data, timeout): - with common_util.Timeout(timeout): - common_util.wait_until_in_log([component], data) - -@then(u'the logs on {component} contains {data}') -def step_impl(context, component, data): - assert common_util.is_in_log(component, data), "Error: the {0} log does not contain {1}.".format(component, data) - -@then(u'there are no errors') -def step_impl(context): - pass diff --git a/app/platform/fabric/e2e-test/feature/steps/bootstrap_impl.py b/app/platform/fabric/e2e-test/feature/steps/bootstrap_impl.py deleted file mode 100644 index c56ca2ce4..000000000 --- a/app/platform/fabric/e2e-test/feature/steps/bootstrap_impl.py +++ /dev/null @@ -1,108 +0,0 @@ -# -# Copyright IBM Corp All Rights Reserved -# -# SPDX-License-Identifier: Apache-2.0 -# - -from behave import * -import os -import config_util -import orderer_util -import common_util - -TEST_CHANNEL_ID = "syschannel" - -@given(u'I have a fabric config file') -def step_impl(context): - if not hasattr(context, "projectName"): - config_util.generateCrypto(context) - config_util.setupConfigs(context, TEST_CHANNEL_ID) - -@given(u'I have a crypto config file with {numOrgs} orgs, {numPeers} peers, {numOrderers} orderers, and {numUsers} users') -def step_impl(context, numOrgs, numPeers, numOrderers, numUsers): - config_util.buildCryptoFile(context, numOrgs, numPeers, numOrderers, numUsers) - -@given(u'I register the orderers using fabric-ca') -def step_impl(context): - orderers = context.interface.get_orderers(context) - context.interface.registerIdentities(context, orderers) - -@given(u'I register the peers using fabric-ca') -def step_impl(context): - peers = context.interface.get_peers(context) - context.interface.registerIdentities(context, peers) - -@given(u'I enroll the following users using fabric-ca') -def step_impl(context): - assert 'table' in context, "Expected table with user, 
organization, password, and role" - context.users = {} - for row in context.table.rows: - context.users[row['username']] = {'organization': row['organization'], - 'password': row['password'], - 'role': row['role']} - context.interface.enrollUsersFabricCA(context) - -@when(u'the network is bootstrapped for an orderer of type {ordererType}') -def ordererBootstrap_impl(context, ordererType): - context.ordererProfile = config_util.PROFILE_TYPES.get(ordererType, "SampleInsecureSolo") - config_util.generateOrdererConfig(context, context.interface.SYS_CHANNEL_ID, context.ordererProfile, "orderer.block") - config_util.generateChannelConfig(context.interface.SYS_CHANNEL_ID, config_util.CHANNEL_PROFILE, context) - -@when(u'the network is bootstrapped for an orderer') -def step_impl(context): - ordererBootstrap_impl(context, "solo") - -@when(u'the network is bootstrapped for a channel named "{channelId}"') -def step_impl(context, channelId): - config_util.generateChannelConfig(channelId, config_util.CHANNEL_PROFILE, context) - -@when(u'the crypto material is generated for TLS network') -@when(u'the crypto material is generated') -def step_impl(context): - config_util.generateCrypto(context, "./configs/{0}/crypto.yaml".format(context.projectName)) - -@then(u'crypto directories are generated containing certificates for {numOrgs} orgs, {numPeers} peers, {numOrderers} orderers, and {numUsers} users') -def step_impl(context, numOrgs, numPeers, numOrderers, numUsers): - config_util.generateCryptoDir(context, numOrgs, numPeers, numOrderers, numUsers, tlsExist=False) - -@then(u'crypto directories are generated containing tls certificates for {numOrgs} orgs, {numPeers} peers, {numOrderers} orderers, and {numUsers} users') -def step_impl(context, numOrgs, numPeers, numOrderers, numUsers): - config_util.generateCryptoDir(context, numOrgs, numPeers, numOrderers, numUsers, tlsExist=True) - -@then(u'the "{fileName}" file is generated') -def step_impl(context, fileName): - assert 
hasattr(context, "projectName"), "There is no projectName assigned for this test" - assert os.path.exists("./configs/{0}/{1}".format(context.projectName, fileName)), "The file {0} does not exist".format(fileName) - -@then(u'the updated config block does not contain {value}') -def step_impl(context, value): - blockInfo = config_util.inspectOrdererConfig(context, "{}.block".format(context.interface.TEST_CHANNEL_ID), context.interface.SYS_CHANNEL_ID) - assert str(value) not in str(blockInfo) - -@then(u'the updated config block contains {value}') -@then(u'the orderer block contains {value}') -def step_impl(context, value): - blockInfo = config_util.inspectOrdererConfig(context, "{}.block".format(context.interface.TEST_CHANNEL_ID), context.interface.SYS_CHANNEL_ID) - assert str(value) in str(blockInfo) - -@then(u'the updated config block "{fileName}" contains {value}') -@then(u'the orderer block "{fileName}" contains {value}') -def step_impl(context, fileName, value): - blockInfo = config_util.inspectOrdererConfig(context, fileName, context.interface.SYS_CHANNEL_ID) - assert str(value) in str(blockInfo) - -@then(u'the channel transaction file contains {value}') -def step_impl(context, value): - blockInfo = config_util.inspectChannelConfig(context, "{}.tx".format(context.interface.TEST_CHANNEL_ID), context.interface.SYS_CHANNEL_ID) - assert str(value) in str(blockInfo) - -@then(u'the channel transaction file "{fileName}" contains {value}') -def step_impl(context, fileName, value): - blockInfo = config_util.inspectChannelConfig(context, fileName, context.interface.SYS_CHANNEL_ID) - assert str(value) in str(blockInfo) - -@when('the orderer node logs receiving the orderer block') -def step_impl(context): - orderers = orderer_util.getOrdererList(context) - for orderer in orderers: - assert common_util.is_in_log([orderer], "with genesis block hash"), "The genesis block is not received" diff --git a/app/platform/fabric/e2e-test/feature/steps/common_util.py 
b/app/platform/fabric/e2e-test/feature/steps/common_util.py deleted file mode 100644 index 6c1b3dc73..000000000 --- a/app/platform/fabric/e2e-test/feature/steps/common_util.py +++ /dev/null @@ -1,89 +0,0 @@ -# -# Copyright IBM Corp All Rights Reserved -# -# SPDX-License-Identifier: Apache-2.0 -# - -import os -import sys -import datetime -import subprocess -import time -import signal - -def changeFormat(value): - ''' - Here is the function that returns by changing the format of time. - For example 'Seconds' to "s" - ''' - changedString = value - toChangeUnits = value.split(" ") - if len(toChangeUnits) == 2: - if "minute" in toChangeUnits[1]: - changedString = toChangeUnits[0]+"m" - elif "second" in toChangeUnits[1]: - changedString = toChangeUnits[0]+"s" - elif "hour" in toChangeUnits[1]: - changedString = toChangeUnits[0]+"h" - elif "MB" in toChangeUnits[1]: - changedString = str(int( float(toChangeUnits[0]) * 1024 * 1024) ) - elif "KB" in toChangeUnits[1]: - changedString = str(int( float(toChangeUnits[0]) * 1024) ) - elif "B" in toChangeUnits[1]: - changedString = toChangeUnits[0] - return changedString - -def convertBoolean(boolean): - return str(boolean).lower() - -def convertToSeconds(envValue): - if envValue[-1] == 'm': - value = 60 * int(envValue[:-1]) - elif envValue[-1] == 's': - value = int(envValue[:-1]) - elif envValue[-1] == 'h': - value = 3600 * int(envValue[:-1]) - else: - raise "'{0}' is not in the expected format".format(envValue) - return value - -def get_leadership_status(container): - #Checks the last occurence of "IsLeader" and its result - rc = subprocess.call( - "docker logs " + container + " 2>&1 | grep -a \"IsLeader\" | tail -n 1 | grep -a \"Returning true\"", - shell=True) - if rc != 0: - return False - return True - -def is_in_log(containers, keyText): - for container in containers: - rc = subprocess.call( - "docker logs " + container + " 2>&1 | grep " + "\"" + keyText + "\"", - shell=True) - if rc != 0: - return False - return True - 
-def wait_until_in_log(containers, keyText): - while not is_in_log(containers, keyText): - time.sleep(1) - return True - - -class Timeout(): - class TimeoutException(Exception): - pass - - def __init__(self, sec): - self.sec = sec - - def __enter__(self): - signal.signal(signal.SIGALRM, self.raise_timeout) - signal.alarm(self.sec) - - def __exit__(self, *args): - signal.alarm(0) - - def raise_timeout(self, *args): - raise Timeout.TimeoutException() diff --git a/app/platform/fabric/e2e-test/feature/steps/compose_util.py b/app/platform/fabric/e2e-test/feature/steps/compose_util.py deleted file mode 100644 index 82f5f14a8..000000000 --- a/app/platform/fabric/e2e-test/feature/steps/compose_util.py +++ /dev/null @@ -1,328 +0,0 @@ -# -# Copyright IBM Corp All Rights Reserved -# -# SPDX-License-Identifier: Apache-2.0 -# - -import os -import sys -import subprocess -import shutil -import json -import uuid -import time -from common_util import convertBoolean - - -def enableTls(context, tlsEnabled, projectName=None): - if not hasattr(context, "composition"): - context.composition = Composition(context, projectName=projectName, startContainers=False) - context.composition.environ["ORDERER_GENERAL_TLS_ENABLED"] = convertBoolean(tlsEnabled) - context.composition.environ["CORE_PEER_TLS_ENABLED"] = convertBoolean(tlsEnabled) - context.composition.environ["FABRIC_CA_SERVER_TLS_ENABLED"] = convertBoolean(tlsEnabled) - - -class ContainerData: - def __init__(self, containerName, ipAddress, envFromInspect, composeService, ports): - self.containerName = containerName - self.ipAddress = ipAddress - self.envFromInspect = envFromInspect - self.composeService = composeService - self.ports = ports - - def getEnv(self, key): - """ - Gathers the environment information from "docker inspect" - Returns the value that is set in the environment variable - """ - envValue = None - for val in self.envFromInspect: - if val.startswith(key): - envValue = val[len(key)+1:].strip() - break - if envValue == 
None: - raise Exception("ENV key not found ({0}) for container ({1})".format(key, self.containerName)) - return envValue - - -class Composition: - - def __init__(self, context, composeFilesYaml=None, projectName=None, - force_recreate=True, components=[], startContainers=True): - if not projectName: - projectName = str(uuid.uuid1()).replace('-','') - self.projectName = projectName - self.context = context - self.containerDataList = [] - self.environ = {} - self.composeFilesYaml = composeFilesYaml - if startContainers: - self.up(force_recreate, components) - - def collectServiceNames(self): - servicesList = [service for service in self.issueCommand(["config", "--services"]).splitlines() if "WARNING" not in service] - return servicesList - - def up(self, force_recreate=True, components=[]): - command = ["up", "-d"] - if force_recreate: - command += ["--force-recreate"] - cas = ["ca.example.com", "ca.org1.example.com", "ca.org2.example.com"] - for ca in cas: - self.setFabricCaEnv(ca) - self.issueCommand(command + components) - - def scale(self, serviceName, count=1): - command = ["scale", "%s=%d" %(serviceName, count)] - self.issueCommand(command) - - def stop(self, components=[]): - command = ["stop"] - self.issueCommand(command, components) - - def pause(self, components=[]): - command = ["pause"] - self.issueCommand(command, components) - - def disconnect(self, components=[]): - command = ["network", "disconnect", str(self.projectName)+"_behave"] - self.issueCommand(command, components) - - def start(self, components=[]): - command = ["start"] - self.issueCommand(command, components) - - def unpause(self, components=[]): - command = ["unpause"] - self.issueCommand(command, components) - - def connect(self, components=[]): - command = ["network", "connect", str(self.projectName)+"_behave"] - self.issueCommand(command, components) - - def docker_exec(self, command, components=[]): - results = {} - updatedCommand = " ".join(command) - for component in components: - 
execCommand = ["exec", component, updatedCommand] - results[component] = self.issueCommand(execCommand, []) - return results - - def parseComposeFilesArg(self, composeFileArgs): - argSubList = [["-f", composeFile] for composeFile in composeFileArgs] - args = [arg for sublist in argSubList for arg in sublist] - return args - - def getFileArgs(self): - return self.parseComposeFilesArg(self.composeFilesYaml) - - def getEnvFromContainer(self, container_name, key): - value = "" - for container in self.containerDataList: - if container_name in container.containerName: - value = container.getEnv(key) - break - return value - - def setFabricCaEnv(self, ca): - name = ca.split('.', 1) - ofileLoc = './configs/{0}/ordererOrganizations/{1}/ca/'.format(self.projectName, name[1]) - pfileLoc = './configs/{0}/peerOrganizations/{1}/ca/'.format(self.projectName, name[1]) - if os.path.exists(ofileLoc): - fileLoc = ofileLoc - else: - fileLoc = pfileLoc - assert os.path.exists(fileLoc),'File "{0}" does not exist'.format(fileLoc) - filename = self.lookForKeyFile(fileLoc) - - keyVals = [] - fullOrg = name[1].split('.') - org = fullOrg[0] - - self.environ['FABRIC_CA_SERVER_{}_TLS_KEYFILE'.format(org.upper())] = '/var/hyperledger/fabric-ca-server/ca/{}'.format(filename) - self.environ['FABRIC_CA_SERVER_{}_CA_KEYFILE'.format(org.upper())] = '/var/hyperledger/fabric-ca-server/ca/{}'.format(filename) - - #copy keyfile to msp/keystore - if not os.path.exists("{0}../msp/keystore".format(fileLoc)): - os.mkdir("{0}../msp/keystore".format(fileLoc)) - shutil.copyfile("{0}{1}".format(fileLoc, filename), "{0}../msp/keystore/{1}".format(fileLoc, filename)) - - with open("./configs/fabric-ca-server-config.yaml", "r") as template: - config = template.read().format(orgName=name[1]) - fd = open("{0}../fabric-ca-server-config.yaml".format(fileLoc), "w") - fd.write(config) - fd.close() - - shutil.copyfile("./configs/fabric-ca-server-config.yaml", "{0}../fabric-ca-server-config.yaml".format(fileLoc)) - - def 
lookForKeyFile(self, fileLoc): - filename = "" - files = os.listdir(fileLoc) - for fn in files: - if fn.endswith('sk'): - filename = fn - return filename - - def getEnvAdditions(self): - myEnv = {} - myEnv = self.environ.copy() - myEnv["COMPOSE_PROJECT_NAME"] = self.projectName - myEnv["CORE_PEER_NETWORKID"] = self.projectName - return myEnv - - def getEnv(self, container=''): - myEnv = os.environ.copy() - for key,value in self.getEnvAdditions().items(): - - if key == container and type(value) == dict: - # If these are container specific environment variables - # copy these env vars - for c_key, c_value in value.items(): - myEnv[c_key] = c_value - elif type(value) != dict: - # Skipping any env vars that contain dict values - # for containers that we don't care about - myEnv[key] = value - return myEnv - - def refreshContainerIDs(self): - containers = self.issueCommand(["ps", "-q"]).split() - return containers - - def getContainerIP(self, container): - container_ipaddress = None - if container['State']['Running']: - container_ipaddress = container['NetworkSettings']['IPAddress'] - if not container_ipaddress and container['NetworkSettings']['Networks']: - # ipaddress not found at the old location, try the new location - container_ipaddress = container['NetworkSettings']['Networks'].values()[0]['IPAddress'] - return container_ipaddress - - def getContainerFromName(self, containerName, containerList): - container = None - for container in containerList: - if containerName == container.containerName: - break - return container - - def issueCommand(self, command, components=[]): - componentList = [] - useCompose = True - # Some commands need to be run using "docker" and not "docker-compose" - docker_only_commands=["network", "start", "stop", "pause", "unpause"] - for component in components: - if '_' in component: - useCompose = False - componentList.append("%s_%s" % (self.projectName, component)) - else: - break - # If we need to perform docker network commands, use 
docker, not - # docker-compose - if command[0] in docker_only_commands: - useCompose = False - - # If we need to perform an operation on a specific container, use - # docker not docker-compose - if useCompose and command[0] != "exec": - cmdArgs = self.getFileArgs()+ command + components - cmd = ["docker-compose"] + cmdArgs - elif command[0] == "exec": - cmdArgs = command + componentList - cmdList = ["docker"] + cmdArgs - cmd = [" ".join(cmdList)] - elif command[0] in docker_only_commands: - cmdArgs = command + components - cmd = ["docker"] + cmdArgs - else: - cmdArgs = command + componentList - cmd = ["docker"] + cmdArgs - - try: - if cmd[0].startswith("docker exec"): - process = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE, env=self.getEnv()) - output, _error = process.communicate() - if "Error: " in _error or "CRIT " in _error: - raise Exception(_error) - else: - process = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, env=self.getEnv()) - output, _error = process.communicate() - if _error: - raise Exception(_error) - except: - err = "Error occurred {0}: {1}".format(cmd, sys.exc_info()[1]) - output = err - - # Don't rebuild if ps command - if command[0] !="ps" and command[0] !="config": - self.rebuildContainerData() - return str(output) - - def updateContainerEnviron(self, container_name, keyValList): - for containerID in self.refreshContainerIDs(): - # get container metadata - cmd = ["docker", "inspect", containerID] - try: - output = subprocess.check_output(cmd) - except: - err = "Error occurred {0}: {1}".format(cmd, sys.exc_info()[1]) - continue - container = json.loads(str(output))[0] - # container name - if container_name == container['Name'][1:]: - config_loc = "/var/lib/docker/containers/{}/config.v2.json".format(container['Id']) - container['Config']['Env'] += keyValList - self.stop([container_name]) - with open(config_loc, "w") as fd: - fd.write(json.dumps([container])) - 
self.start([container_name]) - else: - continue - - def rebuildContainerData(self): - self.containerDataList = [] - for containerID in self.refreshContainerIDs(): - # get container metadata - cmd = ["docker", "inspect", containerID] - try: - output = subprocess.check_output(cmd) - except: - err = "Error occurred {0}: {1}".format(cmd, sys.exc_info()[1]) - continue - container = json.loads(str(output))[0] - # container name - container_name = container['Name'][1:] - # container ip address (only if container is running) - container_ipaddress = self.getContainerIP(container) - # container environment - container_env = container['Config']['Env'] - # container exposed ports - container_ports = container['NetworkSettings']['Ports'] - # container docker-compose service - container_compose_service = container['Config']['Labels']['com.docker.compose.service'] - container_data = ContainerData(container_name, - container_ipaddress, - container_env, - container_compose_service, - container_ports) - self.containerDataList.append(container_data) - - def decompose(self): - self.issueCommand(["unpause"], self.refreshContainerIDs()) - self.issueCommand(["down"]) - self.issueCommand(["kill"]) - self.issueCommand(["rm", "-f"]) - env = self.getEnv() - - # Now remove associated chaincode containers if any - cmd = ["docker", "ps", "-qa", "--filter", "name={0}".format(self.projectName)] - output = str(subprocess.check_output(cmd, env=env)) - container_list = output.strip().split('\n') - for container in container_list: - if container != '': - subprocess.call(['docker', 'rm', '-f', container], env=env) - - # Need to remove the chaincode images: docker rmi -f $(docker images | grep "example.com-" | awk '{print $3}') - retVal = subprocess.call(['docker images | grep ".example.com-"'], env=env, shell=True) - if retVal != 1: - cmd = ['docker images | grep ".example.com-" | awk \'{print $3}\' | xargs docker rmi'] - subprocess.call(cmd, shell=True, env=env) diff --git 
a/app/platform/fabric/e2e-test/feature/steps/config_util.py b/app/platform/fabric/e2e-test/feature/steps/config_util.py deleted file mode 100644 index d55dc7be5..000000000 --- a/app/platform/fabric/e2e-test/feature/steps/config_util.py +++ /dev/null @@ -1,523 +0,0 @@ -# -# Copyright IBM Corp All Rights Reserved -# -# SPDX-License-Identifier: Apache-2.0 -# - - -import subprocess -import os -import sys -from shutil import copyfile -import uuid -import json -import common_util - -ORDERER_TYPES = ["solo", - "kafka", - "solo-msp"] - -PROFILE_TYPES = {"solo": "SampleInsecureSolo", - "kafka": "SampleInsecureKafka", - "solo-msp": "SampleSingleMSPSolo"} - -CHANNEL_PROFILE = "SysTestChannel" - -CFGTX_ORG_STR = ''' ---- -Organizations: - - &{orgName} - Name: {orgName} - ID: {orgMSP} - MSPDir: ./peerOrganizations/{orgMSP}/peers/peer0.{orgMSP}/msp - AnchorPeers: - - Host: peer0.{orgMSP} - Port: 7051 ''' - -ORDERER_STR = ''' -OrdererOrgs: - - Name: ExampleCom - Domain: example.com - Specs: ''' - -ORDERER_HOST = ''' - - Hostname: orderer{count} ''' - -PEER_ORG_STR = ''' - - Name: {name} - Domain: {domain} - EnableNodeOUs: {ouEnable} - Template: - Count: {numPeers} - Users: - Count: {numUsers} -''' - -def updateEnviron(context): - updated_env = os.environ.copy() - if hasattr(context, "composition"): - updated_env.update(context.composition.getEnv()) - return updated_env - -def makeProjectConfigDir(context, returnContext=False): - # Save all the files to a specific directory for the test - if not hasattr(context, "projectName") and not hasattr(context, "composition"): - context.projectName = str(uuid.uuid1()).replace('-','') - elif hasattr(context, "composition"): - context.projectName = context.composition.projectName - - testConfigs = "configs/%s" % context.projectName - if not os.path.isdir(testConfigs): - os.mkdir(testConfigs) - if returnContext: - return testConfigs, context - return testConfigs - -def buildCryptoFile(context, numOrgs, numPeers, numOrderers, numUsers, 
orgMSP=None, ouEnable=False): - testConfigs = makeProjectConfigDir(context) - - # Orderer Stanza - ordererHostStr = "" - for count in range(int(numOrderers)): - ordererHostStr += ORDERER_HOST.format(count=count) - ordererStr = ORDERER_STR + ordererHostStr - - # Peer Stanza - peerStanzas = "" - for count in range(int(numOrgs)): - name = "Org{0}ExampleCom".format(count+1) - domain = "org{0}.example.com".format(count+1) - if orgMSP is not None: - name = orgMSP.title().replace('.', '') - domain = orgMSP - if type(ouEnable) == bool: - ouEnableStr = common_util.convertBoolean(ouEnable) - elif ouEnable == name: - ouEnableStr = "true" - else: - ouEnableStr = "false" - peerStanzas += PEER_ORG_STR.format(name=name, domain=domain, numPeers=numPeers, numUsers=numUsers, ouEnable=ouEnableStr) - peerStr = "PeerOrgs:" + peerStanzas - - cryptoStr = ordererStr + "\n\n" + peerStr - with open("{0}/crypto.yaml".format(testConfigs), "w") as fd: - fd.write(cryptoStr) - -def setCAConfig(context): - testConfigs, context = makeProjectConfigDir(context, returnContext=True) - orgDirs = getOrgs(context) - for orgDir in orgDirs: - with open("configs/fabric-ca-server-config.yaml", "r") as fd: - config_template = fd.read() - config = config_template.format(orgName=orgDir) - with open("{0}/{1}/fabric-ca-server-config.yaml".format(testConfigs, orgDir), "w") as fd: - fd.write(config) - return context - -def setupConfigsForCA(context, channelID): - testConfigs, context = makeProjectConfigDir(context, returnContext=True) - print("testConfigs: {0}".format(testConfigs)) - - configFile = "configtx_fca.yaml" - if os.path.isfile("configs/%s.yaml" % channelID): - configFile = "%s.yaml" % channelID - - copyfile("configs/%s" % configFile, "%s/configtx.yaml" % testConfigs) - - orgDirs = getOrgs(context) - print("Org Dirs: {}".format(orgDirs)) - - for orgDir in orgDirs: - copyfile("{0}/configtx.yaml".format(testConfigs), - "{0}/{1}/msp/config.yaml".format(testConfigs, orgDir)) - - 
os.mkdir("{0}/{1}/msp/cacerts".format(testConfigs, orgDir)) - os.mkdir("{0}/{1}/msp/admincerts".format(testConfigs, orgDir)) - copyfile("{0}/ca.{1}-cert.pem".format(testConfigs, orgDir), - "{0}/{1}/msp/cacerts/ca.{1}-cert.pem".format(testConfigs, orgDir)) - copyfile("{0}/ca.{1}-cert.pem".format(testConfigs, orgDir), - "{0}/{1}/msp/admincerts/ca.{1}-cert.pem".format(testConfigs, orgDir)) - -def certificateSetupForCA(context): - testConfigs, context = makeProjectConfigDir(context, returnContext=True) - orgDirs = [d for d in os.listdir("./{0}/".format(testConfigs)) if (("example.com" in d) and (os.path.isdir("./{0}/{1}".format(testConfigs, d))))] - for orgDir in orgDirs: - if os.path.isdir("{0}/{1}/orderer0.example.com".format(testConfigs, orgDir)): - copyfile("{0}/configtx.yaml".format(testConfigs), - "{0}/{1}/orderer0.example.com/msp/config.yaml".format(testConfigs, orgDir)) - copyfile("{0}/{1}/orderer0.example.com/msp/signcerts/cert.pem".format(testConfigs, orgDir), - "{0}/{1}/msp/admincerts/cert.pem".format(testConfigs, orgDir)) - copyfile("{0}/ca.{1}-cert.pem".format(testConfigs, orgDir), - "{0}/{1}/msp/cacerts/ca.{1}-cert.pem".format(testConfigs, orgDir)) - -def setupConfigs(context, channelID): - testConfigs = makeProjectConfigDir(context) - print("testConfigs: {0}".format(testConfigs)) - - configFile = "configtx.yaml" - if os.path.isfile("configs/%s.yaml" % channelID): - configFile = "%s.yaml" % channelID - - copyfile("configs/%s" % configFile, "%s/configtx.yaml" % testConfigs) - - # Copy config to orderer org structures - for orgDir in os.listdir("./{0}/ordererOrganizations".format(testConfigs)): - copyfile("{0}/configtx.yaml".format(testConfigs), - "{0}/ordererOrganizations/{1}/msp/config.yaml".format(testConfigs, - orgDir)) - # Copy config to peer org structures - for orgDir in os.listdir("./{0}/peerOrganizations".format(testConfigs)): - copyfile("{0}/configtx.yaml".format(testConfigs), - "{0}/peerOrganizations/{1}/msp/config.yaml".format(testConfigs, - 
orgDir)) - copyfile("{0}/configtx.yaml".format(testConfigs), - "{0}/peerOrganizations/{1}/users/Admin@{1}/msp/config.yaml".format(testConfigs, - orgDir)) - -def inspectOrdererConfig(context, filename, channelID): - testConfigs = makeProjectConfigDir(context) - updated_env = updateEnviron(context) - try: - command = ["configtxgen", "-inspectBlock", filename, - "-configPath", ".", - "-channelID", channelID] - return subprocess.check_output(command, cwd=testConfigs, env=updated_env) - #return subprocess.check_output(command, env=updated_env) - except: - print("Unable to inspect orderer config data: {0}".format(sys.exc_info()[1])) - -def inspectChannelConfig(context, filename, channelID): - testConfigs = makeProjectConfigDir(context) - updated_env = updateEnviron(context) - try: - command = ["configtxgen", "-inspectChannelCreateTx", filename, - "-configPath", ".", - "-channelID", channelID] - return subprocess.check_output(command, cwd=testConfigs, env=updated_env) - #return subprocess.check_output(command, env=updated_env) - except: - print("Unable to inspect channel config data: {0}".format(sys.exc_info()[1])) - -def generateConfigForCA(context, channelID, profile, ordererProfile, block="orderer.block"): - setupConfigsForCA(context, channelID) - generateOrdererConfig(context, channelID, ordererProfile, block) - generateChannelConfig(channelID, profile, context) - generateChannelAnchorConfig(channelID, profile, context) - certificateSetupForCA(context) - -def generateConfig(context, channelID, profile, ordererProfile, block="orderer.block"): - setupConfigs(context, channelID) - generateOrdererConfig(context, channelID, ordererProfile, block) - generateChannelConfig(channelID, profile, context) - generateChannelAnchorConfig(channelID, profile, context) - -def generateOrdererConfig(context, channelID, ordererProfile, block): - testConfigs = makeProjectConfigDir(context) - updated_env = updateEnviron(context) - try: - command = ["configtxgen", "-profile", ordererProfile, 
- "-outputBlock", block, - "-configPath", ".", - "-channelID", channelID] - subprocess.check_call(command, cwd=testConfigs, env=updated_env) - #subprocess.check_call(command, env=updated_env) - except: - print("Unable to generate orderer config data: {0}".format(sys.exc_info()[1])) - -def generateChannelConfig(channelID, profile, context): - testConfigs = makeProjectConfigDir(context) - updated_env = updateEnviron(context) - try: - command = ["configtxgen", "-profile", profile, - "-outputCreateChannelTx", "%s.tx" % channelID, - "-configPath", ".", - "-channelID", channelID] - subprocess.check_call(command, cwd=testConfigs, env=updated_env) - #subprocess.check_call(command, env=updated_env) - except: - print("Unable to generate channel config data: {0}".format(sys.exc_info()[1])) - -def getOrgs(context): - testConfigs, context = makeProjectConfigDir(context, returnContext=True) - if os.path.exists("./{0}/peerOrganizations".format(testConfigs)): - orgs = os.listdir("./{0}/peerOrganizations".format(testConfigs)) + os.listdir("./{0}/ordererOrganizations".format(testConfigs)) - else: - orgs = [d for d in os.listdir("./{0}/".format(testConfigs)) if (("example.com" in d) and (os.path.isdir("./{0}/{1}".format(testConfigs, d))))] - - return orgs - -def generateChannelAnchorConfig(channelID, profile, context): - testConfigs = makeProjectConfigDir(context) - updated_env = updateEnviron(context) - for org in os.listdir("./{0}/peerOrganizations".format(testConfigs)): - try: - command = ["configtxgen", "-profile", profile, - "-outputAnchorPeersUpdate", "{0}{1}Anchor.tx".format(org, channelID), - "-channelID", channelID, - "-configPath", testConfigs, - "-asOrg", org.title().replace('.', '')] - #subprocess.check_call(command, cwd=testConfigs, env=updated_env) - subprocess.check_call(command, env=updated_env) - except: - print("Unable to generate channel anchor config data: {0}".format(sys.exc_info()[1])) - -def generateCrypto(context, cryptoLoc="./configs/crypto.yaml"): - 
testConfigs = makeProjectConfigDir(context) - updated_env = updateEnviron(context) - try: - subprocess.check_call(["cryptogen", "generate", - '--output={0}/.'.format(testConfigs), - '--config={0}'.format(cryptoLoc)], - env=updated_env) - except: - print("Unable to generate crypto material: {0}".format(sys.exc_info()[1])) - -def traverse_orderer(projectname, numOrderers, tlsExist): - # orderer stanza - opath = 'configs/' +projectname+ '/ordererOrganizations/example.com/' - capath = opath + 'ca/' - caCertificates(capath) - - msppath = opath + 'msp/' - rolebasedCertificate(msppath) - - for count in range(int(numOrderers)): - ordererpath = opath + 'orderers/' + "orderer" +str(count)+".example.com/" - mspandtlsCheck(ordererpath, tlsExist) - - userpath = opath + 'users/Admin@example.com/' - mspandtlsCheck(userpath, tlsExist) - -def traverse_peer(projectname, numOrgs, numPeers, numUsers, tlsExist, orgMSP=None): - # Peer stanza - pppath = 'configs/' +projectname+ '/peerOrganizations/' - for orgNum in range(int(numOrgs)): - if orgMSP is None: - orgMSP = "org" + str(orgNum) + ".example.com" - for peerNum in range(int(numPeers)): - orgpath = orgMSP + "/" - ppath = pppath + orgpath - peerpath = ppath +"peers/"+"peer"+str(peerNum)+ "."+ orgpath - - mspandtlsCheck(peerpath, tlsExist) - - capath = ppath + 'ca/' - caCertificates(capath) - - msppath = ppath + 'msp/' - rolebasedCertificate(msppath) - keystoreCheck(msppath) - - userAdminpath = ppath +"users/"+"Admin@"+orgpath - mspandtlsCheck(userAdminpath, tlsExist) - - for count in range(int(numUsers)): - userpath = ppath + "users/"+"User"+str(count)+"@"+orgpath - mspandtlsCheck(userpath, tlsExist) - -def generateCryptoDir(context, numOrgs, numPeers, numOrderers, numUsers, tlsExist=True, orgMSP=None): - projectname = context.projectName - traverse_peer(projectname, numOrgs, numPeers, numUsers, tlsExist, orgMSP) - traverse_orderer(projectname, numOrderers, tlsExist) - -def mspandtlsCheck(path, tlsExist): - msppath = path + 'msp/' - 
rolebasedCertificate(msppath) - keystoreCheck(msppath) - - if not tlsExist: - tlspath = path + 'tls/' - tlsCertificates(tlspath) - -def fileExistWithExtension(path, message, fileExt=''): - for root, dirnames, filenames in os.walk(path): - assert len(filenames) > 0, "{0}: len: {1}".format(message, len(filenames)) - fileCount = [filename.endswith(fileExt) for filename in filenames] - assert fileCount.count(True) >= 1 - -def rolebasedCertificate(path): - adminpath = path + "admincerts/" - fileExistWithExtension(adminpath, "There is not .pem cert in {0}.".format(adminpath), '.pem') - - capath = path + "cacerts/" - fileExistWithExtension(capath, "There is not .pem cert in {0}.".format(capath), '.pem') - - signcertspath = path + "signcerts/" - fileExistWithExtension(signcertspath, "There is not .pem cert in {0}.".format(signcertspath), '.pem') - - tlscertspath = path + "tlscerts/" - fileExistWithExtension(tlscertspath, "There is not .pem cert in {0}.".format(tlscertspath), '.pem') - -def caCertificates(path): - # There are no ca directories containing pem files - fileExistWithExtension(path, "There are missing files in {0}.".format(path), '_sk') - fileExistWithExtension(path, "There is not .pem cert in {0}.".format(path), '.pem') - -def tlsCertificates(path): - for root, dirnames, filenames in os.walk(path): - assert len(filenames) == 3, "There are missing certificates in the {0} dir".format(path) - for filename in filenames: - assert filename.endswith(('.crt','.key')), "The files in the {0} directory are incorrect".format(path) - -def keystoreCheck(path): - keystorepath = path + "keystore/" - fileExistWithExtension(keystorepath, "There are missing files in {0}.".format(keystorepath), '') - -def buildConfigtx(testConfigs, orgName, mspID): - configtx = CFGTX_ORG_STR.format(orgName=orgName, orgMSP=mspID) - with open("{}/configtx.yaml".format(testConfigs), "w") as fd: - fd.write(configtx) - -def getNewOrg(context, mspID): - testConfigs, context = 
makeProjectConfigDir(context, returnContext=True) - updated_env = updateEnviron(context) - - orgName = mspID.title().replace(".", "") - copyfile("{}/configtx.yaml".format(testConfigs), "{}/orig_configtx.yaml".format(testConfigs)) - buildConfigtx(testConfigs, orgName, mspID) - try: - command = ["configtxgen", "-printOrg", orgName] - args = subprocess.check_output(command, cwd=testConfigs, env=updated_env) - print("Result of printOrg: ".format(args)) - except: - print("Unable to inspect orderer config data: {0}".format(sys.exc_info()[1])) - args = "" - - # Save the org config and reinstate the original configtx.yaml - copyfile("{}/configtx.yaml".format(testConfigs), "{}/configtx_org3.yaml".format(testConfigs)) - copyfile("{}/orig_configtx.yaml".format(testConfigs), "{}/configtx.yaml".format(testConfigs)) - return {orgName: json.loads(args)} - -def delNewOrg(context, group, mspID, channel): - updated_env = updateEnviron(context) - testConfigs = "./configs/{0}".format(context.projectName) - inputFile = "{0}.block".format(channel) - - # configtxlator proto_decode --input config_block.pb --type common.Block | jq .data.data[0].payload.data.config > config.json - configStr = subprocess.check_output(["configtxlator", "proto_decode", "--input", inputFile, "--type", "common.Block"], cwd=testConfigs , env=updated_env) - config = json.loads(configStr) - - with open("{0}/config.json".format(testConfigs), "w") as fd: - fd.write(json.dumps(config["data"]["data"][0]["payload"]["data"]["config"], indent=4)) - - # configtxlator proto_encode --input config.json --type common.Config --output config.pb - subprocess.check_output(["configtxlator", "proto_encode", "--input", "config.json", "--type", "common.Config", "--output", "config.pb"], - cwd=testConfigs, - env=updated_env) - - config["data"]["data"][0]["payload"]["data"]["config"]["channel_group"]["groups"][group]["groups"].pop(mspID) - return config - -def getCaCert(context, node, fca): - #fabric-ca-client getcacert -d -u 
https://$CA_HOST:7054 -M $ORG_MSP_DIR - if node.startswith("orderer"): - mspdir = context.composition.getEnvFromContainer(node, 'ORDERER_GENERAL_LOCALMSPDIR') - elif node.startswith("peer"): - mspdir = context.composition.getEnvFromContainer(node, 'CORE_PEER_MSPCONFIGPATH') - output = context.composition.docker_exec(["fabric-ca-client getcacert -d -u https://{0}:7054 -M {1}".format(fca, mspdir)], [node]) - print("Output getcacert: {}".format(output)) - -def getUserPass(context, container_name): - for container in context.composition.containerDataList: - if container_name in container.containerName: - userpass = container.getEnv('BOOTSTRAP_USER_PASS') - break - -def registerUsers(context): - for user in context.users.keys(): - #fabric-ca-client register -d --id.name $ADMIN_NAME --id.secret $ADMIN_PASS - org = context.users[user]['organization'] - passwd = context.users[user]['password'] - role = context.users[user]['role'] - fca = 'ca.{}'.format(org) - output = context.composition.docker_exec(["fabric-ca-client register -d --id.name {0} --id.secret {1}".format(user, passwd)], [fca]) - print("user register: {}".format(output)) - -def registerWithABAC(context, user): - ''' - ABAC == Attribute Based Access Control - ''' - org = context.users[user]['organization'] - passwd = context.users[user]['password'] - role = context.users[user]['role'] - fca = 'ca.{}'.format(org) - attr = [] - for abac in context.abac.keys(): - if context.abac[abac] == 'required': - attr.append("{0}=true:ecert".format(abac)) - else: - attr.append("{0}=true".format(abac)) - #fabric-ca-client register -d --id.name $ADMIN_NAME --id.secret $ADMIN_PASS --id.attrs "hf.admin=true:ecert" - attr_reqs = ",".join(attr) - output = context.composition.docker_exec(['fabric-ca-client register -d --id.name {0} --id.secret {1} --id.attrs "{2}"'.format(user, passwd, attr_reqs)], [fca]) - print("ABAC register: {}".format(output)) - -def addNewOrg(context, config_update, group, channel): - updated_env = 
updateEnviron(context) - testConfigs = "./configs/{0}".format(context.projectName) - inputFile = "{0}.block".format(channel) - - # configtxlator proto_decode --input config_block.pb --type common.Block | jq .data.data[0].payload.data.config > config.json - configStr = subprocess.check_output(["configtxlator", "proto_decode", "--input", inputFile, "--type", "common.Block"], cwd=testConfigs , env=updated_env) - config = json.loads(configStr) - - with open("{0}/config.json".format(testConfigs), "w") as fd: - fd.write(json.dumps(config["data"]["data"][0]["payload"]["data"]["config"], indent=4)) - - # configtxlator proto_encode --input config.json --type common.Config --output config.pb - subprocess.check_output(["configtxlator", "proto_encode", "--input", "config.json", "--type", "common.Config", "--output", "config.pb"], - cwd=testConfigs, - env=updated_env) - - # groups = "Application" - # config_update = {"Org3ExampleCom": } - config["data"]["data"][0]["payload"]["data"]["config"]["channel_group"]["groups"][group]["groups"].update(config_update) - return config - -def configUpdate(context, config, group, channel): - updated_env = updateEnviron(context) - testConfigs = "./configs/{0}".format(context.projectName) - - with open("{0}/modified_config.json".format(testConfigs), "w") as fd: - fd.write(json.dumps(config["data"]["data"][0]["payload"]["data"]["config"], indent=4)) - - print("Modified config: {}".format(config["data"]["data"][0]["payload"]["data"]["config"]["channel_group"]["groups"][group]["groups"])) - - # configtxlator proto_encode --input config.json --type common.Config --output config.pb - subprocess.check_output(["configtxlator", "proto_encode", "--input", "config.json", "--type", "common.Config", "--output", "config.pb"], - cwd=testConfigs, - env=updated_env) - - # configtxlator proto_encode --input modified_config.json --type common.Config --output modified_config.pb - subprocess.check_output(["configtxlator", "proto_encode", "--input", 
"modified_config.json", "--type", "common.Config", "--output", "modified_config.pb"], - cwd=testConfigs, - env=updated_env) - - # configtxlator compute_update --channel_id $CHANNEL_NAME --original config.pb --updated modified_config.pb --output update.pb - subprocess.check_output(["configtxlator", "compute_update", "--channel_id", channel, "--original", "config.pb", "--updated", "modified_config.pb", "--output", "update.pb"], - cwd=testConfigs, - env=updated_env) - - # configtxlator proto_decode --input update.pb --type common.ConfigUpdate | jq . > org3_update.json - configStr = subprocess.check_output(["configtxlator", "proto_decode", "--input", "update.pb", "--type", "common.ConfigUpdate"], - cwd=testConfigs, - env=updated_env) - config = json.loads(configStr) - - # echo '{"payload":{"header":{"channel_header":{"channel_id":"mychannel", "type":2}},"data":{"config_update":'$(cat org3_update.json)'}}}' | jq . > org3_update_in_envelope.json - updatedconfig = {"payload": {"header": {"channel_header": {"channel_id": channel, - "type":2} - }, - "data": {"config_update": config} - } - } - - with open("{0}/update.json".format(testConfigs), "w") as fd: - fd.write(json.dumps(updatedconfig, indent=4)) - - # configtxlator proto_encode --input org3_update_in_envelope.json --type common.Envelope --output org3_update_in_envelope.pb - subprocess.check_output(["configtxlator", "proto_encode", "--input", "update.json", "--type", "common.Envelope", "--output", "update{0}.pb".format(channel)], - cwd=testConfigs, - env=updated_env) - - return "{0}/update{1}.pb".format(testConfigs, channel) diff --git a/app/platform/fabric/e2e-test/feature/steps/database_util.py b/app/platform/fabric/e2e-test/feature/steps/database_util.py deleted file mode 100644 index 9ffd6a0df..000000000 --- a/app/platform/fabric/e2e-test/feature/steps/database_util.py +++ /dev/null @@ -1,35 +0,0 @@ -# -# Copyright IBM Corp All Rights Reserved -# -# SPDX-License-Identifier: Apache-2.0 -# - -import os -import json - 
- -def generateIndex(indexName, docName, fieldStr, path): - fields = fieldStr.split(",") - fieldData = [] - for field in fields: - if ":" in field: - keyfield = field.split(":") - fieldData.append({keyfield[0]:keyfield[1]}) - else: - fieldData.append("{}".format(field)) - - generated = {"index": {"fields":fieldData}, - "ddoc": docName, - "name": indexName, - "type": "json"} - - # Use the chaincode paths that are in the submodules or current repo - modified_path = path.replace('github.com/hyperledger/', '../').replace('fabric-test/', '../fabric-test/') - - indexLoc = "{0}/META-INF/statedb/couchdb/indexes/".format(modified_path) - if not os.path.exists(indexLoc): - os.makedirs(indexLoc) - - with open("{0}/{1}.json".format(indexLoc, indexName), "w") as fd: - json.dump(generated, fd) - print(os.listdir(indexLoc)) diff --git a/app/platform/fabric/e2e-test/feature/steps/endorser_impl.py b/app/platform/fabric/e2e-test/feature/steps/endorser_impl.py deleted file mode 100644 index 07d846521..000000000 --- a/app/platform/fabric/e2e-test/feature/steps/endorser_impl.py +++ /dev/null @@ -1,1006 +0,0 @@ -# -# Copyright IBM Corp All Rights Reserved -# -# SPDX-License-Identifier: Apache-2.0 -# - -from behave import * -import sys -import os -import json -import time -import os -import random -import string -import struct -import marshal -import subprocess -import config_util -from endorser_util import CLIInterface, ToolInterface, SDKInterface - -try: - pbFilePath = "../feature" - sys.path.insert(0, pbFilePath) - from common import ledger_pb2 -except: - print("ERROR! 
Failed to import the protobuf libraries ledger_pb2 from the ../feature/common/ directory: {0}".format(sys.exc_info()[0])) - sys.exit(1) - - -@when(u'an admin sets up a channel named "{channelId}" using orderer "{orderer}"') -def setup_channel_impl(context, channelId, orderer, username="Admin"): - # Be sure there is a transaction block for this channel - config_util.generateChannelConfig(channelId, config_util.CHANNEL_PROFILE, context) - peers = context.interface.get_peers(context) - - context.interface.create_channel(context, orderer, channelId, user=username) - context.interface.fetch_channel(context, peers, orderer, channelId, user=username) - context.interface.join_channel(context, peers, channelId, user=username) - - # If using any interface besides the CLI, we should add a few seconds delay to be sure - # that the code executes successfully - if not isinstance(context.interface, CLIInterface): - time.sleep(3) - -@when(u'an admin sets up a channel named "{channelId}"') -def step_impl(context, channelId): - setup_channel_impl(context, channelId, "orderer0.example.com") - -@when(u'an admin sets up a channel') -def step_impl(context): - setup_channel_impl(context, context.interface.TEST_CHANNEL_ID, "orderer0.example.com") - -@when(u'a user "{user}" sets up a channel') -def step_impl(context, user): - setup_channel_impl(context, context.interface.TEST_CHANNEL_ID, "orderer0.example.com", username=user) - -@when(u'a user "{user}" sets up a channel named "{channelId}"') -def step_impl(context, user, channelId): - setup_channel_impl(context, channelId, "orderer0.example.com", username=user) - -@when(u'an admin deploys chaincode at path "{path}" with args {args} with name "{name}" with language "{language}" to "{peer}" on channel "{channel}" within {timeout:d} seconds') -def step_impl(context, path, args, name, language, peer, channel, timeout): - deploy_impl(context, path, args, name, language, peer, channel, timeout=timeout) - -@when(u'an admin deploys chaincode at 
path "{path}" with version "{version}" with args {args} with name "{name}" with language "{language}" to "{peer}" on channel "{channel}" within {timeout:d} seconds') -def deploy_impl(context, path, args, name, language, peer, channel, version=0, timeout=300, username="Admin", policy=None): - context.interface.deploy_chaincode(context, path, args, name, language, peer, username, timeout, channel, version, policy=policy) - -@when(u'an admin deploys chaincode at path "{path}" with version "{version}" with args {args} with name "{name}" with language "{language}" to "{peer}" on channel "{channel}"') -def step_impl(context, path, args, name, language, peer, channel, version): - deploy_impl(context, path, args, name, language, peer, channel, version) - -@when(u'an admin deploys chaincode at path "{path}" with version "{version}" with args {args} with name "{name}" with language "{language}" on channel "{channel}"') -def step_impl(context, path, args, name, language, channel, version): - deploy_impl(context, path, args, name, language, "peer0.org1.example.com", channel, version) - -@when(u'an admin deploys chaincode at path "{path}" with args {args} with policy {policy}') -def step_impl(context, path, args, policy): - deploy_impl(context, path, args, "mycc", "GOLANG", "peer0.org1.example.com", context.interface.TEST_CHANNEL_ID, 300, policy=policy) - -@when(u'an admin deploys chaincode at path "{path}" with args {args} with name "{name}" with language "{language}" to "{peer}" on channel "{channel}"') -def step_impl(context, path, args, name, language, peer, channel): - deploy_impl(context, path, args, name, language, peer, channel, 300) - -@when(u'an admin deploys chaincode at path "{path}" with args {args} with name "{name}" to "{peer}" on channel "{channel}" within {timeout:d} seconds') -def step_impl(context, path, args, name, peer, channel, timeout): - deploy_impl(context, path, args, name, "GOLANG", peer, channel, timeout) - -@when(u'an admin deploys chaincode at path 
"{path}" with args {args} with name "{name}" to "{peer}" on channel "{channel}"') -def step_impl(context, path, args, name, peer, channel): - deploy_impl(context, path, args, name, "GOLANG", peer, channel) - -@when(u'an admin deploys chaincode at path "{path}" with args {args} with name "{name}" on the initial leader peer of "{org}"') -def step_impl(context, path, args, name, org): - deploy_impl(context, path, args, name, "GOLANG", context.interface.get_initial_leader(context, org), context.interface.TEST_CHANNEL_ID) - -@when(u'an admin deploys chaincode at path "{path}" with args {args} with name "{name}" on the initial non-leader peer of "{org}"') -def step_impl(context, path, args, name, org): - deploy_impl(context, path, args, name, "GOLANG", context.interface.get_initial_non_leader(context, org), context.interface.TEST_CHANNEL_ID) - -@when(u'an admin deploys chaincode at path "{path}" with args {args} with name "{name}" with language "{language}" on channel "{channel}" within {timeout:d} seconds') -def step_impl(context, path, args, name, language, channel, timeout): - deploy_impl(context, path, args, name, language, "peer0.org1.example.com", channel, timeout) - -@when(u'an admin deploys chaincode at path "{path}" with args {args} with name "{name}" with language "{language}" on channel "{channel}"') -def step_impl(context, path, args, name, language, channel): - deploy_impl(context, path, args, name, language, "peer0.org1.example.com", channel) - -@when(u'an admin deploys chaincode at path "{path}" with args {args} with name "{name}" with language "{language}" within {timeout:d} seconds') -def step_impl(context, path, args, name, language, timeout): - deploy_impl(context, path, args, name, language, "peer0.org1.example.com", context.interface.TEST_CHANNEL_ID, timeout) - -@when(u'an admin deploys chaincode at path "{path}" with args {args} with name "{name}" with language "{language}"') -def step_impl(context, path, args, name, language): - 
deploy_impl(context, path, args, name, language, "peer0.org1.example.com", context.interface.TEST_CHANNEL_ID) - -@when(u'an admin deploys chaincode at path "{path}" with args {args} with language "{language}" within {timeout:d} seconds') -def step_impl(context, path, args, language, timeout): - deploy_impl(context, path, args, "mycc", language, "peer0.org1.example.com", context.interface.TEST_CHANNEL_ID, timeout) - -@when(u'an admin deploys chaincode at path "{path}" with args {args} with language "{language}"') -def step_impl(context, path, args, language): - deploy_impl(context, path, args, "mycc", language, "peer0.org1.example.com", context.interface.TEST_CHANNEL_ID) - -@when(u'an admin deploys chaincode at path "{path}" with args {args} with name "{name}" on channel "{channel}" within {timeout:d} seconds') -def step_impl(context, path, args, name, channel, timeout): - deploy_impl(context, path, args, name, "GOLANG", "peer0.org1.example.com", channel, timeout) - -@when(u'an admin deploys chaincode at path "{path}" with args {args} with name "{name}" on channel "{channel}"') -def step_impl(context, path, args, name, channel): - deploy_impl(context, path, args, name, "GOLANG", "peer0.org1.example.com", channel) - -@when(u'an admin deploys chaincode at path "{path}" with args {args} with name "{name}" within {timeout:d} seconds') -def step_impl(context, path, args, name, timeout): - deploy_impl(context, path, args, name, "GOLANG", "peer0.org1.example.com", context.interface.TEST_CHANNEL_ID, timeout) - -@when(u'an admin deploys chaincode at path "{path}" with args {args} with name "{name}"') -def step_impl(context, path, args, name): - deploy_impl(context, path, args, name, "GOLANG", "peer0.org1.example.com", context.interface.TEST_CHANNEL_ID) - -@when(u'an admin deploys chaincode at path "{path}" with args {args} within {timeout:d} seconds') -def step_impl(context, path, args, timeout): - deploy_impl(context, path, args, "mycc", "GOLANG", "peer0.org1.example.com", 
context.interface.TEST_CHANNEL_ID, timeout) - -@when(u'an admin deploys chaincode at path "{path}" with args {args}') -def step_impl(context, path, args): - deploy_impl(context, path, args, "mycc", "GOLANG", "peer0.org1.example.com", context.interface.TEST_CHANNEL_ID) - -@when(u'an admin deploys chaincode on channel "{channel}" with args {args} within {timeout:d} seconds') -def step_impl(context, channel, args, timeout): - deploy_impl(context, - "github.com/hyperledger/fabric/examples/chaincode/go/example02/cmd", - args, - "mycc", - "GOLANG", - "peer0.org1.example.com", - channel, timeout) - -@when(u'an admin deploys chaincode on channel "{channel}" with args {args}') -def step_impl(context, channel, args): - deploy_impl(context, - "github.com/hyperledger/fabric/examples/chaincode/go/example02/cmd", - args, - "mycc", - "GOLANG", - "peer0.org1.example.com", - channel) - -@when(u'an admin deploys chaincode on channel "{channel}" within {timeout:d} seconds') -def step_impl(context, channel, timeout): - deploy_impl(context, - "github.com/hyperledger/fabric/examples/chaincode/go/example02/cmd", - '["init", "a", "100" , "b", "200"]', - "mycc", - "GOLANG", - "peer0.org1.example.com", - channel, timeout) - -@when(u'an admin deploys chaincode on channel "{channel}"') -def step_impl(context, channel): - deploy_impl(context, - "github.com/hyperledger/fabric/examples/chaincode/go/example02/cmd", - '["init", "a", "100" , "b", "200"]', - "mycc", - "GOLANG", - "peer0.org1.example.com", - channel) - -@when(u'an admin deploys chaincode with name "{name}" on channel "{channel}" within {timeout:d} seconds') -def step_impl(context, name, channel, timeout): - deploy_impl(context, - "github.com/hyperledger/fabric/examples/chaincode/go/example02/cmd", - '["init", "a", "100" , "b", "200"]', - name, - "GOLANG", - "peer0.org1.example.com", - channel, timeout) - -@when(u'an admin deploys chaincode with name "{name}" on channel "{channel}"') -def step_impl(context, name, channel): - 
deploy_impl(context, - "github.com/hyperledger/fabric/examples/chaincode/go/example02/cmd", - '["init", "a", "100" , "b", "200"]', - name, - "GOLANG", - "peer0.org1.example.com", - channel) - -@when(u'an admin deploys chaincode with args {args} with policy {policy}') -def step_impl(context, args, policy): - deploy_impl(context, - "github.com/hyperledger/fabric/examples/chaincode/go/example02/cmd", - args, - "mycc", - "GOLANG", - "peer0.org1.example.com", - context.interface.TEST_CHANNEL_ID, - 300, - policy=policy) - -@when(u'an admin deploys chaincode with args {args} within {timeout:d} seconds') -def step_impl(context, args, timeout): - deploy_impl(context, - "github.com/hyperledger/fabric/examples/chaincode/go/example02/cmd", - args, - "mycc", - "GOLANG", - "peer0.org1.example.com", - context.interface.TEST_CHANNEL_ID, timeout) - -@when(u'an admin deploys chaincode with args {args}') -def step_impl(context, args): - deploy_impl(context, - "github.com/hyperledger/fabric/examples/chaincode/go/example02/cmd", - args, - "mycc", - "GOLANG", - "peer0.org1.example.com", - context.interface.TEST_CHANNEL_ID) - -@when(u'an admin deploys chaincode within {timeout:d} seconds') -def step_impl(context, timeout): - deploy_impl(context, - "github.com/hyperledger/fabric/examples/chaincode/go/example02/cmd", - '["init", "a", "100" , "b", "200"]', - "mycc", - "GOLANG", - "peer0.org1.example.com", - context.interface.TEST_CHANNEL_ID, timeout) - -@when(u'an admin deploys chaincode') -def step_impl(context): - deploy_impl(context, - "github.com/hyperledger/fabric/examples/chaincode/go/example02/cmd", - '["init", "a", "100" , "b", "200"]', - "mycc", - "GOLANG", - "peer0.org1.example.com", - context.interface.TEST_CHANNEL_ID) - -@when(u'an admin installs chaincode at path "{path}" of language "{language}" as version "{version}" with args {args} with name "{name}" to all peers') -def step_impl(context, path, args, name, language, version): - peers = context.interface.get_peers(context) - 
install_impl(context, path, args, name, language, version, peers) - -@when(u'an admin installs chaincode at path "{path}" of language "{language}" as version "{version}" with args {args} with name "{name}" to "{peer}"') -def step_impl(context, path, args, name, peer, language, version): - install_impl(context, path, args, name, language, version, [peer]) - -@when(u'an admin installs chaincode at path "{path}" of language "{language}" as version "{version}" with args {args} with name "{name}"') -def install_impl(context, path, args, name, language, version, peers=["peer0.org1.example.com"], username="Admin"): - context.interface.pre_deploy_chaincode(context, path, args, name, language, version=version) - context.interface.install_chaincode(context, peers, username) - -@when(u'an admin installs chaincode at path "{path}" as version "{version:d}" with args {args} with name "{name}" to "{peer}"') -def step_impl(context, path, args, name, version, peer): - install_impl(context, path, args, name, "GOLANG", version, [peer]) - -@when(u'an admin installs chaincode at path "{path}" as version "{version}" with args {args} with name "{name}"') -def step_impl(context, path, args, name, version): - install_impl(context, path, args, names, "GOLANG", version) - -@when(u'an admin installs chaincode at path "{path}" as version "{version:d}" with args {args} with name "{name}" on all peers') -def step_impl(context, path, args, name, version, username="Admin"): - peers = context.interface.get_peers(context) - context.interface.pre_deploy_chaincode(context, path, args, name, "GOLANG", version=version) - context.interface.install_chaincode(context, peers, "Admin") - -@when(u'an admin installs chaincode at path "{path}" as version "{version:d}" with args {args} on all peers') -def step_impl(context, path, version, args, username="Admin"): - peers = context.interface.get_peers(context) - context.interface.pre_deploy_chaincode(context, path, args, "mycc", "GOLANG", version=version) - 
context.interface.install_chaincode(context, peers, "Admin") - -@when(u'an admin installs chaincode at path "{path}" with args {args} on all peers') -def step_impl(context, path, args, username="Admin"): - peers = context.interface.get_peers(context) - context.interface.pre_deploy_chaincode(context, path, args, "mycc", "GOLANG", version="0") - context.interface.install_chaincode(context, peers, "Admin") - -@when(u'an admin installs chaincode at path "{path}" with args {args} with name "{name}" to "{peer}"') -def step_impl(context, path, args, name, peer): - install_impl(context, path, args, name, "GOLANG", "0", [peer]) - -@when(u'an admin installs chaincode on all peers') -def step_impl(context): - peers = context.interface.get_peers(context) - install_impl(context, - "github.com/hyperledger/fabric/examples/chaincode/go/example02/cmd", - '["init", "a", "100" , "b", "200"]', - "mycc", - "GOLANG", - "0") - -@when(u'an admin upgrades the chaincode with name "{name}" on channel "{channel}" to version "{version:d}" on peer "{peer}" with args {args}') -def upgrade_impl(context, channel, version, peer, name=None, args=None, timeout=120): - if args: - context.chaincode["args"] = args - if name: - context.chaincode["name"] = name - context.chaincode["version"] = version - context.chaincode["channelID"] = channel - context.interface.upgrade_chaincode(context, "orderer0.example.com", peer, channel) - context.interface.post_deploy_chaincode(context, peer, timeout) - -@when(u'an admin upgrades the chaincode on channel "{channel}" to version "{version:d}" on peer "{peer}" with args {args}') -def step_impl(context, channel, version, peer, args): - upgrade_impl(context, channel, version, peer, "mycc", args) - -@when(u'an admin upgrades the chaincode with name "{name}" on channel "{channel}" to version "{version:d}" with args {args}') -def step_impl(context, name, channel, version, args): - upgrade_impl(context, channel, version, "peer0.org1.example.com", name, args) - -@when(u'an 
admin upgrades the chaincode on channel "{channel}" to version "{version:d}" on peer "{peer}"') -def step_impl(context, channel, version, peer): - upgrade_impl(context, channel, version, peer) - -@when(u'an admin upgrades the chaincode on channel "{channel}" on peer "{peer}" with args {args}') -def step_impl(context, channel, peer, args): - upgrade_impl(context, channel, 1, peer, args) - -@when(u'an admin upgrades the chaincode on channel "{channel}" on peer "{peer}"') -def step_impl(context, channel, peer): - upgrade_impl(context, channel, 1, peer) - -@when(u'an admin upgrades the chaincode to version "{version}" on peer "{peer}" with args {args}') -def step_impl(context, version, peer, args): - upgrade_impl(context, context.interface.TEST_CHANNEL_ID, version, peer, "mycc", args) - -@when(u'an admin upgrades the chaincode on peer "{peer}" with args {args}') -def step_impl(context, peer, args): - upgrade_impl(context, context.interface.TEST_CHANNEL_ID, 1, peer, args) - -@when(u'an admin upgrades the chaincode on peer "{peer}"') -def step_impl(context, channel, peer): - upgrade_impl(context, context.interface.TEST_CHANNEL_ID, 1, peer) - -@when(u'an admin instantiates the chaincode on channel "{channel}" on peer "{peer}"') -def instantiate_impl(context, peer, channel, username="Admin", timeout=120): - context.chaincode["channelID"] = channel - context.interface.instantiate_chaincode(context, peer, username) - context.interface.post_deploy_chaincode(context, peer, timeout) - -@when(u'an admin instantiates the chaincode on "{peer}"') -def step_impl(context, peer): - instantiate_impl(context, peer, context.chaincode["channelID"]) - -@when(u'an admin queries for channel information') -def step_impl(context): - get_chain_info_impl(context, context.interface.TEST_CHANNEL_ID) - -@when(u'an admin queries for channel information on channel "{channel}"') -def get_chain_info_impl(context, channel): - chaincode = {"args": '["GetChainInfo","{}"]'.format(channel), - "chaincodeId": 
'qscc', - "name": 'qscc'} - - result = context.interface.query_chaincode(context, chaincode, "peer0.org1.example.com", channel, user="Admin") - context.result["peer0.org1.example.com"] = marshal.dumps(result["peer0.org1.example.com"]) - context.result["peer0.org1.example.com"] = result["peer0.org1.example.com"].encode("ascii", "ignore") - -@when(u'an admin queries for the first block') -def step_impl(context): - get_block_num_impl(context, "1", context.interface.TEST_CHANNEL_ID) - -@when(u'an admin queries for the first block on the channel "{channel}"') -def step_impl(context, channel): - get_block_num_impl(context, "1", channel) - -@when(u'an admin queries for block number "{number}" on the channel "{channel}"') -def get_block_num_impl(context, number, channel): - updated_env = config_util.updateEnviron(context) - time.sleep(2) - chaincode = {"args": '["GetBlockByNumber","{0}","{1}"]'.format(channel, number), - "chaincodeId": 'qscc', - "name": 'qscc'} - context.result = context.interface.query_chaincode(context, chaincode, "peer0.org1.example.com", channel, user="Admin") - -@when(u'an admin queries for last transaction using the transaction ID') -def step_impl(context, number, channel): - chaincode = {"args": '["GetTransactionByID","{0}","{1}"]'.format(channel, txId), - "chaincodeId": 'qscc', - "name": 'qscc'} - context.result = context.interface.query_chaincode(context, chaincode, "peer0.org1.example.com", channel, user="Admin") - -@when(u'a user queries on the channel "{channel}" using chaincode named "{name}" for the random key with args {args} on "{peer}"') -def step_impl(context, channel, name, args, peer): - query_impl(context, channel, name, args.format(random_key=context.random_key), peer) - -@when(u'a user queries on the chaincode named "{name}" for the random key with args {args} on "{peer}"') -def step_impl(context, name, args, peer): - query_impl(context, context.interface.TEST_CHANNEL_ID, name, args.format(random_key=context.random_key), str(peer)) - 
-@when(u'a user queries on the chaincode named "{name}" for the random key with args {args}') -def step_impl(context, name, args): - query_impl(context, context.interface.TEST_CHANNEL_ID, name, args.format(random_key=context.random_key), "peer0.org1.example.com") - -@when(u'a user queries on the chaincode for the random key with args {args}"') -def step_impl(context, args): - query_impl(context, context.interface.TEST_CHANNEL_ID, "mycc", args.format(random_key=context.random_key), "peer0.org1.example.com") - -@when(u'a user queries on the chaincode named "{name}" with args {args} on the initial leader peer of "{org}"') -def step_impl(context, name, args, org): - query_impl(context, context.interface.TEST_CHANNEL_ID, name, args, context.interface.get_initial_leader(context, org)) - -@when(u'a user queries on the chaincode named "{name}" with args {args} on the initial non-leader peer of "{org}"') -def step_impl(context, name, args, org): - query_impl(context, context.interface.TEST_CHANNEL_ID, name, args, context.interface.get_initial_non_leader(context, org)) - -@when(u'a user queries on the chaincode named "{name}" with dynamic args {args} on "{peer}"') -def step_impl(context, name, args, peer): - # Temporarily sleep for 2 sec. This delay should be able to be removed once we start using events for being sure the invokes are complete - time.sleep(2) - chaincode = {"args": args.format(last_key=context.last_key), - "chaincodeId": str(name), - "name": str(name)} - context.result = context.interface.query_chaincode(context, chaincode, str(peer), context.interface.TEST_CHANNEL_ID, user="User1") - -@when(u'a user queries on version "{version:d}" of the channel "{channel}" using chaincode named "{name}" with args {args} on "{peer}"') -def query_impl(context, channel, name, args, peer, targs='', version=0, user="User1", opts={}): - # Temporarily sleep for 2 sec. 
This delay should be able to be removed once we start using events for being sure the invokes are complete - time.sleep(2) - chaincode = {"args": args, - "chaincodeId": str(name), - "version": version, - "name": str(name)} - context.result = context.interface.query_chaincode(context, chaincode, peer, channel, targs, user=user, opts=opts) - -@when(u'a user queries on the channel "{channel}" using chaincode named "{name}" with args {args} on "{peer}"') -def step_impl(context, channel, name, args, peer): - query_impl(context, channel, name, args, peer) - -@when(u'a user "{user}" queries on the channel "{channel}" using chaincode named "{name}" with args {args} on "{peer}"') -def step_impl(context, user, channel, name, args, peer): - query_impl(context, channel, name, args, peer, user=user) - -@when(u'a user queries on the chaincode named "{name}" with args {args} and generated transient args {targs} on "{peer}"') -def step_impl(context, name, args, peer, targs): - # This is a workaround to allow targs to send a json structure - generated = targs[2:-2].format(**context.command_result) - query_impl(context, context.interface.TEST_CHANNEL_ID, name, args, str(peer), targs[:2] + generated+ targs[-2:]) - -@when(u'a user queries on the chaincode named "{name}" with args {args} and generated transient args {targs}') -def step_impl(context, name, args, targs): - # This is a workaround to allow targs to send a json structure - generated = targs[2:-2].format(**context.command_result) - query_impl(context, context.interface.TEST_CHANNEL_ID, name, args, "peer0.org1.example.com", targs[:2] + generated + targs[-2:]) - -@when(u'a user queries on the chaincode named "{name}" with args {args} and transient args {targs} on "{peer}"') -def step_impl(context, name, args, peer, targs): - query_impl(context, context.interface.TEST_CHANNEL_ID, name, args, str(peer), targs) - -@when(u'a user queries on the chaincode named "{name}" with args {args} and transient args {targs}') -def 
step_impl(context, name, args, targs): - query_impl(context, context.interface.TEST_CHANNEL_ID, name, args, "peer0.org1.example.com", targs) - -@when(u'a user queries on the chaincode named "{name}" with args {args} on "{peer}"') -def step_impl(context, name, args, peer): - query_impl(context, context.interface.TEST_CHANNEL_ID, name, args, str(peer)) - -@when(u'a user "{user}" queries on the chaincode named "{name}" with args {args} on "{peer}"') -def step_impl(context, user, name, args, peer): - query_impl(context, context.interface.TEST_CHANNEL_ID, name, args, str(peer), user=user) - -@when(u'a user queries on the chaincode named "{name}" on channel "{channel}" with args {args}') -def step_impl(context, name, channel, args): - query_impl(context, channel, name, args, "peer0.org1.example.com") - -@when(u'a user "{user}" queries on the chaincode named "{name}" on channel "{channel}" with args {args}') -def step_impl(context, user, name, channel, args): - query_impl(context, channel, name, args, "peer0.org1.example.com", user=user) - -@when(u'a user queries on the chaincode named "{name}" with args {args}') -def step_impl(context, name, args): - query_impl(context, context.interface.TEST_CHANNEL_ID, name, args, "peer0.org1.example.com") - -@when(u'a user "{user}" queries on the chaincode named "{name}" with args {args}') -def step_impl(context, user, name, args): - query_impl(context, context.interface.TEST_CHANNEL_ID, name, args, "peer0.org1.example.com", user=user) - -@when(u'a user evaluates a transaction on the chaincode named "{name}" with args {args}') -def step_impl(context, name, args): - query_impl(context, context.interface.TEST_CHANNEL_ID, name, args, "peer0.org1.example.com", opts={"network-model": "true"}) - -@when(u'a user queries on the channel "{channel}" using chaincode named "{name}" with args {args}') -def step_impl(context, channel, name, args): - query_impl(context, channel, name, args, "peer0.org1.example.com") - -@when(u'a user "{user}" 
queries on the channel "{channel}" using chaincode named "{name}" with args {args}') -def step_impl(context, user, channel, name, args): - query_impl(context, channel, name, args, "peer0.org1.example.com", user=user) - -@when(u'a user queries on the chaincode on channel "{channel}" with args {args}') -def step_impl(context, channel, args): - query_impl(context, channel, context.chaincode["name"], args, "peer0.org1.example.com") - -@when(u'a user "{user}" queries on the chaincode on channel "{channel}" with args {args}') -def step_impl(context, user, channel, args): - query_impl(context, channel, context.chaincode["name"], args, "peer0.org1.example.com", user=user) - -@when(u'a user queries on the chaincode with args {args} from "{peer}"') -def step_impl(context, args, peer): - query_impl(context, context.interface.TEST_CHANNEL_ID, context.chaincode["name"], args, str(peer)) - -@when(u'a user "{user}" queries on the chaincode with args {args} from "{peer}"') -def step_impl(context, user, args, peer): - query_impl(context, context.interface.TEST_CHANNEL_ID, context.chaincode["name"], args, str(peer), user=user) - -@when(u'a user queries on the chaincode with args {args}') -def step_impl(context, args): - query_impl(context, context.interface.TEST_CHANNEL_ID, context.chaincode["name"], args, "peer0.org1.example.com") - -@when(u'a user "{user}" queries on the chaincode with args {args}') -def step_impl(context, user, args): - query_impl(context, context.interface.TEST_CHANNEL_ID, context.chaincode["name"], args, "peer0.org1.example.com", user=user) - -@when(u'a user queries on the chaincode named "{name}"') -def step_impl(context, name): - query_impl(context, context.interface.TEST_CHANNEL_ID, name, '["query","a"]', "peer0.org1.example.com") - -@when(u'a user queries on the chaincode') -def step_impl(context): - query_impl(context, context.interface.TEST_CHANNEL_ID, context.chaincode["name"], '["query","a"]', "peer0.org1.example.com") - -@when(u'a user invokes 
{numInvokes:d} times on the channel "{channel}" using chaincode named "{name}" with args {args} on "{peer}" using orderer "{orderer}"') -def invokes_impl(context, numInvokes, channel, name, args, peer, orderer="orderer0.example.com", targs='', user="User1", opts={}): - chaincode = {"args": args, - "name": str(name), - "chaincodeId": str(name)} - for count in range(numInvokes): - context.result = context.interface.invoke_chaincode(context, chaincode, orderer, peer, channel, targs, user=user, opts=opts) - -@when(u'a user invokes {numInvokes:d} times on the channel "{channel}" using chaincode named "{name}" with args {args} on "{peer}"') -def step_impl(context, numInvokes, channel, name, args): - invokes_impl(context, numInvokes, channel, name, args, "peer0.org1.example.com") - -@when(u'a user invokes {numInvokes:d} times on the channel "{channel}" using chaincode named "{name}" with args {args}') -def step_impl(context, numInvokes, channel, name, args): - invokes_impl(context, numInvokes, channel, name, args, "peer0.org1.example.com") - -@when(u'a user invokes {numInvokes:d} times using chaincode with args {args}') -def step_impl(context, numInvokes, args): - invokes_impl(context, numInvokes, context.interface.TEST_CHANNEL_ID, context.chaincode["name"], args, "peer0.org1.example.com") - -@when(u'a user invokes {numInvokes:d} times using chaincode named "{name}" with args {args}') -def step_impl(context, numInvokes, name, args): - invokes_impl(context, numInvokes, context.interface.TEST_CHANNEL_ID, name, args, "peer0.org1.example.com") - -@when(u'a user invokes marble {startNum:d} to {endNum:d} with the last {numShare:d} of them with owner "{owner}", color "{color}" and size "{size}" in channel named "{channel}" in chaincode "{cc_name}"') -def step_impl(context, startNum, endNum, numShare, owner, color, size, channel, cc_name): - for x in range(startNum, endNum+1-numShare): - args="[\"initMarble\",\"marble"+str(x)+"\",\"blue\",\"35\",\"jane\"]" - invokes_impl(context, 
1, channel, cc_name, args, "peer0.org1.example.com") - for x in range(endNum+1-numShare, endNum+1): - args="[\"initMarble\",\"marble"+str(x)+"\",\""+color+"\",\""+size+"\",\""+owner+"\"]" - invokes_impl(context, 1, channel, cc_name, args, "peer0.org1.example.com") - -@when(u'a user invokes marble {startNum:d} to {endNum:d} with owner "{owner}", color "{color}" and size "{size}"') -def step_impl(context, startNum, endNum, owner, color, size): - for x in range(startNum, endNum+1): - args="[\"initMarble\",\"marble"+str(x)+"\",\""+color+"\",\""+size+"\",\""+owner+"\"]" - invokes_impl(context, 1, context.interface.TEST_CHANNEL_ID, "mycc", args, "peer0.org1.example.com") - -@when(u'a user invokes marble {startNum:d} to {endNum:d}') -def step_impl(context, startNum, endNum): - for x in range(startNum, endNum+1): - args="[\"initMarble\",\"marble"+str(x)+"\",\"blue\",\"35\",\"jane\"]" - invokes_impl(context, 1, context.interface.TEST_CHANNEL_ID, "mycc", args, "peer0.org1.example.com") - -@when(u'a user "{user}" invokes on the channel "{channel}" using chaincode named "{name}" with args {args} on "{peer}"') -def step_impl(context, user, channel, name, args, peer): - invokes_impl(context, 1, channel, name, args, peer, user=user) - -@when(u'a user invokes on the channel "{channel}" using chaincode named "{name}" with args {args} on "{peer}"') -def step_impl(context, channel, name, args, peer): - invokes_impl(context, 1, channel, name, args, peer) - -@when(u'a user invokes on the chaincode named "{name}" with args {args} and generated transient args {targs} on "{peer}"') -def step_impl(context, name, args, targs, peer): - # This is a workaround to allow targs to send a json structure - generated = targs[2:-2].format(**context.command_result) - invokes_impl(context, 1, context.interface.TEST_CHANNEL_ID, name, args, str(peer), targs=targs[:2] + generated + targs[-2:]) - -@when(u'a user invokes on the chaincode named "{name}" with args {args} and generated transient args {targs}') 
-def step_impl(context, name, args, targs): - # This is a workaround to allow targs to send a json structure - generated = targs[2:-2].format(**context.command_result) - invokes_impl(context, 1, context.interface.TEST_CHANNEL_ID, name, args, "peer0.org1.example.com", targs=targs[:2] + generated + targs[-2:]) - -@when(u'a user invokes on the chaincode named "{name}" with args {args} and transient args {targs} on "{peer}"') -def step_impl(context, name, args, peer, targs): - invokes_impl(context, 1, context.interface.TEST_CHANNEL_ID, name, args, str(peer), targs=targs) - -@when(u'a user invokes on the chaincode named "{name}" with args {args} and transient args {targs}') -def step_impl(context, name, args, targs): - invokes_impl(context, 1, context.interface.TEST_CHANNEL_ID, name, args, "peer0.org1.example.com", targs=targs) - -@when(u'a user invokes on the chaincode named "{name}" with args {args} on "{peer}"') -def step_impl(context, name, args, peer): - invokes_impl(context, 1, context.interface.TEST_CHANNEL_ID, name, args, str(peer)) - -@when(u'a user invokes on the chaincode with args {args} on "{peer}"') -def step_impl(context, args, peer): - invokes_impl(context, 1, context.interface.TEST_CHANNEL_ID, "mycc", args, str(peer)) - -@when(u'a user "{user}" invokes on the chaincode with args {args} on "{peer}"') -def step_impl(context, user, args, peer): - invokes_impl(context, 1, context.interface.TEST_CHANNEL_ID, "mycc", args, str(peer), user=user) - -@when(u'a user invokes on the chaincode with args {args}') -def step_impl(context, args): - invokes_impl(context, 1, context.interface.TEST_CHANNEL_ID, context.chaincode["name"], args, "peer0.org1.example.com") - -@when(u'a user "{user}" invokes on the chaincode with args {args}') -def step_impl(context, user, args): - invokes_impl(context, 1, context.interface.TEST_CHANNEL_ID, context.chaincode["name"], args, "peer0.org1.example.com", user=user) - -@when(u'a user invokes on the channel "{channel}" using chaincode 
named "{name}" with args {args}') -def step_impl(context, channel, name, args): - invokes_impl(context, 1, channel, name, args, "peer0.org1.example.com") - -@when(u'a user invokes on the chaincode named "{name}" with args {args} on the initial leader peer of "{org}"') -def step_impl(context, name, args, org): - invokes_impl(context, 1, context.interface.TEST_CHANNEL_ID, name, args, context.interface.get_initial_leader(context, org)) - -@when(u'a user invokes on the chaincode named "{name}" with args {args} on the initial non-leader peer of "{org}"') -def step_impl(context, name, args, org): - invokes_impl(context, 1, context.interface.TEST_CHANNEL_ID, name, args, context.interface.get_initial_non_leader(context, org)) - -@when(u'a user "{user}" invokes on the chaincode named "{name}" with args {args} on "{peer}"') -def step_impl(context, user, name, args, peer): - invokes_impl(context, 1, context.interface.TEST_CHANNEL_ID, name, args, str(peer), user=user) - -@when(u'a user invokes on the chaincode named "{name}" with args {args}') -def step_impl(context, name, args): - invokes_impl(context, 1, context.interface.TEST_CHANNEL_ID, name, args, "peer0.org1.example.com") - -@when(u'a user "{user}" invokes on the chaincode named "{name}" with args {args}') -def step_impl(context, user, name, args): - invokes_impl(context, 1, context.interface.TEST_CHANNEL_ID, name, args, "peer0.org1.example.com", user=user) - -@when(u'a user submits a transaction on the chaincode named "{name}" with args {args}') -def step_impl(context, name, args): - invokes_impl(context, 1, context.interface.TEST_CHANNEL_ID, name, args, "peer0.org1.example.com","orderer0.example.com", opts={"network-model": "true"}) - -@when(u'a user invokes {numInvokes:d} times on the chaincode') -def step_impl(context, numInvokes): - invokes_impl(context, numInvokes, context.interface.TEST_CHANNEL_ID, context.chaincode["name"], '["invoke","a","b","5"]', "peer0.org1.example.com") - -@when(u'a user invokes random args 
{args} of length {length:d} {numInvokes:d} times on the chaincode named "{name}"') -def step_impl(context, args, length, numInvokes, name): - for num in range(numInvokes): - random_invoke_impl(context, name, args, length, "peer0.org1.example.com", "orderer0.example.com") - -@when(u'a user invokes args with {count:d} random key/values of length {length:d} on the chaincode named "{name}"') -def step_impl(context, count, length, name): - keyVals = [] - for index in range(count): - key = 'a{index}'.format(index=index) - keyVals.append('"{}"'.format(key)) - payload = ''.join(random.choice(string.ascii_letters) for _ in range(length)) - keyVals.append('"{}"'.format(payload)) - context.last_key = key - context.payload = {"payload": payload, "len": length} - - keyValStr = ",".join(keyVals) - chaincode = {"args": '["put",{}]'.format(keyValStr), - "chaincodeId": str(name), - "name": str(name)} - print("chaincode: {}".format(chaincode)) - context.result = context.interface.invoke_chaincode(context, chaincode, "orderer0.example.com", "peer0.org1.example.com", context.interface.TEST_CHANNEL_ID) - -@when(u'a user invokes on the channel "{channel}" using chaincode named "{name}" with random args {args} of length {length:d} on peer "{peer}" using orderer "{orderer}"') -def random_invoke_impl(context, name, args, length, peer, orderer, channel): - payload = ''.join(random.choice(string.ascii_letters) for _ in range(length)) - random_key = str(random.randint(0, sys.maxint)) - context.payload = {"payload": payload, - "len": length} - context.random_key=random_key - chaincode = {"args": args.format(random_value=payload, random_key=random_key), - "chaincodeId": str(name), - "name": str(name)} - context.result = context.interface.invoke_chaincode(context, chaincode, orderer, peer, channel) - -@when(u'a user invokes on the channel "{channel}" using chaincode named "{name}" with random args {args} of length {length:d}') -def step_impl(context, channel, name, args, length): - 
random_invoke_impl(context, name, args, length, "peer0.org1.example.com", "orderer0.example.com", channel) - -@when(u'a user invokes on the chaincode named "{name}" with random args {args} of length {length:d} on peer "{peer}" using orderer "{orderer}"') -def step_impl(context, name, args, length): - random_invoke_impl(context, name, args, length, str(peer), orderer, context.interface.TEST_CHANNEL_ID) - -@when(u'a user invokes on the chaincode named "{name}" with random args {args} of length {length:d} on peer "{peer}"') -def step_impl(context, name, args, length): - random_invoke_impl(context, name, args, length, str(peer), "orderer0.example.com", context.interface.TEST_CHANNEL_ID) - -@when(u'a user invokes on the chaincode named "{name}" with random args {args} of length {length:d}') -def step_impl(context, name, args, length): - random_invoke_impl(context, name, args, length, "peer0.org1.example.com", "orderer0.example.com", context.interface.TEST_CHANNEL_ID) - -@when(u'a user invokes on the chaincode named "{name}" with random args {args}') -def step_impl(context, name, args): - random_invoke_impl(context, name, args, 1024, "peer0.org1.example.com", "orderer0.example.com", context.interface.TEST_CHANNEL_ID) - -@when(u'a user invokes on the chaincode named "{name}"') -def step_impl(context, name): - invokes_impl(context, 1, context.interface.TEST_CHANNEL_ID, name, '["invoke","a","b","5"]', "peer0.org1.example.com") - -@when(u'a user invokes on the chaincode') -def step_impl(context): - invokes_impl(context, 1, context.interface.TEST_CHANNEL_ID, "mycc", '["invoke","a","b","5"]', "peer0.org1.example.com") - -@when(u'a user using a {identityType} identity invokes on the chaincode named "{name}" with args {args}') -def step_impl(context, identityType, name, args): - peer = "peer0.org1.example.com" - org = "org1.example.com" - - # Save env vars from peer if it is present otherwise save the defaults - backup = context.composition.getEnv(peer) - if peer not in 
context.composition.environ.keys(): - context.composition.environ[peer] = {} - - # Change env vars for peer to certs for the specified identity - if identityType == "peer": - context.composition.environ[peer]["CORE_PEER_TLS_CERT_FILE"] = "/var/hyperledger/tls/server.crt" - context.composition.environ[peer]["CORE_PEER_TLS_KEY_FILE"] = "/var/hyperledger/tls/server.key" - elif identityType == "client": - context.composition.environ[peer]["CORE_PEER_TLS_CERT_FILE"] = "/var/hyperledger/users/Admin@{}/tls/client.crt".format(org) - context.composition.environ[peer]["CORE_PEER_TLS_KEY_FILE"] = "/var/hyperledger/users/Admin@{}/tls/client.key".format(org) - elif identityType == "orderer": - context.composition.environ[peer]["CORE_PEER_TLS_CERT_FILE"] = "/var/hyperledger/configs/{}/ordererOrganizations/example.com/orderers/orderer0.example.com/tls/server.crt".format(context.projectName) - context.composition.environ[peer]["CORE_PEER_TLS_KEY_FILE"] = "/var/hyperledger/configs/{}/ordererOrganizations/example.com/orderers/orderer0.example.com/tls/server.key".format(context.projectName) - else: - assert identityType in ('peer', 'client', 'orderer'), "Unknown identity type {} for invoking on the chaincode".format(identityType) - - invokes_impl(context, 1, context.interface.TEST_CHANNEL_ID, name, args, peer) - - # Reinstate the certs saved for the composition for the specified peer - if backup.get(peer, None) is not None: - context.composition.environ[peer] = backup.get(peer) - else: - context.composition.environ.pop(peer) - -@when(u'an admin creates a channel named "{channelId}" using orderer "{orderer}') -def create_channel_impl(context, channelId, orderer): - # Be sure there is a transaction block for this channel - if not os.path.exists("./configs/{0}/{1}.tx".format(context.projectName, channelId)): - config_util.generateChannelConfig(channelId, config_util.CHANNEL_PROFILE, context) - context.interface.create_channel(context, orderer, channelId) - -@when(u'an admin creates a 
channel named "{channelId}"') -def step_impl(context, channelId): - create_channel_impl(context, channelId, "orderer0.example.com") - -@when(u'an admin creates a channel') -def step_impl(context): - create_channel_impl(context, context.interface.TEST_CHANNEL_ID, "orderer0.example.com") - -@when(u'an admin makes all peers join the channel "{channelId}"') -def join_channel_impl(context, channelId): - peers = context.interface.get_peers(context) - context.interface.join_channel(context, peers, channelId) - -@when(u'an admin makes all peers join the channel') -def step_impl(context): - join_channel_impl(context, context.interface.TEST_CHANNEL_ID) - -@when(u'an admin makes peer "{peer}" join the channel "{channelId}"') -def step_impl(context, channelId, peer): - context.interface.join_channel(context, [peer], channelId) - -@when(u'an admin makes peer "{peer}" join the channel') -def step_impl(context, peer): - context.interface.join_channel(context, [peer], context.interface.TEST_CHANNEL_ID) - -@when(u'an admin fetches genesis information at block {block} using peer "{peer}"') -def step_impl(context, block, peer): - context.interface.fetch_channel(context, [peer], "orderer0.example.com", context.interface.TEST_CHANNEL_ID, None, block=block) - -@when(u'an admin fetches genesis information for a channel "{channelID}" using peer "{peer}" from "{orderer}" to location "{location}"') -def fetch_impl(context, channelID, peer, orderer, location, ext="block"): - context.interface.fetch_channel(context, [peer], orderer, channelID, location, ext=ext) - -@when(u'an admin fetches genesis information for a channel "{channelID}" using peer "{peer}" to location "{location}"') -def step_impl(context, channelID, peer, location): - fetch_impl(context, channelID, peer, "orderer0.example.com", location, ext='tx') - -@when(u'an admin fetches genesis information for a channel "{channelID}" using peer "{peer}"') -def step_impl(context, channelID, peer): - fetch_impl(context, channelID, peer, 
"orderer0.example.com", None) - -@when(u'an admin fetches genesis information using peer "{peer}" from "{orderer}" to location "{location}"') -def step_impl(context, peer, orderer, location): - fetch_impl(context, context.interface.TEST_CHANNEL_ID, peer, orderer, location) - -@when(u'an admin fetches genesis information using peer "{peer}" from "{orderer}"') -def step_impl(context, peer, orderer): - fetch_impl(context, context.interface.TEST_CHANNEL_ID, peer, orderer, None) - -@when(u'an admin fetches genesis information using peer "{peer}"') -def step_impl(context, peer): - fetch_impl(context, context.interface.TEST_CHANNEL_ID, peer, "orderer0.example.com", None) - -@when('the admin updates the "{channel}" channel using the peer "{peer}"') -def update_impl(context, peer, channel): - if not hasattr(context, "block_filename"): - filename = "/var/hyperledger/configs/{0}/update{1}.pb".format(context.composition.projectName, channel) - else: - filename = "/var/hyperledger/{}".format(context.block_filename) - - # If this is a string and not a list, convert to list - peers = peer - if type(peer) == str: - peers = [peer] - context.interface.update_channel(context, peers, channel, "orderer0.example.com", filename) - -@when('the admin updates the channel using peer "{peer}"') -def step_impl(context, peer): - update_impl(context, [peer], context.interface.TEST_CHANNEL_ID) - -@when('the admin updates the channel for all peers') -def step_impl(context): - peers = context.interface.get_peers(context) - update_impl(context, peers, context.interface.TEST_CHANNEL_ID) - -@when(u'the admin changes the policy to {policy} on channel "{channel}" with args "{args}"') -def policyChannelUpdate_impl(context, policy, channel, args=None): - context.chaincode["policy"] = policy - context.chaincode["version"] = 2 - if args is not None: - context.chaincode["args"] = args - - peers = context.interface.get_peers(context) - context.interface.install_chaincode(context, peers, user="Admin") - 
context.chaincode["version"] = 3 - context.interface.upgrade_chaincode(context, "orderer0.example.com", channel) - context.interface.post_deploy_chaincode(context, "peer0.org1.example.com", 120) - -@when(u'the admin changes the policy to {policy} with args "{args}"') -def step_impl(context, policy, args): - policyChannelUpdate_impl(context, policy, context.interface.TEST_CHANNEL_ID, args) - -@when(u'the admin changes the policy to {policy}') -def step_impl(context, policy): - policyChannelUpdate_impl(context, policy, context.interface.TEST_CHANNEL_ID) - -@when('the peer admin from "{peer}" signs the updated channel config for channel "{channel}"') -def sign_impl(context, peer, channel): - if not hasattr(context, "block_filename"): - filename = "/var/hyperledger/configs/{0}/update{1}.pb".format(context.composition.projectName, channel) - else: - filename = "/var/hyperledger/{}".format(context.block_filename) - - # If this is a string and not a list, convert to list - peers = peer - if type(peer) == str: - peers = [peer] - context.interface.sign_channel(context, peers, filename) - -@when('the peer admin from "{peer}" signs the updated channel config') -def step_impl(context, peer): - sign_impl(context, [peer], context.interface.TEST_CHANNEL_ID) - -@when('all organization admins sign the updated channel config') -def step_impl(context): - peers = context.interface.get_peers(context) - sign_impl(context, peers, context.interface.TEST_CHANNEL_ID) - -@when(u'a user requests to get the design doc "{ddoc_name}" for the chaincode named "{cc_name}" in the channel "{ch_name}" and from the CouchDB instance "{couchdb_instance}"') -def step_impl(context, ddoc_name, cc_name, ch_name, couchdb_instance): - cmd=["curl", "-k", "-X", "GET", couchdb_instance+"/"+ch_name+"_"+cc_name+"/_design/"+ddoc_name] - print("cmd is: "+" ".join(str(p) for p in cmd)+"\n") - context.result=subprocess.check_output(cmd, env=os.environ) - print("result is: "+context.result+"\n") - -@then(u'a user 
receives {status} response of [{response}] from the couchDB container') -def step_impl(context, status, response): - print("response is: "+response) - if status == "success": - assert "error" not in context.result, "Error, recieved unexpected error message from CouchDB container: "+context.result - elif status == "error": - assert "error" in context.result, "Error, recieved unexpected message with no error from CouchDB container: "+context.result - else: - assert False, "Error: Unknown response type defined in feature file." - - assert response in context.result, "Error, recieved unexpected response from CouchDB container: "+context.result - -@then(u'a user receives {status} response of {response} from the initial leader peer of "{org}"') -def step_impl(context, response, org, status): - expected_impl(context, response, context.interface.get_initial_leader(context, org)) - -@then(u'a user receives {status} response of {response} from the initial non-leader peer of "{org}"') -def step_impl(context, response, org, status): - expected_impl(context, response, context.interface.get_initial_non_leader(context, org)) - -@then(u'a user receives {status} response of {response} from "{peer}"') -def expected_impl(context, response, peer, status="a success"): - assert peer in context.result, "There is no response from {0}".format(peer) - if status == "a success": - assert str(context.result[peer].strip()) == str(response.strip()), \ - "Expected response was {0}; received {1}".format(response, - context.result[peer]) - elif status == "an error": - assert "Error:" in context.result[peer], "There was not an error response: {0}".format(context.result[peer]) - assert response in context.result[peer], "Expected response was {0}; received {1}".format(response, context.result[peer]) - else: - assert False, "Unknown response type: {}. 
Please choose success or error".format(status) - -@then(u'a user receives {status} response of {response}') -def step_impl(context, response, status="a success"): - expected_impl(context, response, "peer0.org1.example.com", status) - -@then(u'a user receives a response with the {valueType} value from "{peer}"') -def set_response_impl(context, valueType, peer): - assert peer in context.result, "There is no response from {0}".format(peer) - assert "Error endorsing query" not in context.result[peer], "There was an error response: {0}".format(context.result[peer]) - if valueType == "length": - assert len(context.result[peer].replace('\n', '').replace('"', '')) == context.payload["len"], \ - "Expected response to be of length {0}; received length {1}; Result: {2}".format(context.payload["len"], - len(context.result[peer]), - context.result[peer]) - elif valueType == "random": - assert context.payload["payload"] in context.result[peer], \ - "Expected response does not match the actual response; Result: {0}".format(context.result[peer]) - else: - assert False, "Unknown value type {}. 
This type may need to be implemented in the framework.".format(valueType) - -@then(u'a user receives a response with the {valueType} value') -def step_impl(context, valueType): - set_response_impl(context, valueType, "peer0.org1.example.com") - -@then(u'a user receives a response containing a value of length {length:d} from "{peer}"') -def length_impl(context, length, peer): - assert peer in context.result, "There is no response from {0}".format(peer) - assert "Error endorsing query" not in context.result[peer], "There was an error response: {0}".format(context.result[peer]) - assert len(context.result[peer].replace('\n', '').replace('"', '')) == length, \ - "Expected response to be of length {0}; received length {1}; Result: {2}".format(length, - len(context.result[peer]), - context.result[peer]) - -@then(u'a user receives a response containing a value of length {length:d}') -def step_impl(context, length): - length_impl(context, length, "peer0.org1.example.com") - -@then(u'a user receives a response containing {response} from "{peer}"') -def containing_impl(context, response, peer): - assert peer in context.result, "There is no response from {0}".format(peer) - if type(response) == type(context.result[peer]): - assert response in context.result[peer], u"Expected response was {0}; received {1}".format(response, context.result[peer]) - else: - assert str(response) in context.result[peer], "Expected response was {0}; received {1}".format(response, context.result[peer]) - -@then(u'a user receives a response containing {response}') -@then(u'an admin receives a response containing {response}') -def step_impl(context, response): - containing_impl(context, response, "peer0.org1.example.com") - -@then(u'a user receives a response not containing {response} from "{peer}"') -def not_containing_impl(context, response, peer): - assert peer in context.result, "There is no response from {0}".format(peer) - assert response not in context.result[peer], "Received response {0} 
(Expected it to NOT contain {1})".format(context.result[peer], response) - -@then(u'a user receives a response not containing {response}') -def step_impl(context, response): - not_containing_impl(context, response, "peer0.org1.example.com") - -@then(u'the "{fileName}" file is fetched from peer "{peer}" at location "{location}"') -def block_found_impl(context, fileName, peer, location=None): - if location is None: - location = "/var/hyperledger/configs/{0}".format(context.projectName) - - output = context.composition.docker_exec(["ls", location], [peer]) - assert fileName in output[peer], "The channel block file has not been fetched" - -@then(u'the config block file is fetched from peer "{peer}" at location "{location}"') -def step_impl(context, peer, location): - block_found_impl(context, context.interface.TEST_CHANNEL_ID, peer, location) - -@then(u'the config block file is fetched from peer "{peer}"') -def step_impl(context, peer): - block_found_impl(context, context.interface.TEST_CHANNEL_ID, peer) - -@then(u'the "{fileName}" file is fetched from peer "{peer}"') -def step_impl(context, fileName, peer): - info = fileName.split('.') - block_found_impl(context, info[0], peer, None) diff --git a/app/platform/fabric/e2e-test/feature/steps/endorser_util.py b/app/platform/fabric/e2e-test/feature/steps/endorser_util.py deleted file mode 100644 index 02ee03004..000000000 --- a/app/platform/fabric/e2e-test/feature/steps/endorser_util.py +++ /dev/null @@ -1,929 +0,0 @@ -# -# Copyright IBM Corp All Rights Reserved -# -# SPDX-License-Identifier: Apache-2.0 -# - -import config_util -import json -import yaml -import os -import re -import remote_util -import shutil -import subprocess -import sys -import time -import common_util - -try: - pbFilePath = "../feature" - sys.path.insert(0, pbFilePath) - from peer import chaincode_pb2 -except: - print("ERROR! 
Failed to import the protobuf libraries chaincode_pb2 from the ../feature/peer/ directory: {0}".format(sys.exc_info()[0])) - sys.exit(1) - -# The default channel ID -SYS_CHANNEL_ID = "behavesyschan" -TEST_CHANNEL_ID = "behavesystest" - - -class InterfaceBase: - # The default channel ID - SYS_CHANNEL_ID = "behavesyschan" - TEST_CHANNEL_ID = "behavesystest" - - def get_orderers(self, context): - orderers = [] - for container in context.composition.collectServiceNames(): - if container.startswith("orderer"): - orderers.append(container) - return orderers - - def get_peers(self, context): - peers = [] - for container in context.composition.collectServiceNames(): - if container.startswith("peer"): - peers.append(container) - return peers - - def deploy_chaincode(self, context, path, args, name, language, peer, username, timeout, channel=TEST_CHANNEL_ID, version=0, policy=None): - self.pre_deploy_chaincode(context, path, args, name, language, channel, version, policy) - all_peers = self.get_peers(context) - self.install_chaincode(context, all_peers, username) - self.instantiate_chaincode(context, peer, username) - self.post_deploy_chaincode(context, peer, timeout) - - def pre_deploy_chaincode(self, context, path, args, name, language, channelId=TEST_CHANNEL_ID, version=0, policy=None): - orderers = self.get_orderers(context) - peers = self.get_peers(context) - assert orderers != [], "There are no active orderers in this network" - - context.chaincode={"path": path, - "language": language, - "name": name, - "version": str(version), - "args": args, - "orderers": orderers, - "channelID": channelId, - } - if policy: - context.chaincode['policy'] = policy - - def post_deploy_chaincode(self, context, peer, timeout): - chaincode_container = "{0}-{1}-{2}-{3}".format(context.projectName, - peer, - context.chaincode['name'], - context.chaincode.get("version", 0)) - context.interface.wait_for_deploy_completion(context, chaincode_container, timeout) - - def 
channel_block_present(self, context, containers, channelId): - ret = False - configDir = "/var/hyperledger/configs/{0}".format(context.composition.projectName) - output = context.composition.docker_exec(["ls", configDir], containers) - for container in containers: - if "{0}.tx".format(channelId) in output[container]: - ret |= True - print("Channel Block Present Result {0}".format(ret)) - return ret - - def get_initial_leader(self, context, org): - if not hasattr(context, 'initial_leader'): - context.initial_leader={} - if org in context.initial_leader: - return context.initial_leader[org] - max_waittime=15 - waittime=5 - try: - with common_util.Timeout(max_waittime): - while org not in context.initial_leader: - for container in self.get_peers(context): - if ((org in container) and common_util.get_leadership_status(container)): - context.initial_leader[org]=container - print("initial leader is "+context.initial_leader[org]) - break - time.sleep(waittime) - finally: - assert org in context.initial_leader, "Error: After polling for " + str(max_waittime) + " seconds, no gossip-leader found by looking at the logs, for "+org - return context.initial_leader[org] - - def get_initial_non_leader(self, context, org): - if not hasattr(context, 'initial_non_leader'): - context.initial_non_leader={} - if org in context.initial_non_leader: - return context.initial_non_leader[org] - if org not in context.initial_non_leader: - for container in self.get_peers(context): - if (org in container and (not common_util.get_leadership_status(container))): - context.initial_non_leader[org]=container - print("initial non-leader is "+context.initial_non_leader[org]) - return context.initial_non_leader[org] - assert org in context.initial_non_leader, "Error: After polling for " + str(max_waittime) + " seconds, no gossip-non-leader found by looking at the logs, for "+org - return context.initial_non_leader[org] - - def find_replace_multi_ordered(self, string, dictionary): - # sort keys by 
length, in reverse order - for item in sorted(dictionary.keys(), key = len, reverse = True): - string = re.sub(item, str(dictionary[item]), string) - return string - - def wait_for_deploy_completion(self, context, chaincode_container, timeout): - pass - - def install_chaincode(self, context, peers, user="Admin"): - return self.cli.install_chaincode(context, peers, user=user) - - def instantiate_chaincode(self, context, peer, user="Admin"): - return self.cli.instantiate_chaincode(context, peer, user=user) - - def create_channel(self, context, orderer, channelId, user="Admin"): - return self.cli.create_channel(context, orderer, channelId, user=user) - - def fetch_channel(self, context, peers, orderer, channelId=TEST_CHANNEL_ID, location=None, user="Admin", ext=""): - return self.cli.fetch_channel(context, peers, orderer, channelId, location, user=user) - - def join_channel(self, context, peers, channelId, user="Admin"): - return self.cli.join_channel(context, peers, channelId, user=user) - - def invoke_chaincode(self, context, chaincode, orderer, peer, channelId, targs="", user="User1", opts={}): - # targs, user and opts are optional parameters with defaults set if they are not included - return self.cli.invoke_chaincode(context, chaincode, orderer, peer, channelId, targs, user, opts) - - def query_chaincode(self, context, chaincode, peer, channelId, targs="", user="User1", opts={}): - # targs and user are optional parameters with defaults set if they are not included - return self.cli.query_chaincode(context, chaincode, peer, channelId, targs, user) - - def enrollUsersFabricCA(self, context): - return self.cli.enrollUsersFabricCA(context) - - def addIdemixIdentities(self, context, user, passwd, role, org): - return self.cli.addIdemixIdentities(context, user, passwd, role, org) - - def enrollCAadmin(self, context, nodes): - return self.cli.enrollCAadmin(context, nodes) - - -class ToolInterface(InterfaceBase): - def __init__(self, context): - 
remote_util.getNetworkDetails(context) - - # use CLI for non implemented functions - self.cli = CLIInterface() - - def install_chaincode(self, context, peers, user="Admin"): - results = {} - for peer in peers: - peer_name = context.networkInfo["nodes"][peer]["nodeName"] - cmd = "node v1.0_sdk_tests/app.js installcc -i {0} -v 1 -p {1}".format(context.chaincode['name'], - peer_name) - print(cmd) - results[peer] = subprocess.check_call(cmd.split(), env=os.environ) - return results - - def instantiate_chaincode(self, context, peer="peer0.org1.example.com", user="Admin"): - channel = str(context.chaincode.get('channelID', self.TEST_CHANNEL_ID)) - args = json.loads(context.chaincode["args"]) - print(args) - peer_name = context.networkInfo["nodes"][peer]["nodeName"] - cmd = "node v1.0_sdk_tests/app.js instantiatecc -c {0} -i {1} -v 1 -a {2} -b {3} -p {4}".format(channel, - context.chaincode["name"], - args[2], - args[4], - peer_name) - print(cmd) - return subprocess.check_call(cmd.split(), env=os.environ) - - def create_channel(self, context, orderer, channelId, user="Admin"): - orderer_name = context.networkInfo["nodes"][orderer]["nodeName"] - peer_name = context.networkInfo["nodes"]["peer0.org1.example.com"]["nodeName"] - - # Config Setup for tool - cmd = "node v1.0_sdk_tests/app.js configtxn -c {0} -r {1}".format(channelId, "1,3") - ret = subprocess.check_call(cmd.split(), env=os.environ) - shutil.copyfile("{}.pb".format(channelId), "v1.0_sdk_tests/{}.pb".format(channelId)) - - cmd = "node v1.0_sdk_tests/app.js createchannel -c {0} -o {1} -r {2} -p {3}".format(channelId, - orderer_name, - "1,3", - peer_name) - print(cmd) - return subprocess.check_call(cmd.split(), env=os.environ) - - def join_channel(self, context, peers, channelId, user="Admin"): - results = {} - for peer in peers: - peer_name = context.networkInfo["nodes"][peer]["nodeName"] - cmd = "node v1.0_sdk_tests/app.js joinchannel -c {0} -p {1}".format(channelId, peer_name) - print(cmd) - results[peer] = 
subprocess.check_call(cmd.split(), env=os.environ) - return results - - def invoke_chaincode(self, context, chaincode, orderer, peer, channelId, targs="", user="User1", opts={}): - # targs, usesr and opts are optional parameters with defaults set if they are not included - args = json.loads(chaincode["args"]) - peer_name = context.networkInfo["nodes"][peer]["nodeName"] - cmd = "node v1.0_sdk_tests/app.js invoke -c {0} -i {1} -v 1 -p {2} -m {3}".format(channelId, - chaincode["name"], - peer_name, - args[-1]) - print(cmd) - return {peer: subprocess.check_call(cmd.split(), env=os.environ)} - - def query_chaincode(self, context, chaincode, peer, channelId, targs="", user="User1", opts={}): - # targs and user are optional parameters with defaults set if they are not included - peer_name = context.networkInfo["nodes"][peer]["nodeName"] - cmd = "node v1.0_sdk_tests/app.js query -c {0} -i {1} -v 1 -p {2}".format(channelId, - chaincode["name"], - peer_name) - print(cmd) - return {peer: subprocess.check_call(cmd.split(), env=os.environ)} - - def update_chaincode(self, context, chaincode, peer, channelId, user="Admin"): - peer_name = context.networkInfo["nodes"][peer]["nodeName"] - - -class SDKInterface(InterfaceBase): - def __init__(self, context, language): - if context.remote: - remote_util.getNetwork() - self.networkConfigFile = self.generateNetworkConfig(context) - - # use CLI for non implemented functions - self.cli = CLIInterface() - self.context = context - - if language.lower() == "nodejs": - self.initializeNode() - elif language.lower() == "java": - self.initializeJava() - else: - raise "Language {} is not supported in the test framework yet.".format(language) - - def generateNetworkConfig(self, context): - with open("./configs/network-config.json", "r") as fd: - networkConfig = fd.read() - - grpcType = "grpc" - proto = "http" - if context.tls: - grpcType = "grpcs" - proto = "https" - networkConfigFile = 
"{0}/configs/{1}/network-config.json".format(os.path.abspath('.'), - context.projectName) - - with open("{1}/configs/{0}/ordererOrganizations/example.com/ca/ca.example.com-cert.pem".format(context.projectName, os.path.abspath('.')), "r") as fd: - certs = fd.read().replace("\n", "\\r\\n") - - for org in ["org1.example.com", "org2.example.com"]: - with open("{2}/configs/{0}/peerOrganizations/{1}/ca/ca.{1}-cert.pem".format(context.projectName, org, os.path.abspath('.')), "r") as fd: - certs += fd.read().replace("\n", "\\r\\n") - - with open(networkConfigFile, "w+") as fd: - structure = {"config": "{0}/configs/{1}".format(os.path.abspath('.'), - context.projectName), - "tls": common_util.convertBoolean(context.tls), - "grpcType": grpcType, - "proto": proto, - "cacerts": certs, - "networkId": context.projectName} - updated = json.loads(networkConfig % (structure)) - fd.write(json.dumps(updated, indent=2)) - return networkConfigFile - - def initializeNode(self): - self.__class__ = NodeSDKInterface - self.inputFile = "commandInputs.json" - - def initializeJava(self): - self.__class__ = JavaSDKInterface - whichJava = subprocess.check_output(["which java"], - env=os.environ, - shell=True) - print("***{}***".format(whichJava.strip())) - javaVers = subprocess.check_output(["java -version"], - env=os.environ, - shell=True) - print("***{}***".format(javaVers)) - javaVers = subprocess.check_output(["ls -ltr "], - env=os.environ, - shell=True) - print("***{}***".format(javaVers)) - - def reformat_chaincode(self, chaincode, channelId): - reformatted = yaml.safe_load(chaincode.get('args', '[]')) - function = reformatted.pop(0) - chaincode['fcn'] = str(function) - chaincode['args'] = reformatted - chaincode['channelId'] = str(channelId) - return chaincode - - def invoke_chaincode(self, context, chaincode, orderer, peer, channelId=TEST_CHANNEL_ID, targs="", user="User1", opts={}): - # channelId, targs and user are optional parameters with defaults set if they are not included - 
peerParts = peer.split('.') - org = '.'.join(peerParts[1:]) - result = self.invoke_func(chaincode, channelId, user, org, [peer], orderer, opts) - print("Invoke: {}".format(result)) - return {peer: result} - - def query_chaincode(self, context, chaincode, peer, channelId=TEST_CHANNEL_ID, targs="", user="User1", opts={}): - # targs and user are optional parameters with defaults set if they are not included - peerParts = peer.split('.') - org = '.'.join(peerParts[1:]) - print("Class:", self.__class__) - result = self.query_func(chaincode, channelId, user, org, [peer], opts) - print("Query Result: {}".format(result)) - return {peer: result} - - def wait_for_deploy_completion(self, context, chaincode_container, timeout): - if context.remote: - time.sleep(30) - - containers = subprocess.check_output(["docker ps -a"], shell=True) - try: - with common_util.Timeout(timeout): - while chaincode_container not in containers: - containers = subprocess.check_output(["docker ps -a"], shell=True) - time.sleep(1) - finally: - assert chaincode_container in containers, "The expected chaincode container {0} is not running\n{1}".format(chaincode_container, containers) - - # Allow time for chaincode initialization to complete - time.sleep(15) - - -class NodeSDKInterface(SDKInterface): - def invoke_func(self, chaincode, channelId, user, org, peers, orderer, opts): - reformatted = self.reformat_chaincode(chaincode, channelId) - print("Chaincode", chaincode) - orgName = org.title().replace('.', '') - - jsonArgs = {"user": user, "org": org, "orgName": orgName, "chaincode":reformatted, "peers": peers, "orderer": orderer, "networkConfigFile": self.networkConfigFile, "opts": opts} - with open(self.inputFile, "w") as fd: - json.dump(jsonArgs, fd) - cmd = "node ./sdk/node/invoke.js invoke ../../{0}".format(self.inputFile) - print("cmd: {0}".format(cmd)) - return subprocess.check_call(cmd, shell=True) - - def query_func(self, chaincode, channelId, user, org, peers, opts): - print("Chaincode", 
chaincode) - reformatted = self.reformat_chaincode(chaincode, channelId) - orgName = org.title().replace('.', '') - - jsonArgs = {"user": user, "org": org, "orgName": orgName, "chaincode": reformatted, "peers": peers, "networkConfigFile": self.networkConfigFile, "opts": opts} - - with open(self.inputFile, "w") as fd: - json.dump(jsonArgs, fd) - cmd = "node ./sdk/node/query.js query ../../{0}".format(self.inputFile) - - print("cmd: {0}".format(cmd)) - response = subprocess.check_output(cmd, shell=True) - regex = "\{.*response.*:\"(.*?)\"\}" - match = re.findall(regex, response, re.MULTILINE | re.DOTALL) - assert match, "No matching response within query result {}".format(response) - return match[0] - -class JavaSDKInterface(SDKInterface): - def invoke_func(self, chaincode, channelId, user, org, peers, orderer, opts): - print("Chaincode", chaincode) - result = {} - reformatted = self.reformat_chaincode(chaincode, channelId) - passInfo = self.context.users.get(user, None) - if passInfo is None: - if "Admin" in user: - password = "adminpw" - elif "User" in user: - password = "{}pw".format(user.lower()) - else: - password = passInfo["password"] - for peer in peers: - inputs = {'peer': peer, - 'org': org, - 'orgName': org.title().replace('.', ''), - 'user': user, - 'password': password, - 'orderer': orderer, - 'config': "{0}/configs/{1}".format(os.path.abspath('.'), self.context.projectName), - #'cacert': "./configs/{0}/peerOrganizations/{1}/ca/ca.{1}-cert.pem".format(self.context.projectName, org), - 'cacert': "{1}/configs/{0}/ordererOrganizations/example.com/orderers/orderer0.example.com/msp/tlscacerts/tlsca.example.com-cert.pem".format(self.context.projectName, os.path.abspath('.')), - 'srvcert': "./configs/{0}/peerOrganizations/{1}/peers/peer0.{1}/tls/server.crt".format(self.context.projectName, org), - 'channel': channelId, - 'name': chaincode.get("name", "mycc"), - 'func': reformatted["fcn"], - 'args': str(reformatted["args"]).replace(" ", ""), - } - invoke_inputs 
= '-n {peer} -i 127.0.0.1 -p 7051 -r {org} -c {config} -a {cacert} -s {srvcert} -d {orderer} -h {channel} -m {name} -f {func} -g {args} -u {user} -w {password}'.format(**inputs) - invoke_call = 'java -jar {0}/sdk/java/peer-javasdk.jar -o invoke {1}'.format(os.path.abspath('.'), invoke_inputs) - print("Invoke command::", invoke_call) - result[peer] = subprocess.check_output(invoke_call, shell=True) - return result - - def query_func(self, chaincode, channelId, user, org, peers, opts): - print("Chaincode", chaincode) - - result = {} - reformatted = self.reformat_chaincode(chaincode, channelId) - passInfo = self.context.users.get(user, None) - if passInfo is None: - if "Admin" in user: - password = "adminpw" - elif "User" in user: - password = "{}pw".format(user.lower()) - else: - password = passInfo["password"] - for peer in peers: - inputs = {'peer': peer, - 'org': org, - 'orgName': org.title().replace('.', ''), - 'user': user, - 'password': password, - 'orderer': "orderer0.example.com", - 'config': "{0}/configs/{1}".format(os.path.abspath('.'), self.context.projectName), - 'cacert': "{1}/configs/{0}/ordererOrganizations/example.com/orderers/orderer0.example.com/msp/tlscacerts/tlsca.example.com-cert.pem".format(self.context.projectName, os.path.abspath('.')), - 'srvcert': "{2}/configs/{0}/peerOrganizations/{1}/peers/peer0.{1}/tls/server.crt".format(self.context.projectName, org, os.path.abspath('.')), - 'channel': channelId, - 'name': chaincode.get("name", "mycc"), - 'func': reformatted["fcn"], - 'args': reformatted["args"], - } - print("Inputs", inputs) - query_inputs = '-n {peer} -i 127.0.0.1 -p 7051 -r {org} -c {config} -a {cacert} -s {srvcert} -d {orderer} -h {channel} -m {name} -f {func} -g {args} -u {user} -w {password}'.format(**inputs) - query_call = 'java -jar {0}/sdk/java/peer-javasdk.jar -o query {1}'.format(os.path.abspath('.'), query_inputs) - print("Query command::", query_call) - answer = subprocess.check_output(query_call, shell=True) - 
print("answer:", answer.split("\n")[-3:]) - result[peer] = "\n".join(answer.split("\n")[-2:]) - # Only return the last bit of the query response - return "\n".join(answer.split("\n")[-2:]) - - -class CLIInterface(InterfaceBase): - - def get_env_vars(self, context, peer="peer0.org1.example.com", user="Admin", includeAll=True): - configDir = "/var/hyperledger/configs/{0}".format(context.composition.projectName) - peerParts = peer.split('.') - org = '.'.join(peerParts[1:]) - setup = ["sh", "-c", - '"CORE_PEER_MSPCONFIGPATH={0}/peerOrganizations/{2}/users/{1}@{2}/msp'.format(configDir, user, org)] - - if includeAll: - setup += ['CORE_PEER_LOCALMSPID={0}'.format(org), - 'CORE_PEER_ID={0}'.format(peer), - 'CORE_PEER_ADDRESS={0}:7051'.format(peer)] - - # Only pull the env vars specific to the peer - if peer in context.composition.environ.keys(): - for key, value in context.composition.environ[peer].items(): - setup.append("{0}={1}".format(key, value)) - - if context.tls and "CORE_PEER_TLS_CERT_FILE" not in setup: - setup += ['CORE_PEER_TLS_ROOTCERT_FILE={0}/peerOrganizations/{1}/peers/{2}/tls/ca.crt'.format(configDir, org, peer), - 'CORE_PEER_TLS_CERT_FILE={0}/peerOrganizations/{1}/peers/{2}/tls/server.crt'.format(configDir, org, peer), - 'CORE_PEER_TLS_KEY_FILE={0}/peerOrganizations/{1}/peers/{2}/tls/server.key'.format(configDir, org, peer)] - - return setup - - def get_chaincode_deploy_spec(self, projectDir, ccType, path, name, args): - subprocess.call(["peer", "chaincode", "package", - "-n", name, - "-c", '{"Args":{0}}'.format(args), - "-p", path, - "configs/{0}/test.file".format(projectDir)], shell=True) - ccDeploymentSpec = chaincode_pb2.ChaincodeDeploymentSpec() - with open("test.file", 'rb') as f: - ccDeploymentSpec.ParseFromString(f.read()) - return ccDeploymentSpec - - def install_chaincode(self, context, peers, user="Admin"): - configDir = "/var/hyperledger/configs/{0}".format(context.composition.projectName) - output = {} - for peer in peers: - peerParts = 
peer.split('.') - org = '.'.join(peerParts[1:]) - setup = self.get_env_vars(context, peer, user=user) - command = ["peer", "chaincode", "install", - "--name",context.chaincode['name'], - "--lang", context.chaincode['language'], - "--version", str(context.chaincode.get('version', 0)), - "--path", context.chaincode['path']] - if context.tls: - command = command + ["--tls", - "--cafile", - '{0}/ordererOrganizations/example.com/orderers/orderer0.example.com/msp/tlscacerts/tlsca.example.com-cert.pem'.format(configDir)] - if hasattr(context, "mutual_tls") and context.mutual_tls: - command = command + ["--clientauth", - "--certfile", - '{0}/peerOrganizations/{1}/users/{2}@{1}/tls/client.crt'.format(configDir, org, user), - "--keyfile", - '{0}/peerOrganizations/{1}/users/{2}@{1}/tls/client.key'.format(configDir, org, user)] - if "orderers" in context.chaincode: - command = command + ["--orderer", 'orderer0.example.com:7050'] - if "user" in context.chaincode: - command = command + ["--username", context.chaincode["user"]] - command.append('"') - ret = context.composition.docker_exec(setup+command, ['cli']) - assert "Error occurred" not in ret['cli'], "The install failed with the following error: {}".format(ret['cli']) - output[peer] = ret['cli'] - print("[{0}]: {1}".format(" ".join(setup + command), output)) - return output - - def instantiate_chaincode(self, context, peer="peer0.org1.example.com", user="Admin"): - configDir = "/var/hyperledger/configs/{0}".format(context.composition.projectName) - args = context.chaincode.get('args', '[]').replace('"', r'\"') - #output = {} - peerParts = peer.split('.') - org = '.'.join(peerParts[1:]) - setup = self.get_env_vars(context, peer, user=user) - command = ["peer", "chaincode", "instantiate", - "--name", context.chaincode['name'], - "--version", str(context.chaincode.get('version', 0)), - "--lang", context.chaincode['language'], - "--channelID", str(context.chaincode.get('channelID', self.TEST_CHANNEL_ID)), - "--ctor", 
r"""'{\"Args\": %s}'""" % (args)] - if context.tls: - command = command + ["--tls", - common_util.convertBoolean(context.tls), - "--cafile", - '{0}/ordererOrganizations/example.com/orderers/orderer0.example.com/msp/tlscacerts/tlsca.example.com-cert.pem'.format(configDir)] - if hasattr(context, "mutual_tls") and context.mutual_tls: - command = command + ["--clientauth", - "--certfile", - '{0}/peerOrganizations/{1}/users/{2}@{1}/tls/client.crt'.format(configDir, org, user), - "--keyfile", - '{0}/peerOrganizations/{1}/users/{2}@{1}/tls/client.key'.format(configDir, org, user)] - if "orderers" in context.chaincode: - command = command + ["--orderer", 'orderer0.example.com:7050'] - if "user" in context.chaincode: - command = command + ["--username", context.chaincode["user"]] - if context.chaincode.get("policy", None) is not None: - command = command + ["--policy", context.chaincode["policy"].replace('"', r'\"')] - command.append('"') - - #output[peer] = context.composition.docker_exec(setup + command, [peer]) - output = context.composition.docker_exec(setup + command, [peer]) - print("[{0}]: {1}".format(" ".join(setup + command), output)) - return output - - def create_channel(self, context, orderer, channelId=TEST_CHANNEL_ID, user="Admin"): - configDir = "/var/hyperledger/configs/{0}".format(context.composition.projectName) - setup = self.get_env_vars(context, "peer0.org1.example.com", user=user) - # wait a bit for network to come up - time.sleep(30) - # Ideally this would NOT be a 5 or 3 minute timeout, but more like a 2 or 1 minute timeout. 
- timeout = 180 + common_util.convertToSeconds(context.composition.environ.get('CONFIGTX_ORDERER_BATCHTIMEOUT', '0s')) - command = ["peer", "channel", "create", - "--file", "/var/hyperledger/configs/{0}/{1}.tx".format(context.composition.projectName, channelId), - "--channelID", channelId, - "--timeout", "{}s".format(timeout), - "--orderer", '{0}:7050'.format(orderer)] - if context.tls: - command = command + ["--tls", - common_util.convertBoolean(context.tls), - "--cafile", - '{0}/ordererOrganizations/example.com/orderers/{1}/msp/tlscacerts/tlsca.example.com-cert.pem'.format(configDir, orderer)] - if hasattr(context, "mutual_tls") and context.mutual_tls: - org = "org1.example.com" - command = command + ["--clientauth", - "--certfile", - '{0}/peerOrganizations/{1}/users/{2}@{1}/tls/client.crt'.format(configDir, org, user), - "--keyfile", - '{0}/peerOrganizations/{1}/users/{2}@{1}/tls/client.key'.format(configDir, org, user)] - - command.append('"') - - output = context.composition.docker_exec(setup+command, ['cli']) - print("[{0}]: {1}".format(" ".join(setup+command), output)) - if "SERVICE_UNAVAILABLE" in output['cli']: - time.sleep(5) - print("Received: {0}, Trying again...".format(output['cli'])) - output = context.composition.docker_exec(setup+command, ['cli']) - assert "Error:" not in output, "Unable to successfully create channel {}".format(channelId) - - return output - - def fetch_channel(self, context, peers, orderer, channelId=TEST_CHANNEL_ID, location=None, user="Admin", ext="", block=""): - configDir = "/var/hyperledger/configs/{0}".format(context.composition.projectName) - if not location: - location = configDir - - if not ext: - ext = "block" - - for peer in peers: - setup = self.get_env_vars(context, peer, includeAll=False, user=user) - command = ["peer", "channel", "fetch", "config"] - if block: - command = ["peer", "channel", "fetch", block] - command += ["{0}/{1}.{2}".format(location, channelId, ext), - "--channelID", channelId, - "--orderer", 
'{0}:7050'.format(orderer)] - if context.tls: - command = command + ["--tls", - "--cafile", - '{0}/ordererOrganizations/example.com/orderers/{1}/msp/tlscacerts/tlsca.example.com-cert.pem'.format(configDir, orderer)] - if hasattr(context, "mutual_tls") and context.mutual_tls: - peerParts = peer.split('.') - org = '.'.join(peerParts[1:]) - command = command + ["--clientauth", - "--certfile", - '{0}/peerOrganizations/{1}/users/{2}@{1}/tls/client.crt'.format(configDir, org, user), - "--keyfile", - '{0}/peerOrganizations/{1}/users/{2}@{1}/tls/client.key'.format(configDir, org, user)] - - command.append('"') - - output = context.composition.docker_exec(setup+command, [peer]) - print("[{0}]: {1}".format(" ".join(setup+command), output)) - return output - - def join_channel(self, context, peers, channelId=TEST_CHANNEL_ID, user="Admin"): - configDir = "/var/hyperledger/configs/{0}".format(context.composition.projectName) - - for peer in peers: - peerParts = peer.split('.') - org = '.'.join(peerParts[1:]) - setup = self.get_env_vars(context, peer, user=user) - command = ["peer", "channel", "join", - "--blockpath", '/var/hyperledger/configs/{0}/{1}.block"'.format(context.composition.projectName, channelId)] - count = 0 - output = "Error" - - # Try joining the channel 5 times with a 2 second delay between tries - while count < 5 and "Error" in output: - output = context.composition.docker_exec(setup+command, [peer]) - time.sleep(2) - count = count + 1 - output = output[peer] - - print("[{0}]: {1}".format(" ".join(setup+command), output)) - assert "Error: genesis block file not found open " not in output, "Unable to find the genesis block file {0}.block".format(channelId) - - return output - - def update_channel(self, context, peers, channelId=TEST_CHANNEL_ID, orderer="orderer0.example.com", block_filename="update.pb", user="Admin"): - configDir = "/var/hyperledger/configs/{0}".format(context.composition.projectName) - - # peer channel update -f org3_update_in_envelope.pb -c 
$CHANNEL_NAME -o orderer.example.com:7050 --tls --cafile $ORDERER_CA - for peer in peers: - peerParts = peer.split('.') - org = '.'.join(peerParts[1:]) - setup = self.get_env_vars(context, peer, includeAll=False, user=user) - command = ["peer", "channel", "update", - "--file", block_filename, - "--channelID", channelId, - "--orderer", '{0}:7050'.format(orderer)] - if context.tls: - command = command + ["--tls", - "--cafile", - '{0}/ordererOrganizations/example.com/orderers/{1}/msp/tlscacerts/tlsca.example.com-cert.pem'.format(configDir, orderer)] - if hasattr(context, "mutual_tls") and context.mutual_tls: - command = command + ["--clientauth", - "--certfile", - '{0}/peerOrganizations/{1}/users/{2}@{1}/tls/client.crt'.format(configDir, org, user), - "--keyfile", - '{0}/peerOrganizations/{1}/users/{2}@{1}/tls/client.key'.format(configDir, org, user)] - - command.append('"') - output = context.composition.docker_exec(setup+command, [peer]) - print("[{0}]: {1}".format(" ".join(setup+command), output)) - return output - - def sign_channel(self, context, peers, block_filename="update.pb", user="Admin"): - # peer channel signconfigtx -f org3_update_in_envelope.pb - for peer in peers: - peerParts = peer.split('.') - org = '.'.join(peerParts[1:]) - setup = self.get_env_vars(context, peer, user=user) - command = ["peer", "channel", "signconfigtx", - "--file", '{}"'.format(block_filename)] - output = context.composition.docker_exec(setup+command, [peer]) - print("[{0}]: {1}".format(" ".join(setup+command), output)) - return output - - def upgrade_chaincode(self, context, orderer, peer, channelId=TEST_CHANNEL_ID, user="Admin"): - configDir = "/var/hyperledger/configs/{0}".format(context.composition.projectName) - setup = self.get_env_vars(context, peer, user=user) - command = ["peer", "chaincode", "upgrade", - "--name", context.chaincode['name'], - "--version", str(context.chaincode.get('version', 1)), - "--channelID", str(context.chaincode.get('channelID', channelId))] - if 
context.chaincode["args"]: - command = command + ["--ctor", r"""'{\"Args\": %s}'""" % (str(context.chaincode["args"].replace('"', r'\"')))] - if context.tls: - command = command + ["--tls", - "--cafile", - '{0}/ordererOrganizations/example.com/orderers/orderer0.example.com/msp/tlscacerts/tlsca.example.com-cert.pem'.format(configDir)] - if hasattr(context, "mutual_tls") and context.mutual_tls: - peerParts = peer.split('.') - org = '.'.join(peerParts[1:]) - command = command + ["--clientauth", - "--certfile", - '{0}/peerOrganizations/{1}/users/{2}@{1}/tls/client.crt'.format(configDir, org, user), - "--keyfile", - '{0}/peerOrganizations/{1}/users/{2}@{1}/tls/client.key'.format(configDir, org, user)] - if "orderers" in context.chaincode: - command = command + ["--orderer", '{}:7050'.format(orderer)] - if "user" in context.chaincode: - command = command + ["--username", context.chaincode["user"]] - if context.chaincode.get("policy", None) is not None: - command = command + ["--policy", context.chaincode["policy"].replace('"', r'\"')] - - command.append('"') - output = context.composition.docker_exec(setup+command, ['peer0.org1.example.com']) - print("[{0}]: {1}".format(" ".join(setup + command), output)) - return output - - def invoke_chaincode(self, context, chaincode, orderer, peer, channelId=TEST_CHANNEL_ID, targs="", user="User1", opts={}): - # channelId, targs, user and opts are optional parameters with defaults set if they are not included - configDir = "/var/hyperledger/configs/{0}".format(context.composition.projectName) - args = chaincode.get('args', '[]').replace('"', r'\"') - setup = self.get_env_vars(context, peer, user=user) - command = ["peer", "chaincode", "invoke", - "--name", chaincode['name'], - "--ctor", r"""'{\"Args\": %s}'""" % (args), - "--channelID", channelId] - if context.tls: - command = command + ["--tls", - "--cafile", - 
'{0}/ordererOrganizations/example.com/orderers/orderer0.example.com/msp/tlscacerts/tlsca.example.com-cert.pem'.format(configDir)] - if hasattr(context, "mutual_tls") and context.mutual_tls: - peerParts = peer.split('.') - org = '.'.join(peerParts[1:]) - command = command + ["--clientauth", - "--certfile", - '{0}/peerOrganizations/{1}/users/{2}@{1}/tls/client.crt'.format(configDir, org, user), - "--keyfile", - '{0}/peerOrganizations/{1}/users/{2}@{1}/tls/client.key'.format(configDir, org, user)] - if targs: - #to escape " so that targs are compatible with cli command - targs = targs.replace('"', r'\"') - command = command + ["--transient", targs] - - command = command + ["--orderer", '{0}:7050'.format(orderer)] - command.append('"') - output = context.composition.docker_exec(setup+command, [peer]) - print("Invoke[{0}]: {1}".format(" ".join(setup+command), str(output))) - output = self.retry(context, output, peer, setup, command) - return output - - def query_chaincode(self, context, chaincode, peer, channelId=TEST_CHANNEL_ID, targs="", user="User1", opts={}): - # channelId, targs and user are optional parameters with defaults set if they are not included - configDir = "/var/hyperledger/configs/{0}".format(context.composition.projectName) - peerParts = peer.split('.') - org = '.'.join(peerParts[1:]) - args = chaincode.get('args', '[]').replace('"', r'\"') - setup = self.get_env_vars(context, peer, user=user) - command = ["peer", "chaincode", "query", - "--name", chaincode['name'], - "--ctor", r"""'{\"Args\": %s}'""" % (str(args)), # This should work for rich queries as well - "--channelID", channelId] - if targs: - #to escape " so that targs are compatible with cli command - targs = targs.replace('"', r'\"') - command = command +["--transient", targs] - - if context.tls: - command = command + ["--tls", - "--cafile", - '{0}/peerOrganizations/{1}/tlsca/tlsca.{1}-cert.pem'.format(configDir, org), - "--certfile", - 
'{0}/peerOrganizations/{1}/users/{2}@{1}/tls/client.crt'.format(configDir, org, user)] - command.append('"') - result = context.composition.docker_exec(setup+command, [peer]) - print("Query Exec command: {0}".format(" ".join(setup+command))) - result = self.retry(context, result, peer, setup, command) - print("Query Result: {0}".format(result)) - return result - - def enrollCAadmin(self, context, nodes): - proto = "http" - if context.tls: - proto = "https" - - for node in nodes: - org = node.split(".", 1)[1] - userpass = context.composition.getEnvFromContainer("ca.{}".format(org), 'BOOTSTRAP_USER_PASS') - url = "{2}://{0}@ca.{1}:7054".format(userpass, org, proto) - output = context.composition.docker_exec(["fabric-ca-client enroll -d -u {0} -M /var/hyperledger/msp --caname ca.{1} --csr.cn ca.{1} --tls.certfiles /var/hyperledger/msp/cacerts/ca.{1}-cert.pem".format(url, org)], [node]) - print("Output Enroll: {}".format(output)) - - def registerUser(self, context, user, org, passwd, role, peer): - command = "fabric-ca-client register -d --id.name {0} --id.secret {2} --tls.certfiles /var/hyperledger/msp/cacerts/ca.{1}-cert.pem".format(user, org, passwd) - if role.lower() == u'admin': - command += ''' --id.attrs '"hf.Registrar.Roles=peer,client"' --id.attrs hf.Registrar.Attributes=*,hf.Revoker=true,hf.GenCRL=true,admin=true:ecert''' - - context.composition.environ["FABRIC_CA_CLIENT_HOME"] = "/var/hyperledger/users/{0}@{1}".format(user, org) - output = context.composition.docker_exec([command], [peer]) - print("user register: {}".format(output)) - - def enrollUser(self, context, user, org, passwd, enrollType, peer): - fca = 'ca.{}'.format(org) - proto = "http" - if context.tls: - proto = "https" - - adminUser = context.composition.getEnvFromContainer(fca, "BOOTSTRAP_USER_PASS") - command = "fabric-ca-client enroll -d --enrollment.profile tls -u {7}://{0}:{1}@{3}:7054 -M /var/hyperledger/users/{0}@{2}/tls --csr.hosts {4} --enrollment.type {5} --tls.certfiles 
/var/hyperledger/configs/{6}/peerOrganizations/{2}/ca/ca.{2}-cert.pem".format(user, passwd, org, fca, peer, enrollType, context.projectName, proto) - output = context.composition.docker_exec([command], [peer]) - print("Output: {}".format(output)) - - command = "fabric-ca-client certificate list -d --id {0} --store /var/hyperledger/users/{0}@{1}/tls/ --caname {3} --csr.cn {3} --tls.certfiles /var/hyperledger/configs/{2}/peerOrganizations/{1}/ca/ca.{1}-cert.pem".format(user, org, context.projectName, fca) - output = context.composition.docker_exec([command], [peer]) - print("Cert Output: {}".format(output)) - - def enrollUsersFabricCA(self, context): - configDir = "/var/hyperledger/configs/{0}".format(context.composition.projectName) - for user in context.users.keys(): - org = context.users[user]['organization'] - passwd = context.users[user]['password'] - role = context.users[user].get('role', "user") - enrollType = context.users[user].get('certType', "x509") - peer = 'peer0.{}'.format(org) - - # Enroll (login) admin first - self.enrollCAadmin(context, [peer]) - - self.registerUser(context, user, org, passwd, role, peer) - self.enrollUser(context, user, org, passwd, enrollType, peer) - if enrollType == u'idemix': - self.addIdemixIdentities(context, user, passwd, role, org) - - # Place the certificates in the set directory structure - self.placeCertsInDirStruct(context, user, org, peer) - - def placeCertsInDirStruct(self, context, user, org, peer): - fca = 'ca.{}'.format(org) - proto = "http" - if context.tls: - proto = "https" - - # Ensure that the owner of all of the user directories are the same - print("Checking file ownership: /var/hyperledger/users/{0}@{1} ...".format(user, org)) - output = context.composition.docker_exec(['stat -c "%u %g" /var/hyperledger/users/Admin@{0}'.format(org)], [peer]) - out = output[peer].strip().split(" ") - print("Existing stat:: {}".format(out)) - output = context.composition.docker_exec(['stat -c "%u %g" 
/var/hyperledger/users/{0}@{1}'.format(user, org)], [peer]) - new = output[peer].strip().split(" ") - print("New stat:: {}".format(new)) - if new[0] != out[0]: - context.printEnvWarning = True - output = context.composition.docker_exec(['chown -R {2}:{3} /var/hyperledger/users/{0}@{1}'.format(user, org, out[0], out[1])], [peer]) - - os.mkdir("configs/{2}/peerOrganizations/{1}/users/{0}@{1}/msp".format(user, org, context.projectName)) - os.mkdir("configs/{2}/peerOrganizations/{1}/users/{0}@{1}/msp/signcerts".format(user, org, context.projectName)) - os.mkdir("configs/{2}/peerOrganizations/{1}/users/{0}@{1}/msp/keystore".format(user, org, context.projectName)) - os.mkdir("configs/{2}/peerOrganizations/{1}/users/{0}@{1}/msp/admincerts".format(user, org, context.projectName)) - - shutil.copy("configs/{2}/peerOrganizations/{1}/users/{0}@{1}/tls/{0}.pem".format(user, org, context.projectName), - "configs/{2}/peerOrganizations/{1}/users/{0}@{1}/tls/client.crt".format(user, org, context.projectName)) - shutil.copy("configs/{2}/peerOrganizations/{1}/users/{0}@{1}/tls/signcerts/cert.pem".format(user, org, context.projectName), - "configs/{2}/peerOrganizations/{1}/users/{0}@{1}/msp/signcerts/{0}@{1}-cert.pem".format(user, org, context.projectName)) - keyfile = os.listdir("configs/{2}/peerOrganizations/{1}/users/{0}@{1}/tls/keystore/".format(user, org, context.projectName))[0] - shutil.copy("configs/{2}/peerOrganizations/{1}/users/{0}@{1}/tls/keystore/{3}".format(user, org, context.projectName, keyfile), - "configs/{2}/peerOrganizations/{1}/users/{0}@{1}/tls/client.key".format(user, org, context.projectName)) - shutil.copy("configs/{2}/peerOrganizations/{1}/users/{0}@{1}/tls/keystore/{3}".format(user, org, context.projectName, keyfile), - "configs/{2}/peerOrganizations/{1}/users/{0}@{1}/msp/keystore/{3}".format(user, org, context.projectName, keyfile)) - - shutil.copy("configs/{2}/peerOrganizations/{1}/users/{0}@{1}/msp/signcerts/{0}@{1}-cert.pem".format(user, org, 
context.projectName), - "configs/{2}/peerOrganizations/{1}/users/{0}@{1}/msp/admincerts/{0}@{1}-cert.pem".format(user, org, context.projectName)) - - command = "fabric-ca-client getcacert -d -u {3}://{0}:7054 -M /var/hyperledger/users/{1}@{2}/msp --tls.certfiles /var/hyperledger/msp/cacerts/ca.{2}-cert.pem".format(fca, user, org, proto) - output = context.composition.docker_exec([command], [peer]) - print("CACert Output: {}".format(output)) - output = context.composition.docker_exec(['chown -R {2}:{3} /var/hyperledger/users/{0}@{1}'.format(user, org, out[0], out[1])], [peer]) - - def addIdemixIdentities(self, context, user, passwd, role, org): - peer = 'peer0.{}'.format(org) - d = {"passwd": passwd, "role": role, "org": org, "username": user, "attrib": [{"name": "hf.Revoker", "value": "true"}]} - if role.lower() == u'admin': - d["attrib"].append({"name": "admin", "value": "true:ecert"}) - commandStr = "fabric-ca-client identity add {0} --json '{\"secret\": \"passwd\", \"type\": \"user\", \"affiliation\": \"org\", \"max_enrollments\": 1, \"attrs\": attrib}' --id.name username --id.secret passwd --tls.certfiles /var/hyperledger/msp/cacerts/ca.org-cert.pem" - command = self.find_replace_multi_ordered(commandStr, d) - output = context.composition.docker_exec([command], [peer]) - print("Idemix Output: {}".format(output)) - - output = context.composition.docker_exec(["fabric-ca-client identity list"], [peer]) - print("Ident List: {}".format(output)) - - def wait_for_deploy_completion(self, context, chaincode_container, timeout): - containers = subprocess.check_output(["docker ps -a"], shell=True) - try: - with common_util.Timeout(timeout): - while chaincode_container not in containers: - containers = subprocess.check_output(["docker ps -a"], shell=True) - time.sleep(1) - finally: - assert chaincode_container in containers, "The expected chaincode container {0} is not running\n{1}".format(chaincode_container, containers) - - # Allow time for chaincode initialization to 
complete - time.sleep(10) - - def retry(self, context, output, peer, setup, command): - count = 0 - while count < 3: - count += 1 - if "been successfully instantiated and try again" in output[peer]: - time.sleep(5) - print("Received: {0}, Trying again({1})...".format(output[peer], count)) - output = context.composition.docker_exec(setup+command, [peer]) - return output diff --git a/app/platform/fabric/e2e-test/feature/steps/explorer_impl.py b/app/platform/fabric/e2e-test/feature/steps/explorer_impl.py deleted file mode 100644 index 570ac9291..000000000 --- a/app/platform/fabric/e2e-test/feature/steps/explorer_impl.py +++ /dev/null @@ -1,265 +0,0 @@ -# SPDX-License-Identifier: Apache-2.0 - -from behave_rest.steps import * -from behave import * -import time -import os -import sys -import uuid -import basic_impl -import compose_util -import common_util -import config_util -import shutil -import subprocess -import requests -import json_responses - -FNULL = open(os.devnull, 'w') - -@given(u'For explorer env, I have a bootstrapped fabric network of type {ordererType}') -def step_impl(context, ordererType): - config_util.PROFILE_TYPES.update({"kafka-sd": "SampleInsecureKafka"}) - config_util.ORDERER_TYPES.append("kafka-sd") - basic_impl.bootstrapped_impl(context, ordererType, "leveldb", False) - -@when(u'I start explorer') -def start_explorer_impl(context): - try: - testConfigs = config_util.makeProjectConfigDir(context) - updated_env = config_util.updateEnviron(context) - cmd = ['find {0} -iregex \'.*_sk$\' -type f | xargs -I[] dirname [] | xargs -I[] bash -c \'pushd [] && ln -s *_sk sk && popd\''.format(testConfigs)] - subprocess.call(cmd, shell=True, env=updated_env) - except: - print("Unable to create symbolic link for secret keys: {0}".format(sys.exc_info()[1])) - - context.headers = {} - context.base_url = "" - context.json_responses = json_responses - - curpath = os.path.realpath('.') - composeFiles = ["%s/docker-compose/docker-compose-explorer.yaml" % (curpath)] - 
if not hasattr(context, "composition_explorer"): - context.composition_explorer = compose_util.Composition(context, composeFiles, - projectName=context.projectName, - startContainers=False) - else: - context.composition_explorer.composeFilesYaml = composeFiles - - if hasattr(context, "composition"): - env = context.composition.getEnv() - for key,value in env.items(): - context.composition_explorer.environ[key] = value - - context.composition_explorer.up() - - -@given(u'I start first-network') -@given(u'I start first-network orderer network of type {consensus_type}') -def start_firstnetwork_impl(context, consensus_type="solo"): - curpath = os.path.realpath('.') - composeFiles = ["%s/fabric-samples/first-network/docker-compose-explorer.yaml" % (curpath)] - config_util.makeProjectConfigDir(context) - - shutil.copyfile("{0}/fabric-samples/first-network/crypto-config.yaml".format(curpath), "{0}/configs/{1}/crypto-config.yaml".format(curpath, context.projectName)) - shutil.copyfile("{0}/fabric-samples/first-network/configtx.yaml".format(curpath), "{0}/configs/{1}/configtx.yaml".format(curpath, context.projectName)) - os.mkdir("{0}/configs/{1}/channel-artifacts".format(curpath, context.projectName)) - generateCryptoArtifacts(context, "mychannel", consensus_type) - - # In this step, composition will not be used, clear it once - if hasattr(context, "composition"): - del context.composition - - updated_env = config_util.updateEnviron(context) - updated_env["COMPOSE_PROJECT_NAME"] = context.projectName - updated_env["CORE_PEER_NETWORKID"] = context.projectName - - os.chdir("{0}/fabric-samples/first-network".format(curpath)) - - try: - command = ["./byfn.sh up -f docker-compose-explorer.yaml -c {0} -o {1}".format("mychannel", consensus_type)] - subprocess.call(command, shell=True, env=updated_env, stdout=FNULL) - except: - print("Failed npm install: {0}".format(sys.exc_info()[1])) - - os.chdir(curpath) - -def generateCryptoArtifacts(context, channelID, consensus_type): - 
curpath = os.path.realpath('.') - testConfigs = config_util.makeProjectConfigDir(context) - updated_env = config_util.updateEnviron(context) - try: - command = ["../../fabric-samples/first-network/byfn.sh", "generate", "-f", "docker-compose-explorer.yaml", "-c", channelID, "-o", consensus_type] - subprocess.call(command, cwd=testConfigs, env=updated_env, stderr=subprocess.STDOUT) - except: - print("Unable to generate crypto artifacts: {0}".format(sys.exc_info()[1])) - - try: - shutil.rmtree("{0}/fabric-samples/first-network/crypto-config".format(curpath), ignore_errors=True) - shutil.copytree("{0}/crypto-config".format(testConfigs), "{0}/fabric-samples/first-network/crypto-config".format(curpath)) - shutil.copytree("{0}/crypto-config/peerOrganizations".format(testConfigs), "{0}/peerOrganizations".format(testConfigs)) - shutil.copytree("{0}/crypto-config/ordererOrganizations".format(testConfigs), "{0}/ordererOrganizations".format(testConfigs)) - except: - print("Unable to copy crypto artifacts: {0}".format(sys.exc_info()[1])) - - try: - shutil.rmtree("{0}/fabric-samples/first-network/channel-artifacts".format(curpath), ignore_errors=True) - shutil.copytree("{0}/channel-artifacts".format(testConfigs), "{0}/fabric-samples/first-network/channel-artifacts".format(curpath)) - except: - print("Unable to copy channel artifacts: {0}".format(sys.exc_info()[1])) - - -@given(u'I start balance-transfer') -@given(u'I start balance-transfer orderer network of type {consensus_type}') -def start_balancetransfer_impl(context, consensus_type="solo"): - testConfigs = config_util.makeProjectConfigDir(context) - curpath = os.path.realpath('.') - shutil.copytree( - "%s/fabric-samples/balance-transfer/artifacts/channel/crypto-config/ordererOrganizations" % (curpath), - "%s/%s/ordererOrganizations" % (curpath, testConfigs) - ) - shutil.copytree( - "%s/fabric-samples/balance-transfer/artifacts/channel/crypto-config/peerOrganizations" % (curpath), - "%s/%s/peerOrganizations" % (curpath, 
testConfigs) - ) - - os.chdir("{0}/fabric-samples/balance-transfer".format(curpath)) - - # In this step, composition will not be used, clear it once - if hasattr(context, "composition"): - del context.composition - - updated_env = config_util.updateEnviron(context) - updated_env["COMPOSE_PROJECT_NAME"] = context.projectName - updated_env["CORE_PEER_NETWORKID"] = context.projectName - - try: - command = ["npm install --silent"] - subprocess.call(command, shell=True, env=updated_env, stdout=FNULL) - except: - print("Failed npm install: {0}".format(sys.exc_info()[1])) - - try: - command = ["./runApp.sh"] - p = subprocess.Popen(command, shell=True, env=updated_env, stdout=subprocess.PIPE) - except: - print("Failed to start application: {0}".format(sys.exc_info()[1])) - - while True: - line = p.stdout.readline() - if "SERVER STARTED" in line: - print(line) - break - else: - time.sleep(1) - - try: - command = ["./testAPIs.sh"] - subprocess.call(command, shell=True, env=updated_env, stdout=FNULL) - except: - print("Failed to exectute REST API: {0}".format(sys.exc_info()[1])) - - os.chdir(curpath) - -@step('I make a {request_verb} request to the following path segment') -def request_to_the_path_described_on_table(context, request_verb): - if not hasattr(context, 'verify_ssl'): - context.verify_ssl = True - - url = context.base_url - - for row in context.table: - for x in context.table.headings: - path = row[x] - if path.startswith("context") and path[8:] == "block_height": - # TODO messy code - # This attribute should be integer - url = url + '/' + str(getattr(context, path[8:]) - 1).encode('ascii') - elif path.startswith("context"): - url = url + '/' + str(getattr(context, path[8:])).encode('ascii') - else: - url = url + '/' + path - - context.r = getattr(requests, request_verb.lower())(url, headers=context.headers, verify=context.verify_ssl) - - log_full(context.r) - - return context.r - -@then(u'the explorer app logs contains "{data}" {count:d} time(s) within 
{timeout:d} seconds') -def step_impl(context, data, count, timeout): - time.sleep(float(timeout)) - data_count = is_in_log("explorer.mynetwork.com", data) - assert data_count == count, "The log didn't appear the expected number of times({0}).".format(data_count) - -@then(u'the explorer app logs contains "{data}" within {timeout:d} seconds') -def step_impl(context, data, timeout): - time.sleep(float(timeout)) - data_count = is_in_log("explorer.mynetwork.com", data) - assert data_count > 0, "The log didn't appear at all." - -@when(u'"{container}" is stopped') -def step_impl(context, container): - if hasattr(context, "composition") and hasattr(context, "composeFilesYaml"): - context.composition.stop([container]) - elif hasattr(context, "composition_explorer"): - context.composition_explorer.stop([container]) - else: - assert False, "Failed to stop container {0}".format(container) - -def is_in_log(container, keyText): - output = subprocess.check_output( - "docker exec " + container + " cat logs/app/app.log | grep " + "\"" + keyText + "\"" + " | wc -l", - shell=True) - return int(output) - -@step(u'Copy "{srcfile}" to "{dstfile}" on "{peer}"') -def start_explorer_impl(context, srcfile, dstfile, peer): - try: - testConfigs = config_util.makeProjectConfigDir(context) - updated_env = config_util.updateEnviron(context) - cmd = ['docker cp {0} {1}:{2}'.format(srcfile, peer, dstfile)] - subprocess.call(cmd, shell=True, env=updated_env) - except: - print("Unable to copy {0} on {1}: {2}".format(srcfile, peer, sys.exc_info()[1])) - -@step(u'Update "{peer}" of "{org}" as an anchor in "{channel}"') -def step_impl(context, peer, org, channel): - try: - testConfigs = config_util.makeProjectConfigDir(context) - updated_env = config_util.updateEnviron(context) - cmd = ['mkdir -p {0}/channel-artifacts'.format(testConfigs) ] - subprocess.call(cmd, shell=True, env=updated_env) - cmd = ['configtxgen -configPath {1} -profile {0} -outputAnchorPeersUpdate 
./{1}/channel-artifacts/{2}_{3}anchor.tx -channelID {2} -asOrg {3}'.format(config_util.CHANNEL_PROFILE, testConfigs, channel, org)] - subprocess.call(cmd, shell=True, env=updated_env) - except: - print("Unable to create anchor tx file for {0} on {1}: {2}".format(peer, channel, sys.exc_info()[1])) - - update_anchor(context, peer, channel, tx_filename='channel-artifacts/{0}_{1}anchor.tx'.format(channel, org)) - -def update_anchor(context, peer, channelId="mychannel", orderer="orderer0.example.com", tx_filename="update.pb", user="Admin"): - configDir = "/var/hyperledger/configs/{0}".format(context.composition.projectName) - - # peer channel update -f org3_update_in_envelope.pb -c $CHANNEL_NAME -o orderer.example.com:7050 --tls --cafile $ORDERER_CA - peerParts = peer.split('.') - org = '.'.join(peerParts[1:]) - setup = context.interface.get_env_vars(context, peer, includeAll=False, user=user) - command = ["peer", "channel", "update", - "--file", '{0}/{1}'.format(configDir, tx_filename), - "--channelID", channelId, - "--orderer", '{0}:7050'.format(orderer)] - if context.tls: - command = command + ["--tls", - "--cafile", - '{0}/ordererOrganizations/example.com/orderers/{1}/msp/tlscacerts/tlsca.example.com-cert.pem'.format(configDir, orderer)] - if hasattr(context, "mutual_tls") and context.mutual_tls: - command = command + ["--clientauth", - "--certfile", - '{0}/peerOrganizations/{1}/users/{2}@{1}/tls/client.crt'.format(configDir, org, user), - "--keyfile", - '{0}/peerOrganizations/{1}/users/{2}@{1}/tls/client.key'.format(configDir, org, user)] - - command.append('"') - output = context.composition.docker_exec(setup+command, [peer]) - print("[{0}]: {1}".format(" ".join(setup+command), output)) - return output diff --git a/app/platform/fabric/e2e-test/feature/steps/json_responses.py b/app/platform/fabric/e2e-test/feature/steps/json_responses.py deleted file mode 100644 index caeeb7da2..000000000 --- a/app/platform/fabric/e2e-test/feature/steps/json_responses.py +++ 
/dev/null @@ -1,102 +0,0 @@ -# SPDX-License-Identifier: Apache-2.0 - -import trafaret as t - -""" -Example of using Trafaret (https://github.com/Deepwalker/trafaret) to describe json response structure - -Assuming our json is: - -{ - "access_token":"access_token", - "refresh_token":"refresh_token", - "token_type":"token_type", - "expires_in":1800 -} - -Our trafaret will be: - -tokenData = t.Dict({ - t.Key('access_token'): t.String, - t.Key('refresh_token'): t.String, - t.Key('token_type'): t.String, - t.Key('expires_in'): t.Int -}) -""" - -# Below are objects used for validating response in explorer.feature -userData = t.Dict({ - t.Key('message'): t.String, - t.Key('name'): t.String -}) - -networklistResp = t.Dict({ - t.Key('networkList'): t.List(t.List(t.String | t.Dict)) -}) - -loginResp = t.Dict({ - t.Key('status'): t.Int, - t.Key('success'): t.Bool, - t.Key('message'): t.String, - t.Key('token'): t.String, - t.Key('user'): userData -}) - -channelsResp = t.Dict({ - t.Key('status'): t.Int, - t.Key('channels'): t.List(t.String) -}) - -channelData = t.Dict({ - t.Key('id'): t.Int, - t.Key('channelname'): t.String, - t.Key('blocks'): t.Int, - t.Key('channel_genesis_hash'): t.String, - t.Key('transactions'): t.Int, - t.Key('createdat'): t.String, - t.Key('channel_hash'): t.String(allow_blank=True) -}) - -channelsInfoResp = t.Dict({ - t.Key('status'): t.Int, - t.Key('channels'): t.List(channelData) -}) - -blockResp = t.Dict({ - t.Key('status'): t.Int, - t.Key('number'): t.String, - t.Key('previous_hash'): t.String, - t.Key('data_hash'): t.String, - t.Key('transactions'): t.List(t.Any) -}) - -peersStatusResp = t.Dict({ - t.Key('status'): t.Int, - t.Key('peers'): t.List(t.Any) -}) - -blockData = t.Dict({ - t.Key('blocknum'): t.Int, - t.Key('txcount'): t.Int, - t.Key('datahash'): t.String, - t.Key('blockhash'): t.String, - t.Key('prehash'): t.String(allow_blank=True), - t.Key('createdt'): t.String, - t.Key('txhash'): t.List(t.String(allow_blank=True)), - 
t.Key('channelname'): t.String -}) - -blockactivityResp = t.Dict({ - t.Key('status'): t.Int, - t.Key('row'): t.List(blockData) -}) - -registerResp = t.Dict({ - t.Key('status'): t.Int, - t.Key('message', optional=True): t.String -}) - -enrollResp = t.Dict({ - t.Key('status'): t.Int, - t.Key('message', optional=True): t.String -}) \ No newline at end of file diff --git a/app/platform/fabric/e2e-test/feature/steps/orderer_impl.py b/app/platform/fabric/e2e-test/feature/steps/orderer_impl.py deleted file mode 100644 index 3deeaf335..000000000 --- a/app/platform/fabric/e2e-test/feature/steps/orderer_impl.py +++ /dev/null @@ -1,205 +0,0 @@ -# -# Copyright IBM Corp. All Rights Reserved. -# -# SPDX-License-Identifier: Apache-2.0 -# - -from behave import * -import os -import subprocess -import time -import orderer_util -import basic_impl -import compose_util -import common_util - - -ORDERER_TYPES = ["solo", - "kafka", - "solo-msp"] - -PROFILE_TYPES = {"solo": "SampleInsecureSolo", - "kafka": "SampleInsecureKafka", - "solo-msp": "SampleSingleMSPSolo"} - - -@given(u'I test the access to the generated python protobuf files') -def step_impl(context): - orderer_util._testAccessPBMethods() - -@given(u'a bootstrapped orderer network of type {ordererType}') -def step_impl(context, ordererType): - basic_impl.bootstrapped_impl(context, ordererType) - -@given(u'an unbootstrapped network using "{dockerFile}"') -def compose_impl(context, dockerFile): - pass - -@given(u'an orderer connected to the kafka cluster') -def step_impl(context): - pass - -@given(u'the {key} environment variable is {value}') -def step_impl(context, key, value): - if not hasattr(context, "composition"): - context.composition = compose_util.Composition(context, startContainers=False) - changedString = common_util.changeFormat(value) - context.composition.environ[key] = changedString - -@given(u'the peer "{peer}" is setup to use a client identity') -def step_impl(context, peer): - if not hasattr(context, 
"composition"): - context.composition = compose_util.Composition(context, startContainers=False) - peerInfo = peer.split('.') - if peer not in context.composition.environ: - context.composition.environ[peer] = {} - context.composition.environ[peer]["CORE_PEER_TLS_CERT_FILE"] = "/var/hyperledger/users/Admin@{}.example.com/tls/client.crt".format(peerInfo[1]) - context.composition.environ[peer]["CORE_PEER_TLS_KEY_FILE"] = "/var/hyperledger/users/Admin@{}.example.com/tls/client.key".format(peerInfo[1]) - -@given(u'a certificate from {organization} is added to the kafka orderer network') -def step_impl(context, organization): - pass - -@given(u'a kafka cluster') -def step_impl(context): - pass - -@when(u'a message is broadcasted') -def step_impl(context): - broadcast_impl(context, 1) - -@when(u'{count} unique messages are broadcasted') -def broadcast_impl(context, count): - pass - -@when(u'I {takeDownType} the current kafka topic partition leader on {orderer}') -def stop_leader_impl(context, orderer, takeDownType): - brokers = orderer_util.getKafkaBrokerList(context, orderer) - kafkas = orderer_util.getKafkaIPs(context, brokers) - leader = orderer_util.getKafkaPartitionLeader(kafkaBrokers=kafkas) - - # Save stopped broker - if not hasattr(context, "stopped_brokers"): - context.stopped_brokers = [] - context.stopped_brokers.append(leader) - # Now that we know the kafka leader, stop it - basic_impl.bringdown_impl(context, leader, takeDownType) - - if not hasattr(context, "prevLeader"): - context.prevLeader = leader - -@when(u'I {bringUpType} a former kafka topic partition leader') -def step_impl(context, bringUpType): - start_leader_impl(context, "orderer0.example.com", bringUpType) - -@when(u'I {takeDownType} the current kafka topic partition leader') -def step_impl(context, takeDownType): - stop_leader_impl(context, "orderer0.example.com", takeDownType) - -@when(u'a kafka broker that is not in the ISR set is stopped on {orderer}') -def stop_non_isr_impl(context, 
orderer): - brokers = orderer_util.getKafkaBrokerList(context, orderer) - kafkas = orderer_util.getKafkaIPs(context, brokers) - kafka = orderer_util.getNonISRKafkaBroker(kafkaBrokers=kafkas) - - if not hasattr(context, "stopped_non_isr"): - context.stopped_non_isr = [] - context.stopped_non_isr.append(kafka) - context.composition.stop([kafka]) - -@when(u'a kafka broker that is not in the ISR set is stopped') -def step_impl(context): - stop_non_isr_impl(context, "orderer0.example.com") - -@when(u'I {bringUpType} a former kafka topic partition leader for {orderer}') -def start_leader_impl(context, orderer, bringUpType): - # Get the last stopped kafka broker from the stopped broker list - broker = context.stopped_brokers.pop() - basic_impl.bringup_impl(context, broker, bringUpType) - -@when(u'a new organization {organization} certificate is added') -def step_impl(context, organization): - pass - -@when(u'authorization for {organization} is removed from the kafka cluster') -def step_impl(context, organization): - pass - -@when(u'authorization for {organization} is added to the kafka cluster') -def step_impl(context, organization): - pass - -@then(u'ensure kafka ISR set contains {count:d} brokers') -def step_impl(context, count): - brokers = orderer_util.getKafkaBrokerList(context, "orderer0.example.com") - kafkas = orderer_util.getKafkaIPs(context, brokers) - _, isr_list = orderer_util.getKafkaTopic(kafkaBrokers=kafkas) - assert len(isr_list) == count, "len of isr_list: {0} does not match expected number of brokers: {1}".format(len(isr_list), count) - -@then(u'the broker is reported as down') -def step_impl(context): - brokers = orderer_util.getKafkaBrokerList(context, "orderer0.example.com") - kafkas = orderer_util.getKafkaIPs(context, brokers) - _, isr_list = orderer_util.getKafkaTopic(kafkaBrokers=kafkas) - - #as long as we have 1 broker in isr_list, check that none from stopped_brokers list exist in isr_list - if isr_list >= 1: - for kafka in 
context.stopped_brokers: - assert kafka not in isr_list, "stopped broker still exists in isr_set and is not removed" - - #for each broker in isr_list check logs - for kafka in isr_list: - assert common_util.is_in_log([kafka], "Shutdown completed (kafka.server.ReplicaFetcherThread)"), \ - "could not verify in the remaining broker logs that prevLeader is down" - -@then(u'the broadcasted message is delivered') -def step_impl(context): - verify_deliver_impl(context, 1, 1) - -@then(u'all {count} messages are delivered in {numBlocks} block') -def step_impl(context, count, numBlocks): - verify_deliver_impl(context, count, numBlocks) - -@then(u'all {count} messages are delivered within {timeout} seconds') -def step_impl(context, count, timeout): - verify_deliver_impl(context, count, None, timeout) - -@then(u'all {count} messages are delivered in {numBlocks} within {timeout} seconds') -def verify_deliver_impl(context, count, numBlocks, timeout=60): - pass - -@then(u'I get a successful broadcast response') -def step_impl(context): - recv_broadcast_impl(context, 1) - -@then(u'I get {count} successful broadcast responses') -def recv_broadcast_impl(context, count): - pass - -@then(u'the {organization} cannot connect to the kafka cluster') -def step_impl(context, organization): - pass - -@then(u'the {organization} is able to connect to the kafka cluster') -def step_impl(context, organization): - pass - -@then(u'the zookeeper notifies the orderer of the disconnect') -def step_impl(context): - pass - -@then(u'the orderer functions successfully') -def step_impl(context): - # Check the logs for certain key info - be sure there are no errors in the logs - pass - -@then(u'the orderer stops sending messages to the cluster') -def step_impl(context): - pass - -@then(u'the {key} environment variable is {value} on node "{node}"') -def step_impl(context, key, value, node): - assert hasattr(context, "composition"), "There are no containers running for this test" - changedString = 
common_util.changeFormat(value) - containerValue = context.composition.getEnvFromContainer(node, key) - assert containerValue == changedString, "The environment variable on the container was set to '{0}' (expected value: '{1}')".format(containerValue, changedString) diff --git a/app/platform/fabric/e2e-test/feature/steps/orderer_util.py b/app/platform/fabric/e2e-test/feature/steps/orderer_util.py deleted file mode 100644 index a6159b7b2..000000000 --- a/app/platform/fabric/e2e-test/feature/steps/orderer_util.py +++ /dev/null @@ -1,94 +0,0 @@ -# -# Copyright IBM Corp. All Rights Reserved. -# -# SPDX-License-Identifier: Apache-2.0 -# - -import os -import sys -import datetime -from pykafka import KafkaClient -import endorser_util - -try: - pbFilePath = "../feature" - sys.path.insert(0, pbFilePath) - from common import common_pb2 -except: - print("ERROR! Failed to import the protobuf libraries common_pb2 from the ../feature/common/ directory: {0}".format(sys.exc_info()[0])) - sys.exit(1) - -def getOrdererList(context): - # Get the Orderers list from the orderer container name - orderers = list() - for container in context.composition.containerDataList: - if 'orderer' in container.containerName: - orderers.append(container.containerName) - return orderers - -def getKafkaBrokerList(context, orderer): - # Get the kafka broker list from the orderer environment var - kafkaBrokers = "" - for container in context.composition.containerDataList: - if orderer in container.containerName: - kafkaBrokers = container.getEnv('CONFIGTX_ORDERER_KAFKA_BROKERS') - break - - # Be sure that kafka broker list returned is not an empty string - assert kafkaBrokers != "", "There are no kafka brokers set in the orderer environment" - brokers = kafkaBrokers[1:-1].split(',') - return brokers - -def getKafkaIPs(context, kafkaList): - kafkas = [] - for kafka in kafkaList: - containerName = kafka.split(':')[0] - container = context.composition.getContainerFromName(containerName, 
context.composition.containerDataList) - kafkas.append("{0}:9092".format(container.ipAddress)) - return kafkas - -def getKafkaTopic(kafkaBrokers=["0.0.0.0:9092"], channel=endorser_util.SYS_CHANNEL_ID): - kafkas = ",".join(kafkaBrokers) - client = KafkaClient(hosts=kafkas) - if client.topics == {} and channel is None: - topic = client.topics[endorser_util.TEST_CHANNEL_ID] - elif client.topics == {} and channel is not None: - topic = client.topics[channel] - elif channel is not None and channel in client.topics: - topic = client.topics[channel] - elif channel is None and client.topics != {}: - topic_list = client.topics.keys() - topic = client.topics[topic_list[0]] - - # Print brokers in ISR - print("ISR: {}".format(["kafka{}".format(broker.id) for broker in topic.partitions[0].isr])) - isr_set = ["kafka{}".format(broker.id) for broker in topic.partitions[0].isr] - return topic, isr_set - -def getKafkaPartitionLeader(kafkaBrokers=["0.0.0.0:9092"], channel=endorser_util.SYS_CHANNEL_ID): - topic, isr_set = getKafkaTopic(kafkaBrokers, channel) - leader = "kafka{0}".format(topic.partitions[0].leader.id) - print("current leader: {}".format(leader)) - return leader - -def getNonISRKafkaBroker(kafkaBrokers=["0.0.0.0:9092"], channel=endorser_util.SYS_CHANNEL_ID): - topic, isr_set = getKafkaTopic(kafkaBrokers, channel) - kafka = None - for kafkaNum in range(len(kafkaBrokers)): - if str(kafkaNum) not in topic.partitions[0].isr: - kafka = "kafka{0}".format(kafkaNum) - return kafka - -def generateMessageEnvelope(): - channel_header = common_pb2.ChannelHeader(channel_id=endorser_util.TEST_CHANNEL_ID, - type=common_pb2.ENDORSER_TRANSACTION) - header = common_pb2.Header(channel_header=channel_header.SerializeToString(), - signature_header=common_pb2.SignatureHeader().SerializeToString()) - payload = common_pb2.Payload(header=header, - data=str.encode("Functional test: {0}".format(datetime.datetime.utcnow())) ) - envelope = common_pb2.Envelope(payload=payload.SerializeToString()) - 
return envelope - -def _testAccessPBMethods(): - envelope = generateMessageEnvelope() - assert isinstance(envelope, common_pb2.Envelope), "Unable to import protobufs from feature-upgrade directory" diff --git a/app/platform/fabric/e2e-test/feature/steps/remote_util.py b/app/platform/fabric/e2e-test/feature/steps/remote_util.py deleted file mode 100644 index b8d2b781f..000000000 --- a/app/platform/fabric/e2e-test/feature/steps/remote_util.py +++ /dev/null @@ -1,119 +0,0 @@ -# -# Copyright IBM Corp All Rights Reserved -# -# SPDX-License-Identifier: Apache-2.0 -# - -import os -import sys -import yaml -import json - -from request_util import httpGet, httpPost, getAttributeFromJSON, JSON_HEADER - - -def getNetworkDetails(context): - """ Get the network details from the network yaml or json file""" - if hasattr(context, "network"): - fd = open(context.network, "r") - try: - if context.network.endswith(("yaml", "yml")): - context.networkInfo = yaml.load(fd) - elif context.network.endswith("json"): - context.networkInfo = json.load(fd) - except: - print("Unable to load the network configuration file: {0}".format(sys.exc_info()[0]) ) - context.networkInfo = json.load(fd) - return context - - -def getHeaders(context): - headers = context.networkInfo["headers"].copy() - return headers - - -def getNetworkID(context): - """ Get the Network ID.""" - if hasattr(context, 'networkInfo'): - return context.networkInfo["networkID"] - - -def stopNode(context, peer): - """Stops the peer node on a specific network.""" - nodeId = context.networkInfo["nodes"][peer]["nodeID"] - url = context.networkInfo["urls"]["stopURL"].format(IPaddress=context.networkInfo["IPaddress"], - port=context.networkInfo["port"], - networkID=context.networkInfo["networkID"], - nodeID=nodeId) - body = {} - response = httpPost(url, body, headers=getHeaders(context)) - - -def startNode(context, peer): - """Start the peer node on a specific network.""" - nodeId = context.networkInfo["nodes"][peer]["nodeID"] - url = 
context.networkInfo["urls"]["startURL"].format(IPaddress=context.networkInfo["IPaddress"], - port=context.networkInfo["port"], - networkID=context.networkInfo["networkID"], - nodeID=nodeId) - body = {} - response = httpPost(url, body, headers=getHeaders(context)) - - -def restartNode(context, peer): - """Restart the peer node on a specific network.""" - nodeId = context.networkInfo["nodes"][peer]["nodeID"] - url = context.networkInfo["urls"]["restartURL"].format(IPaddress=context.networkInfo["IPaddress"], - port=context.networkInfo["port"], - networkID=context.networkInfo["networkID"], - nodeID=nodeId) - body = {} - response = httpPost(url, body, headers=getHeaders(context)) - - -def getNodeStatus(context, peer): - """Get the Node status.""" - nodeId = context.networkInfo["nodes"][peer]["nodeID"] - body = {"nodes": [nodeId]} - url = context.networkInfo["urls"]["statusURL"].format(IPaddress=context.networkInfo["IPaddress"], - port=context.networkInfo["port"], - networkID=context.networkInfo["networkID"]) - response = httpPost(url, body, headers=getHeaders(context)) - return response - - -def getNodeLogs(context, component): - """ Get the Node logs.""" - nodeId = context.networkInfo["nodes"][component]["nodeID"] - url = context.networkInfo["urls"]["getLogsURL"].format(IPaddress=context.networkInfo["IPaddress"], - port=context.networkInfo["port"], - nodeID=nodeId, - networkID=context.networkInfo["networkID"]) - response = httpGet(url, headers=getHeaders(context)) - return response - - -def getChaincodeStatus(context, peer): - """ Get the Node status.""" - nodeId = context.networkInfo["nodes"][peer]["nodeID"] - url = context.networkInfo["urls"]["ccStatusURL"].format(IPaddress=context.networkInfo["IPaddress"], - port=context.networkInfo["port"], - peer=nodeId, - networkID=context.networkInfo["networkID"]) - response = httpGet(url, headers=getHeaders(context)) - return response - - -def getChaincodeLogs(context, peer, channelID): - """ Get the Chaincode logs.""" - nodeId 
= context.networkInfo["nodes"][peer]["nodeID"] - if hasattr(context, 'chaincodeSpec'): - url = context.networkInfo["urls"]["getCCLogsURL"].format(IPaddress=context.networkInfo["IPaddress"], - port=context.networkInfo["port"], - nodeID=nodeId, - ccID=nodeId, - networkID=context.networkInfo["networkID"]) - response = httpGet(url, headers=getHeaders(context)) - else: - response = "No chaincode has been deployed" - return response diff --git a/app/platform/fabric/e2e-test/feature/steps/request_util.py b/app/platform/fabric/e2e-test/feature/steps/request_util.py deleted file mode 100644 index 0a24cccd3..000000000 --- a/app/platform/fabric/e2e-test/feature/steps/request_util.py +++ /dev/null @@ -1,48 +0,0 @@ -# -# Copyright IBM Corp All Rights Reserved -# -# SPDX-License-Identifier: Apache-2.0 -# - -import requests, json - -REST_PORT = "7050" -ACCEPT_JSON_HEADER = {'Accept': 'application/json'} -JSON_HEADER = {'Accept': 'application/json', - 'Content-type': 'application/json'} - -def getAttributeFromJSON(attribute, json): - foundJson = getHierarchyAttributesFromJSON(attribute.split("."), json) - assert foundJson is not None, "Unable to locate {} in JSON".format(attribute) - return foundJson - -def getHierarchyAttributesFromJSON(attributes, json): - foundJson = None - currentAttribute = attributes[0] - if currentAttribute in json: - foundJson = json[currentAttribute] - attributesToGo = attributes[1:] - if len(attributesToGo) > 0: - foundJson = getHierarchyAttributesFromJSON(attributesToGo, foundJson) - return foundJson - -def httpGet(url, headers=ACCEPT_JSON_HEADER, expectSuccess=True): - return _request("GET", url, headers, expectSuccess=expectSuccess) - -def httpPost(url, body, headers=ACCEPT_JSON_HEADER, expectSuccess=True): - return _request("POST", url, headers=headers, expectSuccess=expectSuccess, json=body) - -def _request(method, url, headers, expectSuccess=True, **kwargs): - response = requests.request(method, url, headers=headers, verify=False, **kwargs) - 
response.connection.close() - if expectSuccess: - assert response.status_code == 200, "Failed to {} to {}: {}".format(method, url, response.text) - #print("Response from {}:".format(url)) - #print(formatResponseText(response)) - return response - -def formatResponseText(response): - try: - return json.dumps(response.json(), indent = 4)[:300] - except: - return "" diff --git a/app/platform/fabric/e2e-test/runTestSuite.sh b/app/platform/fabric/e2e-test/runTestSuite.sh new file mode 100755 index 000000000..e763ca54b --- /dev/null +++ b/app/platform/fabric/e2e-test/runTestSuite.sh @@ -0,0 +1,30 @@ +#!/bin/bash + +echo "#### Downloaded fabric-test repo" + +set -e + +mkdir -p $GOPATH/src/github.com/hyperledger + +pushd $GOPATH/src/github.com/hyperledger +if [ ! -d fabric-test ]; then + git clone https://github.com/hyperledger/fabric-test.git -b release-1.4 +fi +cd fabric-test +git checkout 45799a2ee4eefa49ae705cc57ed415270c35d60a +echo "#### Updated each sub-module under fabric-test repo" +popd + +pushd $GOPATH/src/github.com/hyperledger/fabric-test/tools/PTE +npm install fabric-client@1.4.5 +npm install fabric-ca-client@1.4.5 +echo "#### Installed required node packages" +popd + +rm -f PTE +ln -s $GOPATH/src/github.com/hyperledger/fabric-test/tools/PTE ./PTE + +pushd specs +echo "#### Starting Ginkgo based test suite" +ginkgo -v +popd \ No newline at end of file diff --git a/app/platform/fabric/e2e-test/specs/apitest-input-multiprofile.yml b/app/platform/fabric/e2e-test/specs/apitest-input-multiprofile.yml new file mode 100644 index 000000000..0ebc37eae --- /dev/null +++ b/app/platform/fabric/e2e-test/specs/apitest-input-multiprofile.yml @@ -0,0 +1,123 @@ +organizations: + - name: org1 +#! For smoke test suite, connection-profile are read from smoke directory + connProfilePath: ./connection-profile/connection_profile_org1.yaml + - name: org2 + connProfilePath: ./connection-profile/connection_profile_org2.yaml + +createChannel: + - channelName: commonchannel +#! 
For smoke test suite, channel-artifacts are read from smoke directory + channelTxPath: ./channel-artifacts/commonchannel/commonchannel.tx + organizations: org1 + - channelName: org1channel +#! For smoke test suite, channel-artifacts are read from smoke directory + channelTxPath: ./channel-artifacts/org1channel/org1channel.tx + organizations: org1 + - channelName: org2channel +#! For smoke test suite, channel-artifacts are read from smoke directory + channelTxPath: ./channel-artifacts/org2channel/org2channel.tx + organizations: org2 + +anchorPeerUpdate: + - channelName: commonchannel + organizations: org1 +#! For smoke test suite, channel-artifacts are read from smoke directory + anchorPeerUpdateTxPath: ./channel-artifacts/commonchannel/commonchannelorg1anchor.tx + - channelName: commonchannel + organizations: org2 + anchorPeerUpdateTxPath: ./channel-artifacts/commonchannel/commonchannelorg2anchor.tx + - channelName: org1channel + organizations: org1 +#! For smoke test suite, channel-artifacts are read from smoke directory + anchorPeerUpdateTxPath: ./channel-artifacts/org1channel/org1channelorg1anchor.tx + - channelName: org2channel + organizations: org2 +#! 
For smoke test suite, channel-artifacts are read from smoke directory + anchorPeerUpdateTxPath: ./channel-artifacts/org2channel/org2channelorg2anchor.tx + +joinChannel: +# joins all peers in listed organziations to all channels based on channelPrefix and numChannels + - channelName: commonchannel + organizations: org1,org2 + - channelName: org1channel + organizations: org1 + - channelName: org2channel + organizations: org2 + +installChaincode: +# installs chaincode with specified name on all peers in listed organziations + - name: samplecc + version: v1 + path: github.com/hyperledger/fabric-test/chaincodes/samplecc/go + organizations: org1,org2 + language: golang + metadataPath: "" + + - name: samplecc + version: v2 + path: github.com/hyperledger/fabric-test/chaincodes/samplecc/go + organizations: org1,org2 + language: golang + metadataPath: "" + +instantiateChaincode: + - channelName: testorgschannel0 + name: samplecc + version: v1 + args: "" + organizations: org1 + endorsementPolicy: 2of(org1,org2) + collectionPath: "" + +upgradeChaincode: + - channelName: testorgschannel0 + name: samplecc + version: v2 + args: "" + organizations: org1 + endorsementPolicy: 1of(org1,org2) + collectionPath: "" + +invokes: + - channelName: testorgschannel0 + name: samplecc + targetPeers: OrgAnchor + nProcPerOrg: 2 + nRequest: 10 + runDur: 0 + organizations: org1,org2 + txnOpt: + - mode: constant + options: + constFreq: 0 + devFreq: 0 + queryCheck: 100 + eventOpt: + type: FilteredBlock + listener: Block + timeout: 240000 + ccOpt: + ccType: ccchecker + keyStart: 0 + payLoadMin: 1024 + payLoadMax: 2048 + args: "put,a1,1" + +queries: + - channelName: testorgschannel0 + name: samplecc + targetPeers: OrgAnchor + nProcPerOrg: 2 + nRequest: 10 + runDur: 0 + organizations: org1,org2 + ccOpt: + ccType: ccchecker + keyStart: 0 + txnOpt: + - mode: constant + options: + constFreq: 0 + devFreq: 0 + args: "get,a1" diff --git a/app/platform/fabric/e2e-test/specs/apitest-input-singleprofile.yml 
b/app/platform/fabric/e2e-test/specs/apitest-input-singleprofile.yml new file mode 100644 index 000000000..236ea79b1 --- /dev/null +++ b/app/platform/fabric/e2e-test/specs/apitest-input-singleprofile.yml @@ -0,0 +1,117 @@ +organizations: + - name: org1 +#! For smoke test suite, connection-profile are read from smoke directory + connProfilePath: ./connection-profile/connection_profile_org1.yaml + - name: org2 + connProfilePath: ./connection-profile/connection_profile_org2.yaml + +createChannel: + - channelName: commonchannel +#! For smoke test suite, channel-artifacts are read from smoke directory + channelTxPath: ./channel-artifacts/commonchannel/commonchannel.tx + organizations: org1 + - channelName: org1channel +#! For smoke test suite, channel-artifacts are read from smoke directory + channelTxPath: ./channel-artifacts/org1channel/org1channel.tx + organizations: org1 + - channelName: org2channel +#! For smoke test suite, channel-artifacts are read from smoke directory + channelTxPath: ./channel-artifacts/org2channel/org2channel.tx + organizations: org2 + +anchorPeerUpdate: + - channelName: commonchannel + organizations: org1 +#! For smoke test suite, channel-artifacts are read from smoke directory + anchorPeerUpdateTxPath: ./channel-artifacts/commonchannel/commonchannelorg1anchor.tx + - channelName: commonchannel + organizations: org2 + anchorPeerUpdateTxPath: ./channel-artifacts/commonchannel/commonchannelorg2anchor.tx + - channelName: org1channel + organizations: org1 +#! 
For smoke test suite, channel-artifacts are read from smoke directory + anchorPeerUpdateTxPath: ./channel-artifacts/org1channel/org1channelorg1anchor.tx + +joinChannel: +# joins all peers in listed organziations to all channels based on channelPrefix and numChannels + - channelName: commonchannel + organizations: org1,org2 + - channelName: org1channel + organizations: org1 + +installChaincode: +# installs chaincode with specified name on all peers in listed organziations + - name: samplecc + version: v1 + path: github.com/hyperledger/fabric-test/chaincodes/samplecc/go + organizations: org1,org2 + language: golang + metadataPath: "" + + - name: samplecc + version: v2 + path: github.com/hyperledger/fabric-test/chaincodes/samplecc/go + organizations: org1,org2 + language: golang + metadataPath: "" + +instantiateChaincode: + - channelName: commonchannel + name: samplecc + version: v1 + args: "" + organizations: org1 + endorsementPolicy: 2of(org1,org2) + collectionPath: "" + +upgradeChaincode: + - channelName: testorgschannel0 + name: samplecc + version: v2 + args: "" + organizations: org1 + endorsementPolicy: 1of(org1,org2) + collectionPath: "" + +invokes: + - channelName: commonchannel + name: samplecc + targetPeers: OrgAnchor + nProcPerOrg: 2 + nRequest: 10 + runDur: 0 + organizations: org1,org2 + txnOpt: + - mode: constant + options: + constFreq: 0 + devFreq: 0 + queryCheck: 100 + eventOpt: + type: FilteredBlock + listener: Block + timeout: 240000 + ccOpt: + ccType: ccchecker + keyStart: 0 + payLoadMin: 1024 + payLoadMax: 2048 + args: "put,a1,1" + +queries: + - channelName: commonchannel + name: samplecc + targetPeers: OrgAnchor + nProcPerOrg: 2 + nRequest: 10 + runDur: 0 + organizations: org1,org2 + ccOpt: + ccType: ccchecker + keyStart: 0 + txnOpt: + - mode: constant + options: + constFreq: 0 + devFreq: 0 + args: "get,a1" diff --git a/app/platform/fabric/e2e-test/specs/apitest-input-singleprofile_addnewch.yml 
b/app/platform/fabric/e2e-test/specs/apitest-input-singleprofile_addnewch.yml new file mode 100644 index 000000000..a5d297ef4 --- /dev/null +++ b/app/platform/fabric/e2e-test/specs/apitest-input-singleprofile_addnewch.yml @@ -0,0 +1,100 @@ +organizations: + - name: org1 +#! For smoke test suite, connection-profile are read from smoke directory + connProfilePath: ./connection-profile/connection_profile_org1.yaml + - name: org2 + connProfilePath: ./connection-profile/connection_profile_org2.yaml + +createChannel: + - channelName: channel2422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422 +#! For smoke test suite, channel-artifacts are read from smoke directory + channelTxPath: ./channel-artifacts/channel2422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422/channel2422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422.tx + organizations: org1 + +anchorPeerUpdate: + - channelName: channel2422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422 + organizations: org1 +#! 
For smoke test suite, channel-artifacts are read from smoke directory + anchorPeerUpdateTxPath: ./channel-artifacts/channel2422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422/channel2422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422org1anchor.tx + +joinChannel: +# joins all peers in listed organziations to all channels based on channelPrefix and numChannels + - channelName: channel2422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422 + organizations: org1 + +installChaincode: +# installs chaincode with specified name on all peers in listed organziations + - name: samplecc + version: v1 + path: github.com/hyperledger/fabric-test/chaincodes/samplecc/go + organizations: org1,org2 + language: golang + metadataPath: "" + + - name: samplecc + version: v2 + path: github.com/hyperledger/fabric-test/chaincodes/samplecc/go + organizations: org1,org2 + language: golang + metadataPath: "" + +instantiateChaincode: + - channelName: channel2422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422 + name: samplecc + version: v1 + args: "" + organizations: org1 + endorsementPolicy: 1of(org1,org2) + collectionPath: "" + +upgradeChaincode: + - channelName: testorgschannel0 + name: samplecc + version: v2 + args: "" + organizations: org1 + endorsementPolicy: 
1of(org1,org2) + collectionPath: "" + +invokes: + - channelName: channel2422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422 + name: samplecc + targetPeers: OrgAnchor + nProcPerOrg: 2 + nRequest: 10 + runDur: 0 + organizations: org1 + txnOpt: + - mode: constant + options: + constFreq: 0 + devFreq: 0 + queryCheck: 100 + eventOpt: + type: FilteredBlock + listener: Block + timeout: 240000 + ccOpt: + ccType: ccchecker + keyStart: 0 + payLoadMin: 1024 + payLoadMax: 2048 + args: "put,a1,1" + +queries: + - channelName: channel2422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422 + name: samplecc + targetPeers: OrgAnchor + nProcPerOrg: 2 + nRequest: 10 + runDur: 0 + organizations: org1 + ccOpt: + ccType: ccchecker + keyStart: 0 + txnOpt: + - mode: constant + options: + constFreq: 0 + devFreq: 0 + args: "get,a1" diff --git a/app/platform/fabric/e2e-test/specs/apitest-network-spec.yml b/app/platform/fabric/e2e-test/specs/apitest-network-spec.yml new file mode 100644 index 000000000..c4a311e47 --- /dev/null +++ b/app/platform/fabric/e2e-test/specs/apitest-network-spec.yml @@ -0,0 +1,139 @@ +#! Copyright IBM Corp. All Rights Reserved. +#! +#! SPDX-License-Identifier: Apache-2.0 + +--- +#! fabricVersion: +#! Released images are pulled from docker hub hyperledger/, e.g. 1.4.5 or 2.0.0 +#! Development stream images are pulled from +#! hyperledger-fabric.jfrog.io/, e.g. 1.4.5-stable or 2.0.0-stable +fabricVersion: 1.4.4 +#! peer database ledger type (couchdb, goleveldb) +dbType: goleveldb +#! This parameter is used to define fabric logging spec in peers +peerFabricLoggingSpec: error +#! 
This parameter is used to define fabric logging spec in orderers +ordererFabricLoggingSpec: error +#! tls in the network (true, false or mutual(mutualtls)) +tls: true +#! fabric metrics with prometheus (true/false) +metrics: false +#! true - enable gossip and dynamic leader election +#! false - disable gossip and set all peers as org leaders +gossipEnable: false +#! enable node ou's in fabric network (true/false) +enableNodeOUs: false + +#! For smoke test suite, crypto-config, connection-profile and channel-artifacts are stored +#! in smoke directory +artifactsLocation: . + +#! Orderer Config Settings +orderer: +#! Consensus type + ordererType: etcdraft + batchSize: + maxMessageCount: 100 + absoluteMaxBytes: 10 MB + preferredMaxBytes: 2 MB + batchTimeOut: 2s +#! Etcd raft options and this will be used when ordererType is +#! selected as etcdraft + etcdraftOptions: + tickInterval: 500ms + electionTick: 10 + heartbeatTick: 1 + maxInflightBlocks: 5 + snapshotIntervalSize: 100 MB + +#! Not being used for smoke test suite +#! Number of kafka and zookeeper to be launched in network +#! when ordererType is kafka +kafka: + numKafka: 5 + #! number of kafka replications for each channel + numKafkaReplications: 3 + numZookeepers: 3 + +ordererOrganizations: +- name: ordererorg1 + mspId: OrdererOrgExampleCom + numOderers: 5 + numCa: 0 + +peerOrganizations: +- name: org1 + mspId: Org1ExampleCom + numPeers: 2 + numCa: 1 + +- name: org2 + mspId: Org2ExampleCom + numPeers: 2 + numCa: 1 + +#! Capabilites for Orderer, Channel, Application groups +ordererCapabilities: + V1_4_2: true + +channelCapabilities: + V1_4_3: true + +applicationCapabilities: + V1_4_2: true + +#! Create the channel creation transactions; every org will be included in every channel +#! This used testorgschannel as the prefix and channels are used like testorgschannel0, +#! testorgschannel1.... based on number of channels passed +#! 
(note: client will need to submit the transactions to create channels) +numChannels: 1 + +#! Not being used for smoke test suite +k8s: + serviceType: NodePort + #! dataPersistence is used to store the data from fabric containers + #! It can take values of true, false and local + #! When true is used, it uses storageClass and storageCapacity to create + #! persistent volumes. When false is used, backup will not be configured. + #! When local is used, hostPath will be used to store the data from fabric containers + #! to worker nodes on which pods are running. + dataPersistence: true + storageClass: default + storageCapacity: 20Gi + resources: + orderers: + limits: + cpu: "1" + memory: 1Gi + requests: + cpu: "0.5" + memory: 1Gi + peers: + limits: + cpu: "0.5" + memory: 2Gi + requests: + cpu: "0.5" + memory: 2Gi +#! dind will be used to run all chaincode containers of a peer + dind: + limits: + cpu: "1" + memory: 1Gi + requests: + cpu: "1" + memory: 1Gi + couchdb: + limits: + cpu: "0.2" + memory: 1Gi + requests: + cpu: "0.1" + memory: 1Gi + kafka: + limits: + cpu: "0.2" + memory: 1Gi + requests: + cpu: "0.1" + memory: 1Gi diff --git a/app/platform/fabric/e2e-test/specs/apitest_suite_test.go b/app/platform/fabric/e2e-test/specs/apitest_suite_test.go new file mode 100644 index 000000000..a61fa8b6e --- /dev/null +++ b/app/platform/fabric/e2e-test/specs/apitest_suite_test.go @@ -0,0 +1,51 @@ +package apitest + +import ( + "strings" + "testing" + + . "github.com/onsi/ginkgo" + "github.com/onsi/ginkgo/reporters" + . 
"github.com/onsi/gomega" + + "github.com/hyperledger/fabric-test/tools/operator/launcher" + "github.com/hyperledger/fabric-test/tools/operator/networkclient" +) + +func TestRestApi(t *testing.T) { + RegisterFailHandler(Fail) + junitReporter := reporters.NewJUnitReporter("results_rest-api-test-suite.xml") + RunSpecsWithDefaultAndCustomReporters(t, "Rest Api Test Suite", []Reporter{junitReporter}) +} + +// Bringing up network using BeforeSuite +var _ = BeforeSuite(func() { + networkSpecPath := "apitest-network-spec.yml" + err := launcher.Launcher("up", "docker", "", networkSpecPath) + Expect(err).NotTo(HaveOccurred()) +}) + +// Cleaning up network launched from BeforeSuite and removing all chaincode containers +// and chaincode container images using AfterSuite +var _ = AfterSuite(func() { + networkSpecPath := "apitest-network-spec.yml" + err := launcher.Launcher("down", "docker", "", networkSpecPath) + Expect(err).NotTo(HaveOccurred()) + + dockerList := []string{"ps", "-aq", "-f", "status=exited"} + containerList, _ := networkclient.ExecuteCommand("docker", dockerList, false) + if containerList != "" { + list := strings.Split(containerList, "\n") + containerArgs := []string{"rm", "-f"} + containerArgs = append(containerArgs, list...) + networkclient.ExecuteCommand("docker", containerArgs, true) + } + ccimagesList := []string{"images", "-q", "--filter=reference=dev*"} + images, _ := networkclient.ExecuteCommand("docker", ccimagesList, false) + if images != "" { + list := strings.Split(images, "\n") + imageArgs := []string{"rmi", "-f"} + imageArgs = append(imageArgs, list...) 
+ networkclient.ExecuteCommand("docker", imageArgs, true) + } +}) diff --git a/app/platform/fabric/e2e-test/specs/apitest_test.go b/app/platform/fabric/e2e-test/specs/apitest_test.go new file mode 100644 index 000000000..7c415e857 --- /dev/null +++ b/app/platform/fabric/e2e-test/specs/apitest_test.go @@ -0,0 +1,454 @@ +package apitest + +import ( + "fmt" + "log" + "os/exec" + "strconv" + "strings" + + "github.com/go-resty/resty/v2" + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" + + "github.com/hyperledger/fabric-test/tools/operator/networkclient" + "github.com/hyperledger/fabric-test/tools/operator/testclient" +) + +type UserData struct { + Message string `json:"message"` + Name string `json:"name"` +} + +type LoginResponse struct { + Status int `json:"status"` + Success bool `json:"success"` + Message string `json:"message"` + Token string `json:"token"` + User UserData `json:"user"` +} + +type RegisterResp struct { + Status int `json:"status"` + Message string `json:"message"` +} + +type ChannelData struct { + ID int `json:"id"` + Channelname string `json:"channelname"` + Blocks int `json:"blocks"` + ChannelGenesisHash string `json:"channel_genesis_hash"` + Transactions int `json:"transactions"` + Createdat string `json:"createdat"` + ChannelHash string `json:"channel_hash"` +} + +type ChannelsInfoResp struct { + Status int `json:"status"` + Channels []ChannelData `json:"channels"` +} + +type ChannelsResponse struct { + Status int `json:"status"` + Channels []string `json:"channels"` +} + +var ( + channelMonitored string +) + +func CheckHowManyEventHubRegistered() int { + arg := fmt.Sprintf(`docker logs explorer.mynetwork.com | grep "Successfully created channel event hub for \[%s\]" | wc -l`, channelMonitored) + cmd := exec.Command("sh", "-c", arg) + result, err := cmd.Output() + if err != nil { + log.Fatal(err) + } + ret, _ := strconv.Atoi(strings.TrimSuffix(string(result), "\n")) + return ret +} + +func CheckIfSwitchedToNewOrderer() int { + arg := 
`docker logs explorer.mynetwork.com | grep "Succeeded to switch default orderer to" | wc -l` + cmd := exec.Command("sh", "-c", arg) + result, err := cmd.Output() + if err != nil { + log.Fatal(err) + } + ret, _ := strconv.Atoi(strings.TrimSuffix(string(result), "\n")) + return ret +} + +func StopNode(nodeName string) { + cmd := exec.Command("docker", "rm", "-f", nodeName) + _, err := cmd.Output() + if err != nil { + log.Fatal(err) + } +} + +var _ = Describe("REST API Test Suite", func() { + + Describe("Running REST API Test Suite in fabric-test", func() { + var ( + action string + inputSpecPath string + token string + channelGenesisHash string + blockHeight int + ) + It("starting fabric network", func() { + out, err := exec.Command("pwd").Output() + if err != nil { + log.Fatal(err) + } + fmt.Printf("The date is %s\n", out) + inputSpecPath = "apitest-input-singleprofile.yml" + + By("0) Generating channel artifacts") + _, err = networkclient.ExecuteCommand("./genchannelartifacts.sh", []string{}, true) + Expect(err).NotTo(HaveOccurred()) + + By("1) Creating channel") + action = "create" + err = testclient.Testclient(action, inputSpecPath) + Expect(err).NotTo(HaveOccurred()) + + By("2) Joining Peers to channel") + action = "join" + err = testclient.Testclient(action, inputSpecPath) + Expect(err).NotTo(HaveOccurred()) + + By("3) Updating channel with anchor peers") + action = "anchorpeer" + err = testclient.Testclient(action, inputSpecPath) + Expect(err).NotTo(HaveOccurred()) + + By("4) Installing Chaincode on Peers") + action = "install" + err = testclient.Testclient(action, inputSpecPath) + Expect(err).NotTo(HaveOccurred()) + + By("5) Instantiating Chaincode") + action = "instantiate" + err = testclient.Testclient(action, inputSpecPath) + Expect(err).NotTo(HaveOccurred()) + + By("6) Sending Queries") + action = "query" + err = testclient.Testclient(action, inputSpecPath) + Expect(err).NotTo(HaveOccurred()) + + By("7) Sending Invokes") + action = "invoke" + err = 
testclient.Testclient(action, inputSpecPath) + Expect(err).NotTo(HaveOccurred()) + + }) + + It("launch explorer", func() { + _, err := networkclient.ExecuteCommand("./runexplorer.sh", []string{"single"}, true) + Expect(err).NotTo(HaveOccurred()) + }) + + It("get network list", func() { + type NetworklistInfo struct { + NetworkList [][]interface{} `json:"networkList"` + } + + // Create a Resty Client + client := resty.New() + + resp, err := client.R(). + EnableTrace(). + SetResult(&NetworklistInfo{}). + Get("http://localhost:8090/auth/networklist") + + Expect(err).ShouldNot(HaveOccurred()) + + result := resp.Result().(*NetworklistInfo) + list := []string{} + for _, val := range result.NetworkList { + list = append(list, val[0].(string)) + } + Expect(list).Should(HaveLen(1)) + Expect(list).Should(ContainElements([]string{"org1-network"})) + + }) + + It("login to org1-network", func() { + + client := resty.New() + + resp, err := client.R(). + EnableTrace(). + SetHeader("Content-Type", "application/json"). + SetBody(map[string]interface{}{"user": "admin", "password": "adminpw", "network": "org1-network"}). + SetResult(&LoginResponse{}). + Post("http://localhost:8090/auth/login") + + Expect(err).ShouldNot(HaveOccurred()) + + result := resp.Result().(*LoginResponse) + token = result.Token + + Expect(result.User.Message).Should(Equal("logged in")) + Expect(result.User.Name).Should(Equal("admin")) + }) + + It("get channels", func() { + + client := resty.New() + client.SetAuthToken(token) + + resp, err := client.R(). + EnableTrace(). + SetResult(&ChannelsResponse{}). + Get("http://localhost:8090/api/channels") + + Expect(err).ShouldNot(HaveOccurred()) + + result := resp.Result().(*ChannelsResponse) + + Expect(result.Channels).Should(ContainElements([]string{"org1channel", "commonchannel"})) + + }) + + It("get channels info", func() { + + client := resty.New() + client.SetAuthToken(token) + + resp, err := client.R(). + EnableTrace(). + SetResult(&ChannelsInfoResp{}). 
+ Get("http://localhost:8090/api/channels/info") + + Expect(err).ShouldNot(HaveOccurred()) + + result := resp.Result().(*ChannelsInfoResp) + chList := []string{} + for _, ch := range result.Channels { + chList = append(chList, ch.Channelname) + if ch.Channelname == "commonchannel" { + channelGenesisHash = ch.ChannelGenesisHash + blockHeight = ch.Blocks - 1 + } + } + // Expect(result.Channels[0].Channelname).Should(Equal("commonchannel")) + Expect(chList).Should(ContainElements([]string{"commonchannel", "org1channel"})) + + }) + + It("get block info", func() { + type BlockResp struct { + Status int `json:"status"` + Number string `json:"number"` + PreviousHash string `json:"previous_hash"` + DataHash string `json:"data_hash"` + Transactions []interface{} `json:"transactions"` + } + + client := resty.New() + client.SetAuthToken(token) + + resp, err := client.R(). + EnableTrace(). + SetResult(&BlockResp{}). + Get("http://localhost:8090/api/block/" + channelGenesisHash + "/" + strconv.Itoa(blockHeight)) + + Expect(err).ShouldNot(HaveOccurred()) + result := resp.Result().(*BlockResp) + Expect(result.Status).Should(Equal(200)) + + }) + + It("get status of peers within commonchannel", func() { + type PeersStatusResp struct { + Status int `json:"status"` + Peers []interface{} `json:"peers"` + } + + client := resty.New() + client.SetAuthToken(token) + + resp, err := client.R(). + EnableTrace(). + SetResult(&PeersStatusResp{}). 
+ Get("http://localhost:8090/api/peersStatus/" + "commonchannel") + + Expect(err).ShouldNot(HaveOccurred()) + result := resp.Result().(*PeersStatusResp) + Expect(result.Status).Should(Equal(200)) + }) + + It("get block activity", func() { + type BlockData struct { + Blocknum int `json:"blocknum"` + Txcount int `json:"txcount"` + Datahash string `json:"datahash"` + Blockhash string `json:"blockhash"` + Prehash string `json:"prehash"` + Createdt string `json:"createdt"` + Txhash []string `json:"txhash"` + Channelname string `json:"channelname"` + } + + type BlockActivityResp struct { + Status int `json:"status"` + Row []BlockData `json:"row"` + } + + client := resty.New() + client.SetAuthToken(token) + + resp, err := client.R(). + EnableTrace(). + SetResult(&BlockActivityResp{}). + Get("http://localhost:8090/api/blockActivity/" + channelGenesisHash) + + Expect(err).ShouldNot(HaveOccurred()) + result := resp.Result().(*BlockActivityResp) + Expect(result.Status).Should(Equal(200)) + Expect(result.Row[0].Channelname).Should(Equal("commonchannel")) + }) + + It("register user", func() { + type RegisterResp struct { + Status int `json:"status"` + Message string `json:"message"` + } + + client := resty.New() + client.SetAuthToken(token) + resp, err := client.R(). + EnableTrace(). + SetHeader("Content-Type", "application/json"). + SetBody(map[string]interface{}{"user": "test", "password": "test", "affiliation": "department2", "role": "admin"}). + SetResult(&RegisterResp{}). + Post("http://localhost:8090/api/register") + + Expect(err).ShouldNot(HaveOccurred()) + resultRegister := resp.Result().(*RegisterResp) + Expect(resultRegister.Status).Should(Equal(200)) + }) + + It("login with newly registered user", func() { + + client := resty.New() + client.SetAuthToken(token) + + resp, err := client.R(). + EnableTrace(). + SetHeader("Content-Type", "application/json"). + SetBody(map[string]interface{}{"user": "test", "password": "test", "network": "org1-network"}). 
+ SetResult(&LoginResponse{}). + Post("http://localhost:8090/auth/login") + + Expect(err).ShouldNot(HaveOccurred()) + + resultLogin := resp.Result().(*LoginResponse) + + Expect(resultLogin.User.Message).Should(Equal("logged in")) + Expect(resultLogin.User.Name).Should(Equal("test")) + }) + + It("fail to register duplicate user", func() { + + client := resty.New() + client.SetAuthToken(token) + resp, err := client.R(). + EnableTrace(). + SetHeader("Content-Type", "application/json"). + SetBody(map[string]interface{}{"user": "test", "password": "test", "affiliation": "department2", "role": "admin"}). + SetResult(&RegisterResp{}). + Post("http://localhost:8090/api/register") + + Expect(err).ShouldNot(HaveOccurred()) + resultRegister := resp.Result().(*RegisterResp) + Expect(resultRegister.Status).Should(Equal(400)) + Expect(resultRegister.Message).Should(Equal("Error: already exists")) + }) + + Describe("Bugfix check:", func() { + + It("Add new channel to org1 and explorer should detect it", func() { + inputSpecPath = "apitest-input-singleprofile_addnewch.yml" + + By("1) Creating channel") + action := "create" + err := testclient.Testclient(action, inputSpecPath) + Expect(err).NotTo(HaveOccurred()) + + By("2) Joining Peers to channel") + action = "join" + err = testclient.Testclient(action, inputSpecPath) + Expect(err).NotTo(HaveOccurred()) + + By("3) Updating channel with anchor peers") + action = "anchorpeer" + err = testclient.Testclient(action, inputSpecPath) + Expect(err).NotTo(HaveOccurred()) + + By("4) Instantiating Chaincode") + action = "instantiate" + err = testclient.Testclient(action, inputSpecPath) + Expect(err).NotTo(HaveOccurred()) + + By("5) Sending Queries") + action = "query" + err = testclient.Testclient(action, inputSpecPath) + Expect(err).NotTo(HaveOccurred()) + + By("6) Sending Invokes") + action = "invoke" + err = testclient.Testclient(action, inputSpecPath) + Expect(err).NotTo(HaveOccurred()) + + By("7) Retrieving channels again") + client := 
resty.New() + client.SetAuthToken(token) + + }) + + It("Should include the newly added channel when retrieving channels again", func() { + + client := resty.New() + client.SetAuthToken(token) + + resp, err := client.R(). + EnableTrace(). + SetResult(&ChannelsResponse{}). + Get("http://localhost:8090/api/channels") + Expect(err).ShouldNot(HaveOccurred()) + + result := resp.Result().(*ChannelsResponse) + Expect(result.Channels).Should(ContainElements([]string{"org1channel", "commonchannel", "channel2422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422"})) + + }) + + It("Should create a new event hub for the newly added channel within 60s", func() { + channelMonitored = "channel2422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422" + Eventually(CheckHowManyEventHubRegistered, 70, 5).Should(Equal(1)) + channelMonitored = "commonchannel" + Expect(CheckHowManyEventHubRegistered()).Should(Equal(1)) + channelMonitored = "org1channel" + Expect(CheckHowManyEventHubRegistered()).Should(Equal(1)) + }) + + It("Should keep running fine even after removing one of orderer peers", func() { + StopNode("orderer0-ordererorg1") + Eventually(CheckIfSwitchedToNewOrderer, 60, 5).Should(Equal(1)) + StopNode("orderer1-ordererorg1") + Eventually(CheckIfSwitchedToNewOrderer, 60, 5).Should(Equal(2)) + }) + + }) + + It("stop explorer", func() { + _, err := networkclient.ExecuteCommand("./stopexplorer.sh", []string{}, true) + Expect(err).NotTo(HaveOccurred()) + }) + + }) +}) diff --git a/app/platform/fabric/e2e-test/specs/genchannelartifacts.sh b/app/platform/fabric/e2e-test/specs/genchannelartifacts.sh new file mode 100755 index 
000000000..13fc89e91 --- /dev/null +++ b/app/platform/fabric/e2e-test/specs/genchannelartifacts.sh @@ -0,0 +1,32 @@ +#!/bin/bash +SCRIPTPATH="$( cd "$(dirname "$0")" >/dev/null 2>&1 ; pwd -P )" +CHPROFILE=testorgschannel +function genChTx() +{ + ch=$1 + pushd ${SCRIPTPATH}/configFiles + mkdir -p ../channel-artifacts/$ch + configtxgen -profile ${CHPROFILE} -outputCreateChannelTx ../channel-artifacts/${ch}/${ch}.tx -channelID ${ch} + popd +} + +function genAnchorTx() +{ + ch=$1 + org=$2 + pushd ${SCRIPTPATH}/configFiles + mkdir -p ../channel-artifacts/$ch + configtxgen -profile ${CHPROFILE} -outputAnchorPeersUpdate ../channel-artifacts/${ch}/${ch}org${org}anchor.tx -channelID ${ch} -asOrg org${org} + popd +} + +genChTx commonchannel +genChTx org1channel +genChTx channel2422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422 +genChTx org2channel + +genAnchorTx commonchannel 1 +genAnchorTx commonchannel 2 +genAnchorTx org1channel 1 +genAnchorTx channel2422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422422 1 +genAnchorTx org2channel 2 diff --git a/app/platform/fabric/e2e-test/specs/go.mod b/app/platform/fabric/e2e-test/specs/go.mod new file mode 100644 index 000000000..f40c318bf --- /dev/null +++ b/app/platform/fabric/e2e-test/specs/go.mod @@ -0,0 +1,28 @@ +module apitest + +// SPDX-License-Identifier: Apache-2.0 + +go 1.13 + +require ( + github.com/cosiner/argv v0.0.1 // indirect + github.com/go-delve/delve v1.4.0 // indirect + github.com/go-resty/resty/v2 v2.1.0 + github.com/hyperledger/fabric-test v1.4.5-0.20200212013951-45799a2ee4ee + github.com/konsorten/go-windows-terminal-sequences v1.0.2 // indirect + 
github.com/mattn/go-colorable v0.1.4 // indirect + github.com/mattn/go-isatty v0.0.12 // indirect + github.com/mattn/go-runewidth v0.0.8 // indirect + github.com/onsi/ginkgo v1.12.0 + github.com/onsi/gomega v1.9.0 + github.com/peterh/liner v1.2.0 // indirect + github.com/pkg/errors v0.9.1 // indirect + github.com/sirupsen/logrus v1.4.2 // indirect + github.com/spf13/cobra v0.0.5 // indirect + github.com/spf13/pflag v1.0.5 // indirect + go.starlark.net v0.0.0-20200203144150-6677ee5c7211 // indirect + golang.org/x/arch v0.0.0-20191126211547-368ea8f32fff // indirect + golang.org/x/crypto v0.0.0-20200214034016-1d94cc7ab1c6 // indirect + golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4 // indirect + gopkg.in/yaml.v2 v2.2.8 // indirect +) diff --git a/app/platform/fabric/e2e-test/specs/go.sum b/app/platform/fabric/e2e-test/specs/go.sum new file mode 100644 index 000000000..5492c0482 --- /dev/null +++ b/app/platform/fabric/e2e-test/specs/go.sum @@ -0,0 +1,121 @@ +github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= +github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8= +github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= +github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= +github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= +github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= +github.com/coreos/go-etcd v2.0.0+incompatible/go.mod h1:Jez6KQU2B/sWsbdaef3ED8NzMklzPG4d5KIOhIy30Tk= +github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= +github.com/cosiner/argv v0.0.0-20170225145430-13bacc38a0a5/go.mod h1:p/NrK5tF6ICIly4qwEDsf6VDirFiWWz0FenfYBwJaKQ= +github.com/cosiner/argv v0.0.1/go.mod h1:p/NrK5tF6ICIly4qwEDsf6VDirFiWWz0FenfYBwJaKQ= 
+github.com/cpuguy83/go-md2man v1.0.8/go.mod h1:N6JayAiVKtlHSnuTCeuLSQVs75hb8q+dYQLjr7cDsKY= +github.com/cpuguy83/go-md2man v1.0.10/go.mod h1:SmD6nW6nTyfqj6ABTjUi3V3JVMnlJmwcJI5acqYI6dE= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= +github.com/go-delve/delve v1.4.0/go.mod h1:gQM0ReOJLNAvPuKAXfjHngtE93C2yc/ekTbo7YbAHSo= +github.com/go-resty/resty/v2 v2.1.0 h1:Z6IefCpUMfnvItVJaJXWv/pMiiD11So35QgwEELsldE= +github.com/go-resty/resty/v2 v2.1.0/go.mod h1:dZGr0i9PLlaaTD4H/hoZIDjQ+r6xq8mgbRzHZf7f2J8= +github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= +github.com/hpcloud/tail v1.0.0 h1:nfCOvKYfkgYP8hkirhJocXT2+zOD8yUNjXaWfTlyFKI= +github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= +github.com/hyperledger/fabric-test v1.4.4 h1:17Yu+p0QQZlswQFHYs9Jt/2hTgunsqInhMU8qXdJeDc= +github.com/hyperledger/fabric-test v1.4.4/go.mod h1:7K2xXfkAUybufYc/nYZEfJx/B84HnjHHsy6eKWZJyc4= +github.com/hyperledger/fabric-test v1.4.5-0.20200212013951-45799a2ee4ee h1:/nHTnB6bE9G/U+1k0C3xdYgcH87nwuIzMuS9vJkBwh0= +github.com/hyperledger/fabric-test v1.4.5-0.20200212013951-45799a2ee4ee/go.mod h1:fMKcxSNixj6MfTor0vaAPfjHJWA8baAcdA6j34xLHjY= +github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= +github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= +github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= +github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= +github.com/kr/pty v1.1.1/go.mod 
h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= +github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= +github.com/mattn/go-colorable v0.0.0-20170327083344-ded68f7a9561/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= +github.com/mattn/go-colorable v0.1.4/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE= +github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= +github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= +github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= +github.com/mattn/go-runewidth v0.0.3/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU= +github.com/mattn/go-runewidth v0.0.8/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI= +github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= +github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= +github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.8.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.12.0 h1:Iw5WCbBcaAAd0fpRb1c9r5YCylv4XDoCSigm1zLevwU= +github.com/onsi/ginkgo v1.12.0/go.mod h1:oUhWkIvk5aDxtKvDDuw8gItl8pKl42LzjC9KZE0HfGg= +github.com/onsi/gomega v1.5.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= +github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY= +github.com/onsi/gomega v1.9.0 h1:R1uwffexN6Pr340GtYRIdZmAiN4J+iw6WG4wog1DUXg= +github.com/onsi/gomega v1.9.0/go.mod h1:Ho0h+IUsWyvy1OpqCwxlQ/21gkhVunqlU8fDGcoTdcA= +github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic= +github.com/peterh/liner v0.0.0-20170317030525-88609521dc4b/go.mod h1:xIteQHvHuaLYG9IFj6mSxM0fCKrs34IrEQUhOYuGPHc= +github.com/peterh/liner 
v1.2.0/go.mod h1:CRroGNssyjTd/qIG2FyxByd2S8JEAZXBl4qUrZf8GS0= +github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= +github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/profile v0.0.0-20170413231811-06b906832ed0/go.mod h1:hJw3o1OdXxsrSjjVksARp5W95eeEaEfptyVZyv6JUPA= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/russross/blackfriday v0.0.0-20180428102519-11635eb403ff/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g= +github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g= +github.com/sirupsen/logrus v0.0.0-20180523074243-ea8897e79973/go.mod h1:pMByvHTf9Beacp5x1UXfOR9xyW/9antXMhjMPG0dEzc= +github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= +github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ= +github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= +github.com/spf13/cobra v0.0.0-20170417170307-b6cb39589372/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= +github.com/spf13/cobra v0.0.5/go.mod h1:3K3wKZymM7VvHMDS9+Akkh4K60UwM26emMESw8tLCHU= +github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo= +github.com/spf13/pflag v0.0.0-20170417173400-9e4c21054fa1/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= +github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= +github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= +github.com/spf13/viper v1.3.2/go.mod h1:ZiWeW+zYFKm7srdB9IoDzzZXaJaI5eL9QjNiN/DMA2s= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= +github.com/stretchr/testify 
v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +github.com/ugorji/go/codec v0.0.0-20181204163529-d75b2dcb6bc8/go.mod h1:VFNgLljTbGfSG7qAOspJ7OScBnGdDN/yBr0sguwnwf0= +github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q= +go.starlark.net v0.0.0-20190702223751-32f345186213/go.mod h1:c1/X6cHgvdXj6pUlmWKMkuqRnW4K8x2vwt6JAaaircg= +go.starlark.net v0.0.0-20200203144150-6677ee5c7211/go.mod h1:nmDLcffg48OtT/PSW0Hg7FvpRQsQh5OSqIylirxKC7o= +golang.org/x/arch v0.0.0-20190927153633-4e8777c89be4/go.mod h1:flIaEI6LNU6xOCD5PaJvn9wGP0agmIOqjrtsKGRguv4= +golang.org/x/arch v0.0.0-20191126211547-368ea8f32fff/go.mod h1:flIaEI6LNU6xOCD5PaJvn9wGP0agmIOqjrtsKGRguv4= +golang.org/x/crypto v0.0.0-20181203042331-505ab145d0a9/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20200214034016-1d94cc7ab1c6/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190628185345-da137c7871d7 h1:rTIdg5QFRR7XCaK4LCjBiPbx8j4DQRpdYMnGn/bJUEU= +golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181205085412-a5c9d58dba9a/go.mod 
h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190626221950-04f50cda93cb/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191002063906-3421d5a6bb1c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4 h1:sfkvUWPNGwSV+8/fNqctR5lS2AqCSqYwXdrjCxp/dXo= +golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/tools v0.0.0-20191127201027-ecd32218bd7f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7 h1:9zdDQZ7Thm29KFXgAX/+yaf3eVbP7djjWp/dXAppNCc= +golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +gopkg.in/airbrake/gobrake.v2 v2.0.9/go.mod h1:/h5ZAUhDkGaJfjzjKLSjv6zCL6O0LLBxU4K+aSYdM/U= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/fsnotify.v1 v1.4.7 h1:xOHLXZwVvI9hhs+cLKq5+I5onOuwQLhQwiu63xxlHs4= 
+gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= +gopkg.in/gemnasium/logrus-airbrake-hook.v2 v2.1.2/go.mod h1:Xk6kEKp8OKb+X14hQBKWaSkCsqBpgog8nAV2xsGOxlo= +gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= +gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= +gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.8 h1:obN1ZagJSUGI0Ek/LBmuj4SNLPfIny3KsKFopxRdj10= +gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +rsc.io/pdf v0.1.1/go.mod h1:n8OzWcQ6Sp37PL01nO98y4iUCRdTGarVfzxY20ICaU4= diff --git a/app/platform/fabric/e2e-test/specs/runexplorer.sh b/app/platform/fabric/e2e-test/specs/runexplorer.sh new file mode 100755 index 000000000..ff62c523e --- /dev/null +++ b/app/platform/fabric/e2e-test/specs/runexplorer.sh @@ -0,0 +1,14 @@ +#!/bin/bash + +# SPDX-License-Identifier: Apache-2.0 + +SCRIPTPATH="$( cd "$(dirname "$0")" >/dev/null 2>&1 ; pwd -P )" +export NETWORK_MODE=$1 + +pushd ${SCRIPTPATH}/.. + +docker-compose down -v +docker-compose up -d explorerdb.mynetwork.com explorer.mynetwork.com +sleep 20 + +popd \ No newline at end of file diff --git a/app/platform/fabric/e2e-test/specs/stopexplorer.sh b/app/platform/fabric/e2e-test/specs/stopexplorer.sh new file mode 100755 index 000000000..bff85c55b --- /dev/null +++ b/app/platform/fabric/e2e-test/specs/stopexplorer.sh @@ -0,0 +1,11 @@ +#!/bin/bash + +# SPDX-License-Identifier: Apache-2.0 + +SCRIPTPATH="$( cd "$(dirname "$0")" >/dev/null 2>&1 ; pwd -P )" + +pushd ${SCRIPTPATH}/.. 
+ +docker-compose down -v + +popd \ No newline at end of file diff --git a/app/platform/fabric/e2e-test/specs/templates/configtx.yaml b/app/platform/fabric/e2e-test/specs/templates/configtx.yaml new file mode 100644 index 000000000..27edefa7e --- /dev/null +++ b/app/platform/fabric/e2e-test/specs/templates/configtx.yaml @@ -0,0 +1,138 @@ +#! Copyright IBM Corp. All Rights Reserved. +#! +#! SPDX-License-Identifier: Apache-2.0 + +#@ load("@ytt:data", "data") +#@ def orgPolicies(mspId, type, nodeOUs): +#@ output = {} +#@ if nodeOUs: +#@ readwriteset = {"Type": "Signature", "Rule": "OR('{}.admin', '{}.{}', '{}.client')".format(mspId, mspId, type, mspId)} +#@ admins = {"Type": "Signature", "Rule": "OR('{}.admin')".format(mspId)} +#@ else: +#@ readwriteset = {"Type": "Signature", "Rule": "OR('{}.member')".format(mspId, mspId, type, mspId)} +#@ admins = {"Type": "Signature", "Rule": "OR('{}.member')".format(mspId)} +#@ end +#@ output = {"Readers": readwriteset, "Writers": readwriteset, "Admins": admins} +#@ return output +#@ end +#@ def orgOrderers(ordererOrg, port, config): +#@ orderers = [] +#@ for i in range(0, ordererOrg.numOderers): +#@ if config.nodeportIP: +#@ orderers.append("{}:{}".format(config.nodeportIP, port)) +#@ else: +#@ orderers.append("orderer{}-{}:{}".format(i, ordererOrg.name, port)) +#@ end +#@ port += 1 +#@ end +#@ return orderers +#@ end +#@ def anchorPeers(orgName, port): +#@ anchorpeers = [] +#@ anchorpeers.append({"Host": "peer0-{}".format(orgName), "Port": port}) +#@ return anchorpeers +#@ end +#@ def organizations(config, type): +#@ output = [] +#@ artifactsLocation = config.artifactsLocation +#@ if artifactsLocation.endswith("/") == False: +#@ artifactsLocation = artifactsLocation + "/" +#@ end +#@ if type == "orderer": +#@ port1 = 30000 +#@ num_organizations = len(config.ordererOrganizations) +#@ for i in range(0, num_organizations): +#@ output.append({"Name": config.ordererOrganizations[i].name, "OrdererEndpoints": 
orgOrderers(config.ordererOrganizations[i], port1, config), "ID": config.ordererOrganizations[i].mspId, "MSPDir": artifactsLocation+"crypto-config/ordererOrganizations/"+config.ordererOrganizations[i].name+"/msp", "Policies": orgPolicies(config.ordererOrganizations[i].mspId, "orderer", config.enableNodeOUs)}) +#@ port1 += config.ordererOrganizations[i].numOderers +#@ end +#@ end +#@ if type == "peer": +#@ port2 = 31000 +#@ for i in range(0, len(config.peerOrganizations)): +#@ output.append({"Name": config.peerOrganizations[i].name, "AnchorPeers": anchorPeers(config.peerOrganizations[i].name, port2),"ID": config.peerOrganizations[i].mspId, "MSPDir": artifactsLocation+"crypto-config/peerOrganizations/"+config.peerOrganizations[i].name+"/msp", "Policies": orgPolicies(config.peerOrganizations[i].mspId, "peer", config.enableNodeOUs)}) +#@ port2 += config.peerOrganizations[i].numPeers +#@ end +#@ end +#@ return output +#@ end +#@ def policies(type): +#@ readers = {"Type": "ImplicitMeta", "Rule": "ANY Readers"} +#@ writers = {"Type": "ImplicitMeta", "Rule": "ANY Writers"} +#@ admins = {"Type": "ImplicitMeta", "Rule": "ANY Admins"} +#@ output = {} +#@ if type == "orderer": +#@ blockValidation = {"Type": "ImplicitMeta", "Rule": "ANY Writers"} +#@ output = {"Readers":readers, "Writers": writers, "Admins": admins, "BlockValidation": blockValidation} +#@ else: +#@ output = {"Readers":readers, "Writers": writers, "Admins": admins} +#@ end +#@ return output +#@ end +#@ def ordererHosts(config): +#@ result = [] +#@ port1 = 30000 +#@ num_organizations = len(config.ordererOrganizations) +#@ for i in range(0, num_organizations): +#@ ordererOrg = config.ordererOrganizations[i] +#@ numOderers = ordererOrg.numOderers +#@ for j in range(0, numOderers): +#@ result.append("orderer"+str(j)+"-"+ordererOrg.name+":{}".format(port1)) +#@ port1 += 1 +#@ end +#@ end +#@ return result +#@ end +#@ def orderer(config): +#@ result = {} +#@ batch_size_options = config.orderer.batchSize +#@ 
artifactsLocation = config.artifactsLocation +#@ if artifactsLocation.endswith("/") == False: +#@ artifactsLocation = artifactsLocation + "/" +#@ end +#@ batchSize = {"MaxMessageCount": batch_size_options.maxMessageCount, "AbsoluteMaxBytes": batch_size_options.absoluteMaxBytes, "PreferredMaxBytes": batch_size_options.preferredMaxBytes} +#@ if(config.orderer.ordererType=="etcdraft"): +#@ consenters = [] +#@ port1 = 30000 +#@ etcd_options = config.orderer.etcdraftOptions +#@ for i in range(0, len(config.ordererOrganizations)): +#@ for j in range(0, config.ordererOrganizations[i].numOderers): +#@ ordererOrg = config.ordererOrganizations[i] +#@ certs = "{}crypto-config/ordererOrganizations/{}/orderers/orderer{}-{}.{}/tls/server.crt".format(artifactsLocation, ordererOrg.name, j, ordererOrg.name, ordererOrg.name) +#@ consenters.append({"Host":"orderer{}-{}".format(j, ordererOrg.name), "Port": port1, "ClientTLSCert": certs, "ServerTLSCert": certs}) +#@ port1 += 1 +#@ end +#@ end +#@ options = {"TickInterval": etcd_options.tickInterval, "ElectionTick": etcd_options.electionTick, "HeartbeatTick": etcd_options.heartbeatTick, "MaxInflightBlocks": etcd_options.maxInflightBlocks, "SnapshotIntervalSize": etcd_options.snapshotIntervalSize} +#@ result = {"OrdererType": config.orderer.ordererType, "Addresses": ordererHosts(config), "BatchTimeout": config.orderer.batchTimeOut, "BatchSize": batchSize, "Organizations": organizations(config, "orderer"), "Policies": policies("orderer"), "Capabilities": config.ordererCapabilities, "EtcdRaft":{"Consenters":consenters, "Options":options}} +#@ elif (config.orderer.ordererType == "kafka"): +#@ brokersList = [] +#@ kafka = {} +#@ for i in range(0, config.kafka.numKafka): +#@ brokersList.append("kafka{}:9092".format(i)) +#@ end +#@ kafka = {"Brokers": brokersList} +#@ result = {"OrdererType": config.orderer.ordererType, "Addresses": ordererHosts(config), "BatchTimeout": config.orderer.batchTimeOut, "BatchSize": batchSize, "kafka": kafka, 
"Organizations": organizations(config, "orderer"),"Policies": policies("orderer"),"Capabilities": config.ordererCapabilities} +#@ else: +#@ result = {"OrdererType": config.orderer.ordererType, "Addresses": ordererHosts(config), "BatchTimeout": config.orderer.batchTimeOut, "BatchSize": batchSize, "Organizations": organizations(config, "orderer"), "Policies": policies("orderer"), "Capabilities": config.ordererCapabilities} +#@ end +#@ return result +#@ end + +#@ config = data.values +Profiles: + testorgschannel: + Policies: #@ policies("channel") + Capabilities: #@ config.channelCapabilities + Consortium: FabricConsortium + Application: + Organizations: #@ organizations(config, "peer") + Policies: #@ policies("peer") + Capabilities: #@ config.applicationCapabilities + Orderer: #@ orderer(config) + testOrgsOrdererGenesis: + Policies: #@ policies("channel") + Capabilities: #@ config.channelCapabilities + Orderer: #@ orderer(config) + Consortiums: + FabricConsortium: + Organizations: #@ organizations(config, "peer") \ No newline at end of file diff --git a/app/platform/fabric/e2e-test/specs/templates/crypto-config.yaml b/app/platform/fabric/e2e-test/specs/templates/crypto-config.yaml new file mode 100755 index 000000000..7fea8f7d9 --- /dev/null +++ b/app/platform/fabric/e2e-test/specs/templates/crypto-config.yaml @@ -0,0 +1,38 @@ +#! Copyright IBM Corp. All Rights Reserved. +#! +#! 
SPDX-License-Identifier: Apache-2.0 + +#@ load("@ytt:data", "data") +#@ config = data.values +OrdererOrgs: +#@ num_organizations = len(config.ordererOrganizations) +#@ for i in range(0, num_organizations): +#@ ordererOrg = config.ordererOrganizations[i] +- Domain: #@ ordererOrg.name + Name: #@ ordererOrg.name + EnableNodeOUs: #@ config.enableNodeOUs + Specs: + #@ numOderers = ordererOrg.numOderers + #@ for i in range(0, numOderers): + - Hostname: #@ "orderer{}-{}".format(i, ordererOrg.name) + #@ if config.nodeportIP != "": + SANS: + - #@ config.nodeportIP + #@ end + #@ end +#@ end +PeerOrgs: +#@ for i in range(0, len(config.peerOrganizations)): +#@ peerOrg = config.peerOrganizations[i] +- Domain: #@ peerOrg.name + Name: #@ peerOrg.name + EnableNodeOUs: #@ config.enableNodeOUs + Specs: + #@ for i in range(0, peerOrg.numPeers): + - Hostname: #@ "peer{}-{}".format(i, peerOrg.name) + #@ if config.nodeportIP != "": + SANS: + - #@ config.nodeportIP + #@ end + #@ end +#@ end \ No newline at end of file diff --git a/app/platform/fabric/e2e-test/specs/templates/docker/docker-compose.yaml b/app/platform/fabric/e2e-test/specs/templates/docker/docker-compose.yaml new file mode 100644 index 000000000..7993f83c0 --- /dev/null +++ b/app/platform/fabric/e2e-test/specs/templates/docker/docker-compose.yaml @@ -0,0 +1,235 @@ +#! Copyright IBM Corp. All Rights Reserved. +#! +#! 
SPDX-License-Identifier: Apache-2.0 + +#@ load("@ytt:data", "data") +#@ services = {} + +#@ def caList(config): +#@ caUniquePort = 32000 +#@ for i in range(0, len(config.peerOrganizations)): +#@ org = config.peerOrganizations[i] +#@ for j in range(0, org.numCa): +#@ container_name = "ca{}-{}".format(j, org.name) +#@ services[container_name] = ca(container_name, caUniquePort, "peer", org.name, config) +#@ caUniquePort += 1 +#@ end +#@ end +#@ for i in range(0, len(config.ordererOrganizations)): +#@ org = config.ordererOrganizations[i] +#@ for j in range(0, org.numCa): +#@ container_name = "ca{}-{}".format(j, org.name) +#@ services[container_name] = ca(container_name, caUniquePort, "orderer", org.name, config) +#@ caUniquePort += 1 +#@ end +#@ end +#@ end + +#@ def couchDB(config): +#@ couchDBUniquePort = 33000 +#@ for i in range(0, len(config.peerOrganizations)): +#@ org = config.peerOrganizations[i] +#@ for j in range(0, org.numPeers): +#@ container_name = "couchdb-peer{}-{}".format(j, org.name) +#@ services[container_name] = {"container_name":container_name, "image":"hyperledger/fabric-couchdb", "ports":["{}:5984".format(couchDBUniquePort)]} +#@ couchDBUniquePort += 1 +#@ end +#@ end +#@ end + +#@ def ca(container_name, caUniquePort, type, orgName, config): +#@ orgType = "{}Organizations".format(type) +#@ image = "hyperledger/fabric-ca:{}".format(config.fabricVersion) +#@ if config.fabricVersion.endswith("-stable"): +#@ image="hyperledger-fabric.jfrog.io/fabric-ca:amd64-{}".format(config.fabricVersion) +#@ end +#@ env = ["FABRIC_CA_HOME=/etc/hyperledger/fabric-ca-server"] +#@ env.append("FABRIC_CA_SERVER_CA_NAME={}".format(container_name)) +#@ env.append("FABRIC_CA_SERVER_CA_CERTFILE=/etc/hyperledger/fabric-ca-server-config/ca/ca.{}-cert.pem".format(orgName)) +#@ env.append("FABRIC_CA_SERVER_CA_KEYFILE=/etc/hyperledger/fabric-ca-server-config/ca/ca-priv_sk") +#@ if config.tls == "mutual": +#@ env.append("FABRIC_CA_SERVER_TLS_ENABLED=true") +#@ else: +#@ 
env.append("FABRIC_CA_SERVER_TLS_ENABLED={}".format(config.tls)) +#@ end +#@ env.append("FABRIC_CA_SERVER_TLS_CERTFILE=/etc/hyperledger/fabric-ca-server-config/tlsca/tlsca.{}-cert.pem".format(orgName)) +#@ env.append("FABRIC_CA_SERVER_TLS_KEYFILE=/etc/hyperledger/fabric-ca-server-config/tlsca/tlsca-priv_sk") +#@ ports = ["{}:{}".format(caUniquePort, 7054)] +#@ command = "sh -c 'fabric-ca-server start -b admin:adminpw -d'" +#@ volumes = ["{}crypto-config/{}/{}/ca/:/etc/hyperledger/fabric-ca-server-config/ca".format(artifactsLocation, orgType, orgName)] +#@ volumes.append("{}crypto-config/{}/{}/tlsca/:/etc/hyperledger/fabric-ca-server-config/tlsca".format(artifactsLocation, orgType, orgName)) +#@ ca = {"image":image, "environment":env, "ports":ports, "command":command, "volumes":volumes, "container_name":container_name} +#@ return ca +#@ end + +#@ def kafka_zookeepers(kafka): +#@ (zookeepers, kafka_zookeeper_connect) = ([], []) +#@ kafkaUniquePort = 2181 +#@ for i in range(0, kafka.numZookeepers): +#@ env = ["ZOO_MY_ID={}".format(i+1), "ZOO_PORT={}".format(kafkaUniquePort), "ZOO_SERVERS={}".format(zookeeper_servers(kafka.numZookeepers))] +#@ expose = [kafkaUniquePort, kafkaUniquePort+1, kafkaUniquePort+2] +#@ container_name = "zookeeper{}".format(i) +#@ zookeepers.append(container_name) +#@ kafka_zookeeper_connect.append("{}:{}".format(container_name, kafkaUniquePort)) +#@ services[container_name] = {"image":"hyperledger/fabric-zookeeper", "environment":env, "expose":expose, "container_name":container_name} +#@ kafkaUniquePort += 1000 +#@ end +#@ kafkaUniquePort = 9092 +#@ for i in range(0, kafka.numKafka): +#@ container_name = "kafka{}".format(i) +#@ env = ["KAFKA_BROKER_ID={}".format(i), "KAFKA_DEFAULT_REPLICATION_FACTOR={}".format(kafka.numKafkaReplications), "KAFKA_MESSAGE_MAX_BYTES=103809024", "KAFKA_REPLICA_FETCH_MAX_BYTES=103809024"] +#@ env.append("KAFKA_ZOOKEEPER_CONNECT={}".format(",".join(kafka_zookeeper_connect))) +#@ 
env.append("KAFKA_MIN_INSYNC_REPLICAS=2") +#@ env.append("KAFKA_UNCLEAN_LEADER_ELECTION_ENABLE=false") +#@ services[container_name] = {"image":"hyperledger/fabric-kafka", "environment":env, "depends_on":zookeepers, "container_name":container_name, "ports":["{}:{}".format(kafkaUniquePort+i,kafkaUniquePort)]} +#@ end +#@ end + +#@ def zookeeper_servers(numZookeepers): +#@ servers = [] +#@ zookeeperUniquePort = 2181 +#@ for i in range(0, numZookeepers): +#@ servers.append("server.{}=zookeeper{}:{}:{}:participant".format(i+1, i, zookeeperUniquePort+1, zookeeperUniquePort+2)) +#@ zookeeperUniquePort += 1000 +#@ end +#@ return " ".join(servers) +#@ end + +#@ def kafkaList(numKafka): +#@ kafka_list = [] +#@ for i in range(0, numKafka): +#@ kafka_list.append("kafka{}".format(i)) +#@ end +#@ return kafka_list +#@ end + +#@ def mutualTLS(config): +#@ output = [] +#@ for i in range(0, len(config.peerOrganizations)): +#@ organization = config.peerOrganizations[i] +#@ output.append("/etc/hyperledger/fabric/artifacts/msp/crypto-config/peerOrganizations/{}/ca/ca.{}-cert.pem".format(organization.name, organization.name)) +#@ end +#@ for j in range(0, len(config.ordererOrganizations)): +#@ organization = config.ordererOrganizations[j] +#@ output.append("/etc/hyperledger/fabric/artifacts/msp/crypto-config/ordererOrganizations/{}/ca/ca.{}-cert.pem".format(organization.name, organization.name)) +#@ end +#@ return output +#@ end + +#@ def orderers(config): +#@ (ordererUniquerPort, ordererHealthCheckPort) = (30000, 30100) +#@ for i in range(0, len(config.ordererOrganizations)): +#@ org = config.ordererOrganizations[i] +#@ for j in range(0, org.numOderers): +#@ env = ["FABRIC_LOGGING_SPEC={}".format(config.ordererFabricLoggingSpec), "ORDERER_GENERAL_LISTENADDRESS=0.0.0.0", "ORDERER_GENERAL_LISTENPORT={}".format(ordererUniquerPort), "ORDERER_GENERAL_GENESISMETHOD=file"] +#@ env.append("ORDERER_GENERAL_GENESISFILE=/etc/hyperledger/fabric/artifacts/msp/channel-artifacts/genesis.block") +#@ 
env.append("ORDERER_GENERAL_LOCALMSPID={}".format(org.mspId)) +#@ env.append("ORDERER_GENERAL_LOCALMSPDIR=/etc/hyperledger/fabric/artifacts/msp/crypto-config/ordererOrganizations/{}/orderers/orderer{}-{}.{}/msp".format(org.name, j, org.name, org.name)) +#@ if config.tls == "mutual": +#@ env.append("ORDERER_GENERAL_TLS_CLIENTROOTCAS=[{}]".format(", ".join(mutualTLS(config)))) +#@ env.append("ORDERER_GENERAL_TLS_CLIENTAUTHREQUIRED=true") +#@ env.append("ORDERER_GENERAL_TLS_ENABLED=true") +#@ else: +#@ env.append("ORDERER_GENERAL_TLS_ENABLED={}".format(config.tls)) +#@ end +#@ env.append("ORDERER_OPERATIONS_LISTENADDRESS=0.0.0.0:8443") +#@ env.append("ORDERER_GENERAL_TLS_PRIVATEKEY=/etc/hyperledger/fabric/artifacts/msp/crypto-config/ordererOrganizations/{}/orderers/orderer{}-{}.{}/tls/server.key".format(org.name, j, org.name, org.name)) +#@ env.append("ORDERER_GENERAL_TLS_CERTIFICATE=/etc/hyperledger/fabric/artifacts/msp/crypto-config/ordererOrganizations/{}/orderers/orderer{}-{}.{}/tls/server.crt".format(org.name, j, org.name, org.name)) +#@ env.append("ORDERER_GENERAL_TLS_ROOTCAS=[/etc/hyperledger/fabric/artifacts/msp/crypto-config/ordererOrganizations/{}/orderers/orderer{}-{}.{}/tls/server.crt]".format(org.name, j, org.name, org.name)) +#@ env.append("ORDERER_GENERAL_CLUSTER_CLIENTPRIVATEKEY=/etc/hyperledger/fabric/artifacts/msp/crypto-config/ordererOrganizations/{}/orderers/orderer{}-{}.{}/tls/server.key".format(org.name, j, org.name, org.name)) +#@ env.append("ORDERER_GENERAL_CLUSTER_CLIENTCERTIFICATE=/etc/hyperledger/fabric/artifacts/msp/crypto-config/ordererOrganizations/{}/orderers/orderer{}-{}.{}/tls/server.crt".format(org.name, j, org.name, org.name)) +#@ volumes = ["{}:/etc/hyperledger/fabric/artifacts/msp/".format(artifactsLocation)] +#@ volumes.append("./backup/orderer{}-{}:/var/hyperledger/production/orderer".format(j, org.name)) +#@ container_name = "orderer{}-{}".format(j,org.name) +#@ image = 
"hyperledger/fabric-orderer:{}".format(config.fabricVersion) +#@ if config.fabricVersion.endswith("-stable"): +#@ image="hyperledger-fabric.jfrog.io/fabric-orderer:amd64-{}".format(config.fabricVersion) +#@ end +#@ ports = ["{}:{}".format(ordererUniquerPort,ordererUniquerPort), "{}:{}".format(ordererHealthCheckPort,8443)] +#@ services[container_name] = {"image":image, "environment":env, "working_dir":"/opt/gopath/src/github.com/hyperledger/fabric", "command":"orderer", "volumes":volumes, "ports":ports, "container_name":container_name} +#@ if config.orderer.ordererType == "kafka": +#@ services[container_name] = {"image":image, "environment":env, "working_dir":"/opt/gopath/src/github.com/hyperledger/fabric", "command":"orderer", "volumes":volumes, "ports":ports, "container_name":container_name, "depends_on":kafkaList(config.kafka.numKafka)} +#@ end +#@ ordererUniquerPort += 1 +#@ ordererHealthCheckPort += 1 +#@ end +#@ end +#@ end + +#@ def peers(config): +#@ (peerUniquerPort, peerHealthCheckPort) = (31000, 31100) +#@ for i in range(0, len(config.peerOrganizations)): +#@ org = config.peerOrganizations[i] +#@ for j in range(0, org.numPeers): +#@ env = ["CORE_VM_ENDPOINT=unix:///host/var/run/docker.sock", "FABRIC_LOGGING_SPEC={}".format(config.peerFabricLoggingSpec), "CORE_VM_DOCKER_HOSTCONFIG_NETWORKMODE=configfiles_default"] +#@ if config.gossipEnable == True: +#@ env.append("CORE_PEER_GOSSIP_STATE_ENABLED=true") +#@ env.append("CORE_PEER_GOSSIP_ORGLEADER=false") +#@ env.append("CORE_PEER_GOSSIP_USELEADERELECTION=true") +#@ else: +#@ env.append("CORE_PEER_GOSSIP_STATE_ENABLED=false") +#@ env.append("CORE_PEER_GOSSIP_ORGLEADER=true") +#@ env.append("CORE_PEER_GOSSIP_USELEADERELECTION=false") +#@ end +#@ env.append("CORE_PEER_GOSSIP_ENDPOINT=peer{}-{}:{}".format(j, org.name, peerUniquerPort)) +#@ env.append("CORE_PEER_LISTENADDRESS=0.0.0.0:{}".format(peerUniquerPort)) +#@ env.append("CORE_PEER_CHAINCODELISTENADDRESS=0.0.0.0:{}".format(7052)) +#@ 
env.append("CORE_CHAINCODE_EXECUTETIMEOUT=1500s") +#@ env.append("CORE_PEER_ID=peer{}-{}".format(j, org.name)) +#@ env.append("CORE_PEER_MSPCONFIGPATH=/etc/hyperledger/fabric/artifacts/msp/crypto-config/peerOrganizations/{}/peers/peer{}-{}.{}/msp".format(org.name, j, org.name, org.name)) +#@ env.append("CORE_PEER_LOCALMSPID={}".format(org.mspId)) +#@ env.append("CORE_PEER_ADDRESS=peer{}-{}:{}".format(j, org.name, peerUniquerPort)) +#@ env.append("CORE_OPERATIONS_LISTENADDRESS=0.0.0.0:9443") +#@ env.append("CORE_PEER_CHAINCODEADDRESS=peer{}-{}:{}".format(j, org.name, 7052)) +#@ env.append("CORE_PEER_GOSSIP_EXTERNALENDPOINT=peer{}-{}:{}".format(j, org.name, peerUniquerPort)) +#@ if config.tls == "mutual": +#@ env.append("CORE_PEER_TLS_CLIENTROOTCAS_FILES={}".format(" ".join(mutualTLS(config)))) +#@ env.append("CORE_PEER_TLS_CLIENTAUTHREQUIRED=true") +#@ env.append("CORE_PEER_TLS_ENABLED=true") +#@ else: +#@ env.append("CORE_PEER_TLS_ENABLED={}".format(config.tls)) +#@ end +#@ env.append("CORE_PEER_TLS_CERT_FILE=/etc/hyperledger/fabric/artifacts/msp/crypto-config/peerOrganizations/{}/peers/peer{}-{}.{}/tls/server.crt".format(org.name, j, org.name, org.name)) +#@ env.append("CORE_PEER_TLS_KEY_FILE=/etc/hyperledger/fabric/artifacts/msp/crypto-config/peerOrganizations/{}/peers/peer{}-{}.{}/tls/server.key".format(org.name, j, org.name, org.name)) +#@ env.append("CORE_PEER_TLS_ROOTCERT_FILE=/etc/hyperledger/fabric/artifacts/msp/crypto-config/peerOrganizations/{}/peers/peer{}-{}.{}/tls/ca.crt".format(org.name, j, org.name, org.name)) +#@ container_name = "peer{}-{}".format(j, org.name) +#@ volumes = ["{}:/etc/hyperledger/fabric/artifacts/msp/".format(artifactsLocation)] +#@ volumes.append("/var/run/:/host/var/run/") +#@ volumes.append("./backup/peer{}-{}:/var/hyperledger/production".format(j, org.name)) +#@ image = "hyperledger/fabric-peer:{}".format(config.fabricVersion) +#@ if config.fabricVersion.endswith("-stable"): +#@ 
env.append("CORE_CHAINCODE_BUILDER=hyperledger-fabric.jfrog.io/fabric-ccenv:amd64-{}".format(config.fabricVersion)) +#@ env.append("CORE_CHAINCODE_JAVA_RUNTIME=hyperledger-fabric.jfrog.io/fabric-javaenv:amd64-{}".format(config.fabricVersion)) +#@ env.append("CORE_CHAINCODE_GOLANG_RUNTIME=hyperledger/fabric-baseos:amd64-0.4.18") +#@ env.append("CORE_CHAINCODE_NODE_RUNTIME=hyperledger/fabric-baseimage:amd64-0.4.18") +#@ image="hyperledger-fabric.jfrog.io/fabric-peer:amd64-{}".format(config.fabricVersion) +#@ end +#@ ports = ["7051", "{}:{}".format(peerUniquerPort,peerUniquerPort), "{}:{}".format(peerHealthCheckPort,9443)] +#@ services[container_name] = {"image":image, "environment":env, "volumes":volumes, "ports":ports, "working_dir":"/opt/gopath/src/github.com/hyperledger/fabric/peer", "command":"peer node start", "container_name":container_name} +#@ if config.dbType == "couchdb": +#@ env.append("CORE_LEDGER_STATE_STATEDATABASE=CouchDB") +#@ env.append("CORE_LEDGER_STATE_COUCHDBCONFIG_COUCHDBADDRESS=couchdb-{}:5984".format(container_name)) +#@ services[container_name] = {"image":image, "environment":env, "volumes":volumes, "ports":ports, "working_dir":"/opt/gopath/src/github.com/hyperledger/fabric/peer", "command":"peer node start", "container_name":container_name, "depends_on":["couchdb-{}".format(container_name)]} +#@ end +#@ peerUniquerPort += 1 +#@ peerHealthCheckPort += 1 +#@ end +#@ end +#@ end + +#@ config = data.values +#@ artifactsLocation = config.artifactsLocation +#@ if artifactsLocation.endswith("/") == False: +#@ artifactsLocation = artifactsLocation + "/" +#@ end +version: '2' +#@ caList(config) +#@ if config.orderer.ordererType == "kafka": +#@ kafka_zookeepers(config.kafka) +#@ end +#@ if config.dbType == "couchdb": +#@ couchDB(config) +#@ end +#@ peers(config) +#@ orderers(config) +services: #@ services diff --git a/app/platform/fabric/e2e-test/specs/templates/k8s/fabric-k8s-pods.yaml 
b/app/platform/fabric/e2e-test/specs/templates/k8s/fabric-k8s-pods.yaml new file mode 100755 index 000000000..e592fe1e2 --- /dev/null +++ b/app/platform/fabric/e2e-test/specs/templates/k8s/fabric-k8s-pods.yaml @@ -0,0 +1,411 @@ +#! Copyright IBM Corp. All Rights Reserved. +#! +#! SPDX-License-Identifier: Apache-2.0 + +#@ load("@ytt:data", "data") +#@ def zkContainers(input, id, zklist, config): +#@ id = id + 1 +#@ env = [{ "name": "ZOO_MY_ID", "value": "{}".format(id)}] +#@ env.append({"name": "ZOO_SERVERS", "value": "{}".format(zklist)}) +#@ env.append({"name": "ZOO_TICK_TIME", "value": "2000"}) +#@ env.append({"name": "ZOO_INIT_LIMIT", "value": "10"}) +#@ env.append({"name": "ZOO_SYNC_LIMIT", "value": "2"}) +#@ resources = {"limits": {"cpu": "0.2", "memory": "0.4Gi"}, "requests": {"cpu": "0.1", "memory": "0.2Gi"}} +#@ output = [] +#@ if config.k8s.dataPersistence == True: +#@ output = [{"volumeMounts": [{"mountPath": "/data", "name": "zookeeper-data-storage"}], "name": input, "image": "hyperledger/fabric-zookeeper", "imagePullPolicy": "Always", "env": env, "resources": resources}] +#@ else: +#@ output = [{"name": input, "image": "hyperledger/fabric-zookeeper", "imagePullPolicy": "Always", "env": env, "resources": resources}] +#@ end +#@ return output +#@ end + +#@ def kafkaContainers(input, id, replicas, zklist, config): +#@ id = id + 1 +#@ env = [{ "name": "KAFKA_BROKER_ID", "value": "{}".format(id)}] +#@ env.append({"name": "KAFKA_ZOOKEEPER_CONNECT", "value": "{}".format(zklist)}) +#@ env.append({"name": "KAFKA_DEFAULT_REPLICATION_FACTOR", "value": "{}".format(replicas)}) +#@ env.append({"name": "KAFKA_MAX_REQUEST_SIZE", "value": "104857600"}) +#@ env.append({"name": "KAFKA_UNCLEAN_LEADER_ELECTION_ENABLE", "value": "true"}) +#@ env.append({"name": "KAFKA_MIN_INSYNC_REPLICAS", "value": "2"}) +#@ env.append({"name": "KAFKA_LOG_DIRS", "value": "/opt/kafka/data" }) +#@ env.append({"name": "KAFKA_MESSAGE_MAX_BYTES", "value": "103809024"}) +#@ env.append({"name": 
"KAFKA_REPLICA_FETCH_MAX_BYTES", "value": "103809024"}) +#@ resources = config.k8s.resources.kafka +#@ output = [] +#@ if config.k8s.dataPersistence == True: +#@ output = [{"volumeMounts": [{"mountPath": "/opt/kafka/data", "name": "kafka-data-storage"}], "name": input, "image": "hyperledger/fabric-kafka", "imagePullPolicy": "Always", "env": env, "resources": resources}] +#@ else: +#@ output = [{"name": input, "image": "hyperledger/fabric-kafka", "imagePullPolicy": "Always", "env": env, "resources": resources}] +#@ end +#@ return output +#@ end + +#@ def mounts(type, nodeOUs): +#@ volumeMounts = [] +#@ if type == "ca": +#@ volumeMounts.append({"mountPath": "/etc/hyperledger/fabric/artifacts/", "name": "cacerts"}) +#@ else: +#@ volumeMounts.append({"mountPath": "/etc/hyperledger/fabric/artifacts/msp/admincerts/", "name": "admincerts"}) +#@ volumeMounts.append({"mountPath": "/etc/hyperledger/fabric/artifacts/msp/cacerts/", "name": "cacerts"}) +#@ volumeMounts.append({"mountPath": "/etc/hyperledger/fabric/artifacts/msp/signcerts/", "name": "signcerts"}) +#@ volumeMounts.append({"mountPath": "/etc/hyperledger/fabric/artifacts/msp/keystore/", "name": "keystore"}) +#@ volumeMounts.append({"mountPath": "/etc/hyperledger/fabric/artifacts/msp/tlscacerts/", "name": "tlscacerts"}) +#@ volumeMounts.append({"mountPath": "/etc/hyperledger/fabric/artifacts/tls/", "name": "tls"}) +#@ if nodeOUs: +#@ volumeMounts.append({"mountPath": "/etc/hyperledger/fabric/artifacts/msp/", "name": "config"}) +#@ end +#@ end +#@ return volumeMounts +#@ end + +#@ def caContainers(input, orgName, config): +#@ ca_image = "hyperledger/fabric-ca:{}".format(config.fabricVersion) +#@ if config.fabricVersion.endswith("-stable"): +#@ ca_image="hyperledger-fabric.jfrog.io/fabric-ca:amd64-{}".format(config.fabricVersion) +#@ end +#@ env = [{"name": "FABRIC_CA_HOME", "value": "/etc/hyperledger/fabric-ca-server"}] +#@ env.append({"name": "FABRIC_CA_SERVER_CA_NAME", "value": "{}".format(input)}) +#@ 
env.append({"name": "FABRIC_CA_SERVER_CA_KEYFILE", "value": "/etc/hyperledger/fabric/artifacts/ca-priv_sk"}) +#@ env.append({"name": "FABRIC_CA_SERVER_CA_CERTFILE", "value": "/etc/hyperledger/fabric/artifacts/ca.{}-cert.pem".format(orgName)}) +#@ if config.tls == "mutual": +#@ env.append({"name": "FABRIC_CA_SERVER_TLS_ENABLED", "value": "true"}) +#@ else: +#@ env.append({"name": "FABRIC_CA_SERVER_TLS_ENABLED", "value": "{}".format(config.tls)}) +#@ end +#@ env.append({"name": "FABRIC_CA_SERVER_TLS_KEYFILE", "value": "/etc/hyperledger/fabric/artifacts/tlsca-priv_sk"}) +#@ env.append({"name": "FABRIC_CA_SERVER_TLS_CERTFILE", "value": "/etc/hyperledger/fabric/artifacts/tlsca.{}-cert.pem".format(orgName)}) +#@ resources = {"limits": {"cpu": "0.1", "memory": "0.2Gi"}, "requests": {"cpu": "0.1", "memory": "0.2Gi"}} +#@ volumeMounts = mounts("ca", config.enableNodeOUs) +#@ output = [{"name": input, "image": ca_image, "imagePullPolicy": "Always", "env": env, "resources": resources, "volumeMounts": volumeMounts, "command": ["fabric-ca-server"], "args": ["start", "-b", "admin: adminpw", "-d"]}] +#@ return output +#@ end + +#@ def mutualTLS(config, type): +#@ output = [] +#@ for i in range(0, len(config.peerOrganizations)): +#@ organization = config.peerOrganizations[i] +#@ if type == "clientrootca": +#@ output.append("/etc/hyperledger/fabric/artifacts/{}/ca.{}-cert.pem".format(organization.name, organization.name)) +#@ elif type == "volumeMounts": +#@ output.append({"mountPath": "/etc/hyperledger/fabric/artifacts/{}".format(organization.name), "name": "{}-clientrootca".format(organization.name)}) +#@ elif type == "volumes": +#@ output.append({"name": "{}-clientrootca".format(organization.name), "configMap": {"name": "{}-ca".format(organization.name)}}) +#@ end +#@ end +#@ for j in range(0, len(config.ordererOrganizations)): +#@ organization = config.ordererOrganizations[j] +#@ if type == "clientrootca": +#@ 
output.append("/etc/hyperledger/fabric/artifacts/{}/ca.{}-cert.pem".format(organization.name, organization.name)) +#@ elif type == "volumeMounts": +#@ output.append({"mountPath": "/etc/hyperledger/fabric/artifacts/{}".format(organization.name), "name": "{}-clientrootca".format(organization.name)}) +#@ elif type == "volumes": +#@ output.append({"name": "{}-clientrootca".format(organization.name), "configMap": {"name": "{}-ca".format(organization.name)}}) +#@ end +#@ end +#@ return output +#@ end + +#@ def peerContainers(input, orgName, config, mspId, peerUniquePort): +#@ endpoint = input +#@ if config.nodeportIP: +#@ endpoint = config.nodeportIP +#@ end +#@ env = [{"name": "CORE_VM_ENDPOINT", "value": "localhost:2375"}] +#@ env.append({"name": "CORE_PEER_LISTENADDRESS", "value": "0.0.0.0:{}".format(peerUniquePort)}) +#@ env.append({"name": "CORE_PEER_CHAINCODELISTENADDRESS", "value": "0.0.0.0:7052"}) +#@ if config.gossipEnable == True: +#@ env.append({"name": "CORE_PEER_GOSSIP_STATE_ENABLED", "value": "true"}) +#@ env.append({"name": "CORE_PEER_GOSSIP_USELEADERELECTION", "value": "true"}) +#@ env.append({"name": "CORE_PEER_GOSSIP_ORGLEADER", "value": "false"}) +#@ else: +#@ env.append({"name": "CORE_PEER_GOSSIP_STATE_ENABLED", "value": "false"}) +#@ env.append({"name": "CORE_PEER_GOSSIP_USELEADERELECTION", "value": "false"}) +#@ env.append({"name": "CORE_PEER_GOSSIP_ORGLEADER", "value": "true"}) +#@ end +#@ if config.tls == "mutual": +#@ env.append({"name": "CORE_PEER_TLS_CLIENTROOTCAS_FILES", "value": " ".join(mutualTLS(config, "clientrootca")) }) +#@ env.append({"name": "CORE_PEER_TLS_CLIENTAUTHREQUIRED", "value": "true"}) +#@ env.append({"name": "CORE_PEER_TLS_ENABLED", "value": "true"}) +#@ else: +#@ env.append({"name": "CORE_PEER_TLS_ENABLED", "value": "{}".format(config.tls)}) +#@ end +#@ env.append({"name": "FABRIC_LOGGING_SPEC", "value": config.peerFabricLoggingSpec}) +#@ env.append({"name": "CORE_PEER_TLS_CERT_FILE", "value": 
"/etc/hyperledger/fabric/artifacts/tls/server.crt"}) +#@ env.append({"name": "CORE_PEER_TLS_KEY_FILE", "value": "/etc/hyperledger/fabric/artifacts/tls/server.key"}) +#@ env.append({"name": "CORE_PEER_TLS_ROOTCERT_FILE", "value": "/etc/hyperledger/fabric/artifacts/msp/tlscacerts/tlsca.{}-cert.pem".format(orgName)}) +#@ env.append({"name": "CORE_PEER_ID", "value": "{}".format(input)}) +#@ env.append({"name": "CORE_PEER_GOSSIP_EXTERNALENDPOINT", "value": "{}:{}".format(endpoint, peerUniquePort)}) +#@ env.append({"name": "CORE_PEER_ADDRESS", "value": "{}:{}".format(input, peerUniquePort)}) +#@ env.append({"name": "CORE_PEER_CHAINCODEADDRESS", "value": "localhost:7052"}) +#@ env.append({"name": "CORE_CHAINCODE_EXECUTETIMEOUT", "value": "1500s"}) +#@ env.append({"name": "CORE_PEER_LOCALMSPID", "value": "{}".format(mspId)}) +#@ env.append({"name": "CORE_PEER_MSPCONFIGPATH", "value": "/etc/hyperledger/fabric/artifacts/msp"}) +#@ env.append({"name": "CORE_PEER_FILESYSTEMPATH", "value": "/shared/data"}) +#@ env.append({"name": "CORE_PEER_GOSSIP_BOOTSTRAP", "value": "{}:{}".format(input, peerUniquePort)}) +#@ env.append({"name": "CORE_OPERATIONS_LISTENADDRESS", "value": ":9443"}) +#@ env.append({"name": "CORE_CHAINCODE_EXECUTETIMEOUT", "value": "1500s"}) +#@ if config.metrics == True: +#@ env.append({"name": "CORE_OPERATIONS_TLS_ENABLED", "value": "false" }) +#@ env.append({"name": "CORE_METRICS_PROVIDER", "value": "prometheus" }) +#@ end +#@ volumeMounts = mounts("peer", config.enableNodeOUs) +#@ if config.tls == "mutual": +#@ volumeMounts += mutualTLS(config, "volumeMounts") +#@ end +#@ if config.k8s.dataPersistence == True or config.k8s.dataPersistence == "local": +#@ volumeMounts.append({"mountPath": "/shared/data", "name": "peer-data-storage"}) +#@ end +#@ resources = config.k8s.resources.peers +#@ dindResources = config.k8s.resources.dind +#@ peer_image = "hyperledger/fabric-peer:{}".format(config.fabricVersion) +#@ if config.fabricVersion.endswith("-stable"): +#@ 
env.append({"name": "CORE_CHAINCODE_BUILDER", "value": "hyperledger-fabric.jfrog.io/fabric-ccenv:amd64-{}".format(config.fabricVersion)}) +#@ env.append({"name": "CORE_CHAINCODE_JAVA_RUNTIME", "value": "hyperledger-fabric.jfrog.io/fabric-javaenv:amd64-{}".format(config.fabricVersion)}) +#@ env.append({"name": "CORE_CHAINCODE_GOLANG_RUNTIME", "value": "hyperledger/fabric-baseos:amd64-0.4.18"}) +#@ env.append({"name": "CORE_CHAINCODE_NODE_RUNTIME", "value": "hyperledger/fabric-baseimage:amd64-0.4.18"}) +#@ peer_image="hyperledger-fabric.jfrog.io/fabric-peer:amd64-{}".format(config.fabricVersion) +#@ end + +#@ if config.dbType == "couchdb": +#@ container = {} +#@ env.append({"name": "CORE_LEDGER_STATE_STATEDATABASE", "value": "CouchDB"}) +#@ env.append({"name": "CORE_LEDGER_STATE_COUCHDBCONFIG_COUCHDBADDRESS", "value": "localhost:5984"}) +#@ if config.k8s.dataPersistence == True or config.k8s.dataPersistence == "local": +#@ couchdbMount = [{"mountPath": "/opt/couchdb/data", "name": "couchdb-data-storage"}] +#@ container = {"name": "couchdb-{}".format(input), "image": "hyperledger/fabric-couchdb", "imagePullPolicy": "Always", "resources": config.k8s.resources.couchdb, "volumeMounts": couchdbMount} +#@ else: +#@ container = {"name": "couchdb-{}".format(input), "image": "hyperledger/fabric-couchdb", "imagePullPolicy": "Always", "resources": config.k8s.resources.couchdb} +#@ end +#@ output = [{"name": "dind", "image": "docker:dind", "args": ["dockerd", "-H tcp://0.0.0.0:2375"], "securityContext": {"privileged": True}, "resources": dindResources}, {"name": input, "image": peer_image, "imagePullPolicy": "Always", "env": env, "volumeMounts": volumeMounts, "command": ["peer"], "args": ["node", "start"], "resources": resources}, container] +#@ else: +#@ output = [{"name": "dind", "image": "docker:dind", "args": ["dockerd", "-H tcp://0.0.0.0:2375"], "securityContext": {"privileged": True}, "resources": dindResources}, {"name": input, "image": peer_image, "imagePullPolicy": 
"Always", "env": env, "volumeMounts": volumeMounts, "command": ["peer"], "args": ["node", "start"], "resources": resources}] +#@ end +#@ return output +#@ end + +#@ def ordererContainers(input, orgName, config, mspId, ordererUniquePort): +#@ env = [{"name": "ORDERER_GENERAL_LISTENADDRESS", "value": "0.0.0.0"}] +#@ env.append({"name": "ORDERER_GENERAL_LISTENPORT", "value": "{}".format(ordererUniquePort)}) +#@ env.append({"name": "ORDERER_GENERAL_GENESISMETHOD", "value": "file"}) +#@ env.append({"name": "FABRIC_LOGGING_SPEC", "value": config.ordererFabricLoggingSpec}) +#@ if config.tls == "mutual": +#@ env.append({"name": "ORDERER_GENERAL_TLS_CLIENTROOTCAS", "value": "[{}]".format(", ".join(mutualTLS(config, "clientrootca"))) }) +#@ env.append({"name": "ORDERER_GENERAL_TLS_CLIENTAUTHREQUIRED", "value": "true"}) +#@ env.append({"name": "ORDERER_GENERAL_TLS_ENABLED", "value": "true"}) +#@ else: +#@ env.append({"name": "ORDERER_GENERAL_TLS_ENABLED", "value": "{}".format(config.tls)}) +#@ end +#@ env.append({"name": "ORDERER_GENERAL_GENESISFILE", "value": "/etc/hyperledger/fabric/genesisblock/genesis.block"}) +#@ env.append({"name": "ORDERER_GENERAL_LOCALMSPID", "value": "{}".format(mspId)}) +#@ env.append({"name": "ORDERER_GENERAL_LOCALMSPDIR", "value": "/etc/hyperledger/fabric/artifacts/msp"}) +#@ env.append({"name": "ORDERER_GENERAL_TLS_SERVERHOSTOVERRIDE", "value": input}) +#@ env.append({"name": "ORDERER_GENERAL_TLS_PRIVATEKEY", "value": "/etc/hyperledger/fabric/artifacts/tls/server.key"}) +#@ env.append({"name": "ORDERER_GENERAL_TLS_CERTIFICATE", "value": "/etc/hyperledger/fabric/artifacts/tls/server.crt"}) +#@ env.append({"name": "ORDERER_GENERAL_TLS_ROOTCAS", "value": "[/etc/hyperledger/fabric/artifacts/msp/tlscacerts/tlsca.{}-cert.pem]".format(orgName)}) +#@ env.append({"name": "ORDERER_GENERAL_CLUSTER_CLIENTPRIVATEKEY", "value": "/etc/hyperledger/fabric/artifacts/tls/server.key"}) +#@ env.append({"name": "ORDERER_GENERAL_CLUSTER_CLIENTCERTIFICATE", "value": 
"/etc/hyperledger/fabric/artifacts/tls/server.crt"}) +#@ env.append({"name": "ORDERER_FILELEDGER_LOCATION", "value": "/shared/data"}) +#@ env.append({"name": "ORDERER_CONSENSUS_WALDIR", "value": "/shared/data/etcdraft/wal"}) +#@ env.append({"name": "ORDERER_CONSENSUS_SNAPDIR", "value": "/shared/data/etcdraft/snapshot"}) +#@ env.append({"name": "ORDERER_OPERATIONS_LISTENADDRESS", "value": ":8443"}) +#@ if config.metrics == True: +#@ env.append({"name": "ORDERER_OPERATIONS_TLS_ENABLED", "value": "false" }) +#@ env.append({"name": "ORDERER_METRICS_PROVIDER", "value": "prometheus" }) +#@ end +#@ volumeMounts = mounts("orderer", config.enableNodeOUs) +#@ volumeMounts.append({"mountPath": "/etc/hyperledger/fabric/genesisblock", "name": "genesisblock"}) +#@ if config.tls == "mutual": +#@ volumeMounts += mutualTLS(config, "volumeMounts") +#@ end +#@ if config.k8s.dataPersistence == True or config.k8s.dataPersistence == "local": +#@ volumeMounts.append({"mountPath": "/shared/data", "name": "orderer-data-storage"}) +#@ end +#@ resources = config.k8s.resources.orderers +#@ orderer_image = "hyperledger/fabric-orderer:{}".format(config.fabricVersion) +#@ if config.fabricVersion.endswith("-stable"): +#@ orderer_image="hyperledger-fabric.jfrog.io/fabric-orderer:amd64-{}".format(config.fabricVersion) +#@ end +#@ output = [{"name": input, "image": orderer_image, "imagePullPolicy": "Always", "env": env, "resources": resources, "volumeMounts": volumeMounts, "command": ["orderer"]}] +#@ return output +#@ end + +#@ def kafkaSpec(input, id, replicas, type, numZK, config): +#@ type = {} +#@ specData = {} +#@ if input.startswith("zookeeper"): +#@ type = "zookeeper" +#@ if config.k8s.dataPersistence == True: +#@ specData = {"volumes": [{"name": "zookeeper-data-storage", "persistentVolumeClaim": {"claimName": "{}-data".format(input)}}], "affinity": {"podAntiAffinity": {"preferredDuringSchedulingIgnoredDuringExecution": [{"weight": 1, "podAffinityTerm": {"labelSelector": {"matchExpressions": 
[{"key": "type", "operator": "In", "values": ["zookeeper"]}]}, "topologyKey": "kubernetes.io/hostname"}}]}}, +#@ "containers": zkContainers(input, id, zkList(numZK, type), config)} +#@ else: +#@ specData = {"affinity": {"podAntiAffinity": {"preferredDuringSchedulingIgnoredDuringExecution": [{"weight": 1, "podAffinityTerm": {"labelSelector": {"matchExpressions": [{"key": "type", "operator": "In", "values": ["zookeeper"]}]}, "topologyKey": "kubernetes.io/hostname"}}]}}, +#@ "containers": zkContainers(input, id, zkList(numZK, type), config)} +#@ end +#@ elif input.startswith("kafka"): +#@ type = "kafka" +#@ if config.k8s.dataPersistence == True: +#@ specData = {"volumes": [{"name": "kafka-data-storage", "persistentVolumeClaim": {"claimName": "{}-data".format(input)}}], "affinity": {"podAntiAffinity": {"preferredDuringSchedulingIgnoredDuringExecution": [{"weight": 1, "podAffinityTerm": {"labelSelector": {"matchExpressions": [{"key": "type", "operator": "In", "values": ["kafka"]}]}, "topologyKey": "kubernetes.io/hostname"}}]}}, +#@ "containers": kafkaContainers(input, id, replicas, zkList(numZK, type), config)} +#@ else: +#@ specData = {"affinity": {"podAntiAffinity": {"preferredDuringSchedulingIgnoredDuringExecution": [{"weight": 1, "podAffinityTerm": {"labelSelector": {"matchExpressions": [{"key": "type", "operator": "In", "values": ["kafka"]}]}, "topologyKey": "kubernetes.io/hostname"}}]}}, +#@ "containers": kafkaContainers(input, id, replicas, zkList(numZK, type), config)} +#@ end +#@ end +#@ return specData +#@ end + +#@ def spec(input, orgName, type, config, mspId, port): +#@ specData = {} +#@ metadata = {} +#@ initVolumes = [{"name": "cacerts", "configMap": {"name": "{}-msp".format(input), "items": [{"key": "cacerts", "path": "ca.{}-cert.pem".format(orgName)}]}}, +#@ {"name": "signcerts", "configMap": {"name": "{}-msp".format(input), "items": [{"key": "signcerts", "path": "{}.{}-cert.pem".format(input, orgName)}]}}, +#@ {"name": "tlscacerts", "configMap": 
{"name": "{}-msp".format(input), "items": [{"key": "tlscacerts", "path": "tlsca.{}-cert.pem".format(orgName)}]}}, +#@ {"name": "keystore", "configMap": {"name": "{}-msp".format(input), "items": [{"key": "keystore", "path": "priv_sk"}]}}, +#@ {"name": "tls", "configMap": {"name": "{}-tls".format(input)}}] +#@ if config.enableNodeOUs: +#@ initVolumes.append({"name": "admincerts", "configMap": {"name": "{}-admincerts".format(orgName)}}) +#@ initVolumes.append({"name": "config", "configMap": {"name": "{}-msp".format(input), "items": [{"key": "config", "path": "config.yaml"}]}}) +#@ else: +#@ initVolumes.append({"name": "admincerts", "configMap": {"name": "{}-admincerts".format(orgName), "items": [{"key": "admincerts", "path": "Admin@{}-cert.pem".format(orgName)}]}}) +#@ end +#@ if type == "ca": +#@ volumes = [{"name": "cacerts", "configMap": {"name": "{}-ca".format(orgName)}}] +#@ specData = {"volumes": volumes, "affinity": {"podAntiAffinity": {"preferredDuringSchedulingIgnoredDuringExecution": [{"weight": 1, "podAffinityTerm": {"labelSelector": {"matchExpressions": [{"key": "type", "operator": "In", "values": ["{}".format(type)]}]}, "topologyKey": "kubernetes.io/hostname"}}]}}, +#@ "containers": caContainers(input, orgName, config)} +#@ elif type == "orderer": +#@ volumes = initVolumes +#@ volumes.append({"name": "genesisblock", "secret": {"secretName": "genesisblock"}}) +#@ if config.k8s.dataPersistence == True: +#@ volumes.append({"name": "orderer-data-storage", "persistentVolumeClaim": {"claimName": "{}-data".format(input)}}) +#@ elif config.k8s.dataPersistence == "local": +#@ volumes.append({"name": "orderer-data-storage", "hostPath": {"path": "/shared/{}-data".format(input)}}) +#@ end +#@ if config.tls == "mutual": +#@ volumes += mutualTLS(config, "volumes") +#@ end +#@ specData = {"volumes": volumes, "affinity": {"podAntiAffinity": {"preferredDuringSchedulingIgnoredDuringExecution": [{"weight": 1, "podAffinityTerm": {"labelSelector": {"matchExpressions": 
[{"key": "type", "operator": "In", "values": ["{}".format(type)]}]}, "topologyKey": "kubernetes.io/hostname"}}]}}, +#@ "containers": ordererContainers(input, orgName, config, mspId, port)} +#@ elif type == "peer": +#@ volumes = initVolumes +#@ if config.k8s.dataPersistence == True: +#@ volumes.append({"name": "peer-data-storage", "persistentVolumeClaim": {"claimName": "{}-data".format(input)}}) +#@ if config.dbType == "couchdb": +#@ volumes.append({"name": "couchdb-data-storage", "persistentVolumeClaim": {"claimName": "couchdb-{}-data".format(input)}}) +#@ end +#@ elif config.k8s.dataPersistence == "local": +#@ volumes.append({"name": "peer-data-storage", "hostPath": {"path": "/shared/{}-data".format(input)}}) +#@ if config.dbType == "couchdb": +#@ volumes.append({"name": "couchdb-data-storage", "hostPath": {"path": "/shared/couchdb-{}-data".format(input)}}) +#@ end +#@ end +#@ if config.tls == "mutual": +#@ volumes += mutualTLS(config, "volumes") +#@ end +#@ specData = {"volumes": volumes, "affinity": {"podAntiAffinity": {"preferredDuringSchedulingIgnoredDuringExecution": [{"weight": 1, "podAffinityTerm": {"labelSelector": {"matchExpressions": [{"key": "type", "operator": "In", "values": ["{}".format(type)]}]}, "topologyKey": "kubernetes.io/hostname"}}]}}, +#@ "containers": peerContainers(input, orgName, config, mspId, port)} +#@ end +#@ selector = {"matchLabels": {"k8s-app": input, "type": type}} +#@ if config.metrics == True: +#@ if type == "orderer": +#@ metadata = {"labels": {"k8s-app": input, "type": type}, "annotations": {"prometheus.io/scrape": "true", "prometheus.io/path": "/metrics", "prometheus.io/port": "8443", "prometheus.io/scheme": "http"}} +#@ elif type == "peer": +#@ metadata = {"labels": {"k8s-app": input, "type": type}, "annotations": {"prometheus.io/scrape": "true", "prometheus.io/path": "/metrics", "prometheus.io/port": "9443", "prometheus.io/scheme": "http"}} +#@ else: +#@ metadata = {"labels": {"k8s-app": input, "type": type}} +#@ end +#@ 
else: +#@ metadata = {"labels": {"k8s-app": input, "type": type}} +#@ end +#@ template = {"metadata": metadata, "spec": specData} +#@ output = {"selector": selector, "serviceName": input, "replicas": 1, "template": template} +#@ return output +#@ end + +#@ def zkList(numZK, type): +#@ output = [] +#@ for i in range(0, config.kafka.numZookeepers): +#@ id = i + 1 +#@ if type == "zookeeper": +#@ id = i + 1 +#@ output.append("server.{}=zookeeper{}:2888:3888".format(id, i)) +#@ zkList = " ".join(output) +#@ elif type == "kafka": +#@ output.append("zookeeper{}:2181".format(i)) +#@ zkList = ", ".join(output) +#@ end +#@ end +#@ return zkList +#@ end + +#@ config = data.values +#@ if config.orderer.ordererType == "kafka": +#@ for i in range(0, config.kafka.numZookeepers): +--- +apiVersion: v1 +kind: Pod +metadata: + name: #@ "zookeeper{}".format(i) + labels: + k8s-app: #@ "zookeeper{}".format(i) + type: zookeeper +spec: #@ kafkaSpec("zookeeper{}".format(i), i, 0, "zookeeper", config.kafka.numZookeepers, config) +#@ end +#@ for j in range(0, config.kafka.numKafka): +--- +apiVersion: v1 +kind: Pod +metadata: + name: #@ "kafka{}".format(j) + labels: + k8s-app: #@ "kafka{}".format(j) + type: kafka +spec: #@ kafkaSpec("kafka{}".format(j), j, config.kafka.numKafkaReplications, "kafka", config.kafka.numZookeepers, config) +#@ end +#@ end + +#@ peerUniquePort = 31000 +#@ for i in range(0, len(config.peerOrganizations)): +#@ organization = config.peerOrganizations[i] +#@ for j in range(0, organization.numCa): +--- +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: #@ "ca{}-{}".format(j, organization.name) +spec: #@ spec("ca{}-{}".format(j, organization.name), organization.name, "ca", data.values, organization.mspId, "") +#@ end +#@ for j in range(0, organization.numPeers): +--- +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: #@ "peer{}-{}".format(j, organization.name) +spec: #@ spec("peer{}-{}".format(j, organization.name), organization.name, "peer", config, 
organization.mspId, peerUniquePort) +#@ peerUniquePort += 1 +#@ end +#@ end + +#@ ordererUniquePort = 30000 +#@ num_organizations = len(config.ordererOrganizations) +#@ for i in range(0, num_organizations): +#@ organization = config.ordererOrganizations[i] +#@ for j in range(0, organization.numCa): +--- +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: #@ "ca{}-{}".format(j, organization.name) +spec: #@ spec("ca{}-{}".format(j, organization.name), organization.name, "ca", data.values, organization.mspId, "") +#@ end +#@ numOderers = organization.numOderers +#@ for j in range(0, numOderers): +--- +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: #@ "orderer{}-{}".format(j, organization.name) +spec: #@ spec("orderer{}-{}".format(j, organization.name), organization.name, "orderer", config, organization.mspId, ordererUniquePort) +#@ ordererUniquePort += 1 +#@ end +#@ end \ No newline at end of file diff --git a/app/platform/fabric/e2e-test/specs/templates/k8s/fabric-k8s-pvc.yaml b/app/platform/fabric/e2e-test/specs/templates/k8s/fabric-k8s-pvc.yaml new file mode 100644 index 000000000..357810658 --- /dev/null +++ b/app/platform/fabric/e2e-test/specs/templates/k8s/fabric-k8s-pvc.yaml @@ -0,0 +1,91 @@ +#! Copyright IBM Corp. All Rights Reserved. +#! +#! 
SPDX-License-Identifier: Apache-2.0 + +#@ load("@ytt:data", "data") +#@ config = data.values +#@ if config.k8s.dataPersistence == True: +#@ if config.orderer.ordererType == "kafka": +#@ for i in range(0, config.kafka.numKafka): +--- +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: #@ "kafka{}-data".format(i) +spec: + storageClassName: #@ config.k8s.storageClass + accessModes: + - ReadWriteMany + resources: + requests: + storage: #@ config.k8s.storageCapacity +#@ end + +#@ for i in range(0, config.kafka.numZookeepers): +--- +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: #@ "zookeeper{}-data".format(i) +spec: + storageClassName: #@ config.k8s.storageClass + accessModes: + - ReadWriteMany + resources: + requests: + storage: #@ config.k8s.storageCapacity +#@ end +#@ end + + +#@ for i in range(0, len(config.peerOrganizations)): +#@ organization = config.peerOrganizations[i] +#@ for j in range(0, organization.numPeers): +--- +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: #@ "peer{}-{}-data".format(j,organization.name) +spec: + storageClassName: #@ config.k8s.storageClass + accessModes: + - ReadWriteMany + resources: + requests: + storage: #@ config.k8s.storageCapacity +#@ if config.dbType == "couchdb": +--- +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: #@ "couchdb-peer{}-{}-data".format(j,organization.name) +spec: + storageClassName: #@ config.k8s.storageClass + accessModes: + - ReadWriteMany + resources: + requests: + storage: #@ config.k8s.storageCapacity +#@ end +#@ end +#@ end +#@ num_ordererOrgs = len(config.ordererOrganizations) +#@ for i in range(0, num_ordererOrgs): +#@ organization = config.ordererOrganizations[i] +#@ numOderers = organization.numOderers +#@ for j in range(0, numOderers): +--- +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: #@ "orderer{}-{}-data".format(j,organization.name) +spec: + storageClassName: #@ config.k8s.storageClass + accessModes: + - ReadWriteMany + 
resources: + requests: + storage: #@ config.k8s.storageCapacity +#@ end +#@ end +#@ end diff --git a/app/platform/fabric/e2e-test/specs/templates/k8s/fabric-k8s-service.yaml b/app/platform/fabric/e2e-test/specs/templates/k8s/fabric-k8s-service.yaml new file mode 100755 index 000000000..0c7b0b42c --- /dev/null +++ b/app/platform/fabric/e2e-test/specs/templates/k8s/fabric-k8s-service.yaml @@ -0,0 +1,131 @@ +#! Copyright IBM Corp. All Rights Reserved. +#! +#! SPDX-License-Identifier: Apache-2.0 + +#@ load("@ytt:data", "data") +#@ config = data.values +#@ peerUniquePort = 31000 +#@ for i in range(0, len(config.peerOrganizations)): +#@ organization = config.peerOrganizations[i] +#@ for j in range(0, organization.numCa): +--- +kind: Service +apiVersion: v1 +metadata: + labels: + k8s-app: #@ "ca{}-{}".format(j, organization.name) + name: #@ "ca{}-{}".format(j, organization.name) +spec: + selector: + k8s-app: #@ "ca{}-{}".format(j, organization.name) + type: #@ config.k8s.serviceType + ports: + - name: port1 + port: 7054 +#@ end +#@ for j in range(0, organization.numPeers): +--- +kind: Service +apiVersion: v1 +metadata: + labels: + k8s-app: #@ "peer{}-{}".format(j, organization.name) + name: #@ "peer{}-{}".format(j, organization.name) +spec: + selector: + k8s-app: #@ "peer{}-{}".format(j, organization.name) + type: #@ config.k8s.serviceType + ports: + - name: port1 + port: #@ peerUniquePort +#@ if config.k8s.serviceType == "NodePort": + nodePort: #@ peerUniquePort +#@ end + - name: port2 + port: 9443 +#@ peerUniquePort += 1 +#@ end +#@ end +#@ num_ordererOrgs = len(config.ordererOrganizations) +#@ ordererUniquePort = 30000 +#@ for i in range(0, num_ordererOrgs): +#@ organization = config.ordererOrganizations[i] +#@ numOderers = organization.numOderers +#@ for j in range(0, organization.numCa): +--- +kind: Service +apiVersion: v1 +metadata: + labels: + k8s-app: #@ "ca{}-{}".format(i, organization.name) + name: #@ "ca{}-{}".format(i, organization.name) +spec: + selector: + 
k8s-app: #@ "ca{}-{}".format(i, organization.name) + type: #@ config.k8s.serviceType + ports: + - name: port1 + port: 7054 +#@ end +#@ for j in range(0, numOderers): +--- +kind: Service +apiVersion: v1 +metadata: + labels: + k8s-app: #@ "orderer{}-{}".format(j, organization.name) + name: #@ "orderer{}-{}".format(j, organization.name) +spec: + selector: + k8s-app: #@ "orderer{}-{}".format(j, organization.name) + type: #@ config.k8s.serviceType + ports: + - name: port1 + port: #@ ordererUniquePort +#@ if config.k8s.serviceType == "NodePort": + nodePort: #@ ordererUniquePort +#@ end + - name: port2 + port: 8443 +#@ ordererUniquePort += 1 +#@ end +#@ end + +#@ if config.orderer.ordererType == "kafka": +#@ for i in range(0, config.kafka.numKafka): +--- +kind: Service +apiVersion: v1 +metadata: + labels: + k8s-app: #@ "kafka{}".format(i) + name: #@ "kafka{}".format(i) +spec: + selector: + k8s-app: #@ "kafka{}".format(i) + ports: + - name: port1 + port: 9092 + - name: port2 + port: 9093 +#@ end +#@ for j in range(0, config.kafka.numZookeepers): +--- +kind: Service +apiVersion: v1 +metadata: + labels: + k8s-app: #@ "zookeeper{}".format(j) + name: #@ "zookeeper{}".format(j) +spec: + selector: + k8s-app: #@ "zookeeper{}".format(j) + ports: + - name: port1 + port: 2888 + - name: port2 + port: 3888 + - name: port3 + port: 2181 +#@ end +#@ end \ No newline at end of file diff --git a/app/platform/fabric/sync/FabricEvent.js b/app/platform/fabric/sync/FabricEvent.js index f9eae25c4..20ea73a41 100644 --- a/app/platform/fabric/sync/FabricEvent.js +++ b/app/platform/fabric/sync/FabricEvent.js @@ -43,10 +43,6 @@ class FabricEvent { } this.createChannelEventHub(channel); - logger.debug( - 'initialize() - Successfully created channel event hub for [%s]', - channel_name - ); } } @@ -73,6 +69,10 @@ class FabricEvent { this.connectChannelEventHub(channel.getName(), eventHub); // Set channel event hub to map FabricEvent.channelEventHubs.set(channel.getName(), eventHub); + logger.debug( + 
'Successfully created channel event hub for [%s]', + channel.getName() + ); } /* eslint-disable */ /** diff --git a/ci/azure-pipelines.yml b/ci/azure-pipelines.yml index 7d590e393..284dd097c 100644 --- a/ci/azure-pipelines.yml +++ b/ci/azure-pipelines.yml @@ -4,11 +4,18 @@ name: $(SourceBranchName)-$(Date:yyyyMMdd)$(Rev:.rrr) trigger: - master +variables: + GOPATH: $(Agent.BuildDirectory)/go + PATH: $(Agent.BuildDirectory)/go/bin:/bin:/usr/bin:/sbin:/usr/sbin:/usr/local/bin:/usr/local/sbin + GO_VER: 1.13.8 + NODE_VER: 8.11 + jobs: - job: TestsWithCoverage pool: vmImage: ubuntu-16.04 steps: + - template: install_deps.yml - checkout: self - script: | @@ -42,7 +49,7 @@ jobs: - checkout: self - script: | npm install - npm run e2e-test-sanitycheck:ci + npm run e2e-api-test:ci displayName: Run Sanity Checks - job: GUITests diff --git a/ci/install_deps.yml b/ci/install_deps.yml index 9c32bc0dc..bd33e831f 100644 --- a/ci/install_deps.yml +++ b/ci/install_deps.yml @@ -3,11 +3,25 @@ steps: - task: NodeTool@0 inputs: - versionSpec: '8.11' + versionSpec: $(NODE_VER) displayName: Install NodeJs - - task: UsePythonVersion@0 + - task: GoTool@0 inputs: - versionSpec: '2.7' - displayName: Install Python - - script: pip install virtualenv - displayName: Install Virtualenv + version: $(GO_VER) + goPath: $(GOPATH) + - task: Go@0 + inputs: + command: 'get' + arguments: '-u github.com/onsi/ginkgo/ginkgo' + - task: Go@0 + inputs: + command: 'get' + arguments: '-u github.com/onsi/gomega/...' 
+ - task: Go@0 + inputs: + command: 'get' + arguments: '-u gopkg.in/yaml.v2' + - task: Go@0 + inputs: + command: 'get' + arguments: '-u github.com/pkg/errors' \ No newline at end of file diff --git a/client/test/E2E-TEST-README.md b/client/e2e-test/E2E-TEST-README.md similarity index 100% rename from client/test/E2E-TEST-README.md rename to client/e2e-test/E2E-TEST-README.md diff --git a/client/e2e-test/configs/config_guitest.json b/client/e2e-test/configs/config_guitest.json new file mode 100644 index 000000000..919f4eabc --- /dev/null +++ b/client/e2e-test/configs/config_guitest.json @@ -0,0 +1,9 @@ +{ + "network-configs": { + "org1-network": { + "name": "org1-network", + "profile": "./connection-profile/org1-network-for-guitest.json" + } + }, + "license": "Apache-2.0" +} diff --git a/client/e2e-test/configs/connection-profile/org1-network-for-guitest.json b/client/e2e-test/configs/connection-profile/org1-network-for-guitest.json new file mode 100644 index 000000000..f20c25ec6 --- /dev/null +++ b/client/e2e-test/configs/connection-profile/org1-network-for-guitest.json @@ -0,0 +1,59 @@ +{ + "name": "first-network", + "version": "1.0.0", + "license": "Apache-2.0", + "client": { + "tlsEnable": true, + "adminUser": "admin", + "adminPassword": "adminpw", + "enableAuthentication": true, + "organization": "org1", + "connection": { + "timeout": { + "peer": { + "endorser": "300" + }, + "orderer": "300" + } + } + }, + "channels": { + "testorgschannel0": { + "peers": { + "peer0-org1": {} + }, + "connection": { + "timeout": { + "peer": { + "endorser": "6000", + "eventHub": "6000", + "eventReg": "6000" + } + } + } + } + }, + "organizations": { + "org1": { + "mspid": "Org1ExampleCom", + "fullpath": true, + "adminPrivateKey": { + "path": "/tmp/crypto/peerOrganizations/org1/users/Admin@org1/msp/keystore/priv_sk" + }, + "signedCert": { + "path": "/tmp/crypto/peerOrganizations/org1/users/Admin@org1/msp/signcerts/Admin@org1-cert.pem" + } + } + }, + "peers": { + "peer0-org1": { + 
"tlsCACerts": { + "path": "/tmp/crypto/peerOrganizations/org1/peers/peer0-org1.org1/tls/ca.crt" + }, + "url": "grpcs://peer0-org1:31000", + "grpcOptions": { + "ssl-target-name-override": "peer0-org1" + } + } + } +} diff --git a/app/platform/fabric/e2e-test/feature/docker-compose/docker-compose-explorer.yaml b/client/e2e-test/docker-compose-explorer.yaml similarity index 68% rename from app/platform/fabric/e2e-test/feature/docker-compose/docker-compose-explorer.yaml rename to client/e2e-test/docker-compose-explorer.yaml index efac346e6..2f38d8d31 100644 --- a/app/platform/fabric/e2e-test/feature/docker-compose/docker-compose-explorer.yaml +++ b/client/e2e-test/docker-compose-explorer.yaml @@ -1,14 +1,17 @@ + # SPDX-License-Identifier: Apache-2.0 version: '2.1' volumes: pgdata: walletstore: + grafana-storage: + prometheus-storage: networks: mynetwork.com: external: - name: ${CORE_PEER_NETWORKID}_behave + name: configfiles_default services: @@ -21,7 +24,7 @@ services: - DATABASE_USERNAME=hppoc - DATABASE_PASSWORD=password volumes: - - ../../../../../persistence/fabric/postgreSQL/db/createdb.sh:/docker-entrypoint-initdb.d/createdb.sh + - ./../../app/persistence/fabric/postgreSQL/db/createdb.sh:/docker-entrypoint-initdb.d/createdb.sh - pgdata:/var/lib/postgresql/data networks: - mynetwork.com @@ -36,16 +39,16 @@ services: - DATABASE_PASSWD=password - LOG_LEVEL_APP=debug - LOG_LEVEL_DB=debug - - LOG_LEVEL_CONSOLE=info + - LOG_LEVEL_CONSOLE=debug - LOG_CONSOLE_STDOUT=true - DISCOVERY_AS_LOCALHOST=false volumes: - - ./../explorer-configs/config-${NETWORK_PROFILE}.json:/opt/explorer/app/platform/fabric/config.json - - ./../explorer-configs/connection-profile:/opt/explorer/app/platform/fabric/connection-profile - - ./../configs/${CORE_PEER_NETWORKID}:/tmp/crypto + - ./configs/config_guitest.json:/opt/explorer/app/platform/fabric/config.json + - ./configs/connection-profile:/opt/explorer/app/platform/fabric/connection-profile + - 
${GOPATH}/src/github.com/hyperledger/fabric-test/tools/operator/crypto-config:/tmp/crypto - walletstore:/opt/wallet command: sh -c "sleep 5&& node /opt/explorer/main.js && tail -f /dev/null" ports: - 8090:8080 networks: - - mynetwork.com \ No newline at end of file + - mynetwork.com diff --git a/client/test/docker-compose.yaml b/client/e2e-test/docker-compose.yaml similarity index 100% rename from client/test/docker-compose.yaml rename to client/e2e-test/docker-compose.yaml diff --git a/client/e2e-test/gui-e2e-test-start.sh b/client/e2e-test/gui-e2e-test-start.sh new file mode 100755 index 000000000..f790ac294 --- /dev/null +++ b/client/e2e-test/gui-e2e-test-start.sh @@ -0,0 +1,67 @@ +#!/bin/bash + +# +# SPDX-License-Identifier: Apache-2.0 +# + + +ROOTDIR="$(cd "$(dirname "$0")"/../.. && pwd)" + +TIMEOUT=600 +DELAY=10 + +go get -d github.com/hyperledger/fabric-test + +echo "#### Downloaded fabric-test repo" + +# An error that we can ignore is raised when getting fabric-test package +# So we need to enable the error abort option after getting fabric-test pkg +set -e + +pushd $GOPATH/src/github.com/hyperledger/fabric-test +git checkout release-1.4 +git submodule update --init --recursive +git submodule foreach git checkout release-1.4 +echo "#### Updated each sub-module under fabric-test repo" +popd + +pushd $GOPATH/src/github.com/hyperledger/fabric-test/tools/PTE +npm install fabric-client@1.4.5 +npm install fabric-ca-client@1.4.5 +echo "#### Installed required node packages" +popd + +# +# Start selenium standalone server +# +pushd $ROOTDIR/client/e2e-test +export NETWORK_ID=configfiles_default +network_check=$(docker network ls --filter name=${NETWORK_ID} -q | wc -l) +if [ $network_check -eq 0 ]; then + docker network create configfiles_default +fi +echo "#### Created network : ${NETWORK_ID}" + +docker-compose down -v +docker-compose -f docker-compose-explorer.yaml down -v +docker-compose up -d +echo "#### Starting selenium containers ..." 
+ +rc=1 +starttime=$(date +%s) +while + [[ "$(($(date +%s) - starttime))" -lt "$TIMEOUT" ]] && [[ $rc -ne 0 ]]; +do + sleep $DELAY + set -x + docker logs selenium-chrome | grep -q "The node is registered to the hub and ready to use" + rc=$? + set +x +done +echo "#### Started selenium containers" +popd + +pushd $ROOTDIR/client +echo "#### Starting WebDriverI/O based test suite" +npx wdio ./e2e-test/wdio.conf.js +popd \ No newline at end of file diff --git a/client/e2e-test/specs/chaincode/chaincode_view.js b/client/e2e-test/specs/chaincode/chaincode_view.js new file mode 100644 index 000000000..56a9b075c --- /dev/null +++ b/client/e2e-test/specs/chaincode/chaincode_view.js @@ -0,0 +1,41 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + */ + +/* eslint-disable no-unused-expressions */ + +require('chai').should(); + +function test() { + describe('Explorer chaincode view', () => { + context('Chaincode list', () => { + it('should have an entry: BE-688', () => { + // Num. of blocks + var ccLink = browser.$( + '#root > div > div:nth-child(1) > div:nth-child(2) > nav > div > ul > li:nth-child(5)' + ); + ccLink.click(); + browser.pause(5000); + + var ccName = browser.$( + '#root > div > div > div > div > div > div > div > div.rt-table > div.rt-tbody > div > div > div:nth-child(1)' + ); + ccName.getText().should.be.equal('samplecc'); + + var ccTxCount = browser.$( + '#root > div > div > div > div > div > div > div > div.rt-table > div.rt-tbody > div > div > div:nth-child(4)' + ); + ccTxCount.getText().should.be.equal('40'); + + var ccChName = browser.$( + '#root > div > div > div > div > div > div > div > div.rt-table > div.rt-tbody > div > div > div:nth-child(2)' + ); + ccChName.getText().should.be.equal('testorgschannel0'); + }); + }); + }); +} + +module.exports = { + test: test +}; diff --git a/client/e2e-test/specs/dashboard/dashboard.js b/client/e2e-test/specs/dashboard/dashboard.js new file mode 100644 index 000000000..8b8d976b8 --- /dev/null +++ 
b/client/e2e-test/specs/dashboard/dashboard.js @@ -0,0 +1,116 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + */ + +/* eslint-disable no-unused-expressions */ + +require('chai').should(); +var expect = require('chai').expect; + +function test() { + describe('Dashboard view', () => { + context('Statistics table', () => { + it('should have a metrics', () => { + browser.setTimeout({ + timeouts: 100000 + }); + + // Num. of blocks + var nodeNum = browser + .$( + '#root > div > div > div > div > div:nth-child(1) > div > div > div:nth-child(1) > div > div:nth-child(2) > h1' + ) + .getText(); + nodeNum.should.be.equal('6'); + + // Num. of TX + var txNum = browser + .$( + '#root > div > div > div > div > div:nth-child(1) > div > div > div:nth-child(2) > div > div:nth-child(2) > h1' + ) + .getText(); + txNum.should.be.equal('44'); + + // Num. of Nodes + nodeNum = browser + .$( + '#root > div > div > div > div > div:nth-child(1) > div > div > div:nth-child(3) > div > div:nth-child(2) > h1' + ) + .getText(); + nodeNum.should.be.equal('4'); + + // Num. 
of CC + var ccNum = browser + .$( + '#root > div > div > div > div > div:nth-child(1) > div > div > div:nth-child(4) > div > div:nth-child(2) > h1' + ) + .getText(); + ccNum.should.be.equal('1'); + }); + }); + + context('Peer list', () => { + it('should have 4 peers and 3 orderers', () => { + var peerList = browser.$( + '#root > div > div > div > div > div:nth-child(2) > div:nth-child(1) > div:nth-child(1) > div > div > div.rt-table > div.rt-tbody' + ); + var elmNum = peerList.getProperty('childElementCount'); + elmNum.should.be.equal(7); + }); + + it('should have the correct URL for each peer', () => { + var peerUrlList = browser.$$( + '#root > div > div > div > div > div:nth-child(2) > div:nth-child(1) > div:nth-child(1) > div > div > div.rt-table > div.rt-tbody > div > div > div' + ); + let peerUrlStrList = peerUrlList.map((elm, idx, array) => { + return elm.getText(); + }); + expect(peerUrlStrList).to.include('peer0-org1'); + expect(peerUrlStrList).to.include('peer1-org1'); + expect(peerUrlStrList).to.include('peer0-org2'); + expect(peerUrlStrList).to.include('peer1-org2'); + expect(peerUrlStrList).to.include('orderer0-ordererorg1'); + expect(peerUrlStrList).to.include('orderer1-ordererorg1'); + expect(peerUrlStrList).to.include('orderer2-ordererorg1'); + }); + }); + + describe('Block history', () => { + it('should have 3 block entries', () => { + var blkList = browser.$$( + '#root > div > div > div > div > div:nth-child(2) > div:nth-child(1) > div:nth-child(2) > div > div > div > section > div > div > div:nth-child(2) > div:nth-child(1)' + ); + blkList[0].getText().should.be.equal('Block 5'); + blkList[1].getText().should.be.equal('Block 4'); + blkList[2].getText().should.be.equal('Block 3'); + }); + }); + + describe('MSP pie chart', () => { + it('should response to click', () => { + var tooltip = browser.$( + '#root > div > div > div > div > div:nth-child(2) > div:nth-child(2) > div > div > div > 
div.recharts-tooltip-wrapper.recharts-tooltip-wrapper-right.recharts-tooltip-wrapper-bottom > div > ul > li > span.recharts-tooltip-item-name' + ); + var displayTooltip = tooltip.isExisting(); + expect(displayTooltip).to.be.false; + + var path = browser.$( + '#root > div > div > div > div > div:nth-child(2) > div:nth-child(2) > div > div > div > svg > g > g:nth-child(1)' + ); + path.click(); + + tooltip = browser.$( + '#root > div > div > div > div > div:nth-child(2) > div:nth-child(2) > div > div > div > div.recharts-tooltip-wrapper.recharts-tooltip-wrapper-right.recharts-tooltip-wrapper-bottom > div > ul > li > span.recharts-tooltip-item-name' + ); + + displayTooltip = tooltip.isExisting(); + expect(displayTooltip).to.be.true; + console.log(tooltip.getText()); + }); + }); + }); +} + +module.exports = { + test: test +}; diff --git a/client/e2e-test/specs/gui-e2e-test-network-spec.yml b/client/e2e-test/specs/gui-e2e-test-network-spec.yml new file mode 100644 index 000000000..df0fdb9d1 --- /dev/null +++ b/client/e2e-test/specs/gui-e2e-test-network-spec.yml @@ -0,0 +1,139 @@ +#! Copyright IBM Corp. All Rights Reserved. +#! +#! SPDX-License-Identifier: Apache-2.0 + +--- +#! fabricVersion: +#! Released images are pulled from docker hub hyperledger/, e.g. 1.4.5 or 2.0.0 +#! Development stream images are pulled from +#! hyperledger-fabric.jfrog.io/, e.g. 1.4.5-stable or 2.0.0-stable +fabricVersion: 1.4.4 +#! peer database ledger type (couchdb, goleveldb) +dbType: goleveldb +#! This parameter is used to define fabric logging spec in peers +peerFabricLoggingSpec: error +#! This parameter is used to define fabric logging spec in orderers +ordererFabricLoggingSpec: error +#! tls in the network (true, false or mutual(mutualtls)) +tls: true +#! fabric metrics with prometheus (true/false) +metrics: false +#! true - enable gossip and dynamic leader election +#! false - disable gossip and set all peers as org leaders +gossipEnable: false +#! 
enable node ou's in fabric network (true/false) +enableNodeOUs: true + +#! For smoke test suite, crypto-config, connection-profile and channel-artifacts are stored +#! in smoke directory +artifactsLocation: . + +#! Orderer Config Settings +orderer: +#! Consensus type + ordererType: etcdraft + batchSize: + maxMessageCount: 100 + absoluteMaxBytes: 10 MB + preferredMaxBytes: 2 MB + batchTimeOut: 2s +#! Etcd raft options and this will be used when ordererType is +#! selected as etcdraft + etcdraftOptions: + tickInterval: 500ms + electionTick: 10 + heartbeatTick: 1 + maxInflightBlocks: 5 + snapshotIntervalSize: 100 MB + +#! Not being used for smoke test suite +#! Number of kafka and zookeeper to be launched in network +#! when ordererType is kafka +kafka: + numKafka: 5 + #! number of kafka replications for each channel + numKafkaReplications: 3 + numZookeepers: 3 + +ordererOrganizations: +- name: ordererorg1 + mspId: OrdererOrgExampleCom + numOderers: 3 + numCa: 0 + +peerOrganizations: +- name: org1 + mspId: Org1ExampleCom + numPeers: 2 + numCa: 1 + +- name: org2 + mspId: Org2ExampleCom + numPeers: 2 + numCa: 1 + +#! Capabilites for Orderer, Channel, Application groups +ordererCapabilities: + V1_4_2: true + +channelCapabilities: + V1_4_3: true + +applicationCapabilities: + V1_4_2: true + +#! Create the channel creation transactions; every org will be included in every channel +#! This used testorgschannel as the prefix and channels are used like testorgschannel0, +#! testorgschannel1.... based on number of channels passed +#! (note: client will need to submit the transactions to create channels) +numChannels: 1 + +#! Not being used for smoke test suite +k8s: + serviceType: NodePort + #! dataPersistence is used to store the data from fabric containers + #! It can take values of true, false and local + #! When true is used, it uses storageClass and storageCapacity to create + #! persistent volumes. When false is used, backup will not be configured. + #! 
When local is used, hostPath will be used to store the data from fabric containers + #! to worker nodes on which pods are running. + dataPersistence: true + storageClass: default + storageCapacity: 20Gi + resources: + orderers: + limits: + cpu: "1" + memory: 1Gi + requests: + cpu: "0.5" + memory: 1Gi + peers: + limits: + cpu: "0.5" + memory: 2Gi + requests: + cpu: "0.5" + memory: 2Gi +#! dind will be used to run all chaincode containers of a peer + dind: + limits: + cpu: "1" + memory: 1Gi + requests: + cpu: "1" + memory: 1Gi + couchdb: + limits: + cpu: "0.2" + memory: 1Gi + requests: + cpu: "0.1" + memory: 1Gi + kafka: + limits: + cpu: "0.2" + memory: 1Gi + requests: + cpu: "0.1" + memory: 1Gi diff --git a/client/e2e-test/specs/network/network_view.js b/client/e2e-test/specs/network/network_view.js new file mode 100644 index 000000000..767785165 --- /dev/null +++ b/client/e2e-test/specs/network/network_view.js @@ -0,0 +1,43 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + */ + +/* eslint-disable no-unused-expressions */ + +require('chai').should(); +var expect = require('chai').expect; + +function test() { + describe('Explorer network view', () => { + describe('Peer node list', () => { + it('should have 4 peers and 3 orderer: BE-695', () => { + // Validate each node name retrieved form the table + var networkLink = browser.$( + '#root > div > div:nth-child(1) > div:nth-child(2) > nav > div > ul > li:nth-child(2)' + ); + networkLink.click(); + browser.pause(5000); + var pageSizeSelector = browser.$('.-pageSizeOptions select'); + pageSizeSelector.selectByIndex(1); + + var nodeLists = browser.$$( + '#root > div > div > div > div > div > div > div > div.rt-table > div.rt-tbody > div > div > div:nth-child(1)' + ); + let nodeStrList = nodeLists.map((elm, idx, array) => { + return elm.getText(); + }); + expect(nodeStrList).to.include('peer0-org1'); + expect(nodeStrList).to.include('peer1-org1'); + expect(nodeStrList).to.include('peer0-org2'); + 
expect(nodeStrList).to.include('peer1-org2'); + expect(nodeStrList).to.include('orderer0-ordererorg1'); + expect(nodeStrList).to.include('orderer1-ordererorg1'); + expect(nodeStrList).to.include('orderer2-ordererorg1'); + }); + }); + }); +} + +module.exports = { + test: test +}; diff --git a/client/e2e-test/specs/root.js b/client/e2e-test/specs/root.js new file mode 100644 index 000000000..dbfca6659 --- /dev/null +++ b/client/e2e-test/specs/root.js @@ -0,0 +1,161 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + */ + +/* eslint-disable no-unused-expressions */ + +require('chai').should(); +var path = require('path'); + +const { spawnSync } = require('child_process'); +const dashboard = require('./dashboard/dashboard.js'); +const network = require('./network/network_view.js'); +const chaincode = require('./chaincode/chaincode_view.js'); + +describe('GUI e2e test', () => { + before(async function() { + this.timeout(180000); + const cwd = process.cwd(); + const fabric_test_path = path.join( + process.env.GOPATH, + '/src/github.com/hyperledger/fabric-test', + '/tools/operator' + ); + const network_spec_path = path.join( + cwd, + 'e2e-test/specs/gui-e2e-test-network-spec.yml' + ); + const test_input_path = path.join(cwd, 'e2e-test/specs/smoke-test-input.yml'); + + process.chdir(fabric_test_path); + + let child = spawnSync( + 'go', + ['run', 'main.go', '-i', network_spec_path, '-a', 'up'], + { cwd: fabric_test_path, env: process.env, shell: true } + ); + if (child.error) console.log('network up', child.stderr.toString()); + if (child.stdout) console.log('network up(stdout)', child.stdout.toString()); + if (child.stderr) console.log('network up(stderr)', child.stderr.toString()); + + console.log('Network started'); + + child = spawnSync( + 'go', + ['run', 'main.go', '-i', test_input_path, '-a', 'create'], + { cwd: fabric_test_path, env: process.env, shell: true } + ); + if (child.error) console.log('channel create', child.stderr.toString()); + else console.log('Created 
channel'); + + child = spawnSync( + 'go', + ['run', 'main.go', '-i', test_input_path, '-a', 'join'], + { cwd: fabric_test_path, env: process.env, shell: true } + ); + if (child.error) console.log('channel join', child.stderr.toString()); + else console.log('Joined to channel'); + + child = spawnSync( + 'go', + ['run', 'main.go', '-i', test_input_path, '-a', 'anchorpeer'], + { cwd: fabric_test_path, env: process.env, shell: true } + ); + if (child.error) console.log('update anchor', child.stderr.toString()); + else console.log('Updated anchor peer'); + + child = spawnSync( + 'go', + ['run', 'main.go', '-i', test_input_path, '-a', 'install'], + { cwd: fabric_test_path, env: process.env, shell: true } + ); + if (child.error) console.log('cc install', child.stderr.toString()); + else console.log('Installed chaincode'); + + child = spawnSync( + 'go', + ['run', 'main.go', '-i', test_input_path, '-a', 'instantiate'], + { cwd: fabric_test_path, env: process.env, shell: true } + ); + if (child.error) console.log('cc instantiate', child.stderr.toString()); + else console.log('Instantiated chaincode'); + + child = spawnSync( + 'go', + ['run', 'main.go', '-i', test_input_path, '-a', 'invoke'], + { cwd: fabric_test_path, env: process.env, shell: true } + ); + if (child.error) console.log('cc invoke', child.stderr.toString()); + else console.log('Invoked chaincode'); + + process.chdir(cwd); + child = spawnSync( + 'docker-compose', + ['-f', path.join(cwd, 'e2e-test/docker-compose-explorer.yaml'), 'up', '-d'], + { cwd: fabric_test_path, env: process.env, shell: true } + ); + if (child.error) console.log('launch explorer', child.stderr.toString()); + else console.log('Launched explorer'); + + // Wait for a while to get ready to start REST API server + await new Promise(r => setTimeout(r, 20000)); + }); + + describe('Run each test suite', () => { + before(function() { + // runs before all tests in this block + browser.url('http://explorer.mynetwork.com:8080'); + // Login + 
console.log('before all'); + var userInput = browser.$('#user'); + var passInput = browser.$('#password'); + try { + userInput.setValue('admin'); + passInput.setValue('adminpw'); + } catch (error) { + let child = spawnSync('docker', ['ps', '-a'], { + cwd: fabric_test_path, + env: process.env, + shell: true + }); + if (child.stdout) + console.log('docker ps (stdout)', child.stdout.toString()); + if (child.stderr) + console.log('docker ps (stderr)', child.stderr.toString()); + child = spawnSync('docker', ['logs', 'explorer.mynetwork.com'], { + cwd: fabric_test_path, + env: process.env, + shell: true + }); + if (child.stdout) + console.log('docker logs (stdout)', child.stdout.toString()); + if (child.stderr) + console.log('docker logs (stderr)', child.stderr.toString()); + child = spawnSync( + 'find', + ['${GOPATH}/src/github.com/hyperledger/fabric-test/tools/operator'], + { + cwd: fabric_test_path, + env: process.env, + shell: true + } + ); + if (child.stdout) + console.log('find crypto-config (stdout)', child.stdout.toString()); + if (child.stderr) + console.log('find crypto-config (stderr)', child.stderr.toString()); + return; + } + var signinBtn = browser.$('#root > div > div > div > form > button > span'); + + signinBtn.click(); + browser.pause(1000); + }); + + describe('Check each view', () => { + dashboard.test(); + network.test(); + chaincode.test(); + }); + }); +}); diff --git a/client/e2e-test/specs/smoke-test-input.yml b/client/e2e-test/specs/smoke-test-input.yml new file mode 100644 index 000000000..1579c3ae7 --- /dev/null +++ b/client/e2e-test/specs/smoke-test-input.yml @@ -0,0 +1,105 @@ +organizations: + - name: org1 +#! For smoke test suite, connection-profile are read from smoke directory + connProfilePath: ./connection-profile/connection_profile_org1.yaml + - name: org2 + connProfilePath: ./connection-profile/connection_profile_org2.yaml + +createChannel: + - channelPrefix: testorgschannel + numChannels: 1 +#! 
For smoke test suite, channel-artifacts are read from smoke directory + channelTxPath: ./channel-artifacts/ + organizations: org1 + +anchorPeerUpdate: + - channelName: testorgschannel0 + organizations: org1 +#! For smoke test suite, channel-artifacts are read from smoke directory + anchorPeerUpdateTxPath: ./channel-artifacts/testorgschannel0org1anchor.tx + - channelName: testorgschannel0 + organizations: org2 + anchorPeerUpdateTxPath: ./channel-artifacts/testorgschannel0org2anchor.tx + +joinChannel: +# joins all peers in listed organizations to all channels based on channelPrefix and numChannels + - channelPrefix: testorgschannel + numChannels: 1 + organizations: org1,org2 + +installChaincode: +# installs chaincode with specified name on all peers in listed organizations + - name: samplecc + version: v1 + path: github.com/hyperledger/fabric-test/chaincodes/samplecc/go + organizations: org1,org2 + language: golang + metadataPath: "" + + - name: samplecc + version: v2 + path: github.com/hyperledger/fabric-test/chaincodes/samplecc/go + organizations: org1,org2 + language: golang + metadataPath: "" + +instantiateChaincode: + - channelName: testorgschannel0 + name: samplecc + version: v1 + args: "" + organizations: org1 + endorsementPolicy: 2of(org1,org2) + collectionPath: "" + +upgradeChaincode: + - channelName: testorgschannel0 + name: samplecc + version: v2 + args: "" + organizations: org1 + endorsementPolicy: 1of(org1,org2) + collectionPath: "" + +invokes: + - channelName: testorgschannel0 + name: samplecc + targetPeers: OrgAnchor + nProcPerOrg: 2 + nRequest: 10 + runDur: 0 + organizations: org1,org2 + txnOpt: + - mode: constant + options: + constFreq: 0 + devFreq: 0 + queryCheck: 100 + eventOpt: + type: FilteredBlock + listener: Block + timeout: 240000 + ccOpt: + ccType: ccchecker + keyStart: 0 + payLoadMin: 1024 + payLoadMax: 2048 + args: "put,a1,1" + +queries: + - channelName: testorgschannel0 + name: samplecc + targetPeers: OrgAnchor + nProcPerOrg: 2 + nRequest: 
10 + runDur: 0 + organizations: org1,org2 + ccOpt: + ccType: ccchecker + keyStart: 0 + txnOpt: + - mode: constant + options: + constFreq: 0 + devFreq: 0 + args: "get,a1" diff --git a/client/test/wdio.conf.js b/client/e2e-test/wdio.conf.js similarity index 99% rename from client/test/wdio.conf.js rename to client/e2e-test/wdio.conf.js index 450226bbc..e2dcf801a 100644 --- a/client/test/wdio.conf.js +++ b/client/e2e-test/wdio.conf.js @@ -20,7 +20,7 @@ exports.config = { // NPM script (see https://docs.npmjs.com/cli/run-script) then the current working // directory is where your package.json resides, so `wdio` will be called from there. // - specs: ['./test/specs/**/*.js'], + specs: ['./e2e-test/specs/*.js'], // Patterns to exclude. exclude: [ // 'path/to/excluded/files' diff --git a/client/package-lock.json b/client/package-lock.json index 464964e0b..5a185a0fc 100644 --- a/client/package-lock.json +++ b/client/package-lock.json @@ -3801,8 +3801,7 @@ }, "ansi-regex": { "version": "2.1.1", - "bundled": true, - "optional": true + "bundled": true }, "aproba": { "version": "1.2.0", @@ -3820,13 +3819,11 @@ }, "balanced-match": { "version": "1.0.0", - "bundled": true, - "optional": true + "bundled": true }, "brace-expansion": { "version": "1.1.11", "bundled": true, - "optional": true, "requires": { "balanced-match": "^1.0.0", "concat-map": "0.0.1" @@ -3839,18 +3836,15 @@ }, "code-point-at": { "version": "1.1.0", - "bundled": true, - "optional": true + "bundled": true }, "concat-map": { "version": "0.0.1", - "bundled": true, - "optional": true + "bundled": true }, "console-control-strings": { "version": "1.1.0", - "bundled": true, - "optional": true + "bundled": true }, "core-util-is": { "version": "1.0.2", @@ -3953,8 +3947,7 @@ }, "inherits": { "version": "2.0.3", - "bundled": true, - "optional": true + "bundled": true }, "ini": { "version": "1.3.5", @@ -3964,7 +3957,6 @@ "is-fullwidth-code-point": { "version": "1.0.0", "bundled": true, - "optional": true, "requires": { 
"number-is-nan": "^1.0.0" } @@ -3977,20 +3969,17 @@ "minimatch": { "version": "3.0.4", "bundled": true, - "optional": true, "requires": { "brace-expansion": "^1.1.7" } }, "minimist": { "version": "0.0.8", - "bundled": true, - "optional": true + "bundled": true }, "minipass": { "version": "2.3.5", "bundled": true, - "optional": true, "requires": { "safe-buffer": "^5.1.2", "yallist": "^3.0.0" @@ -4007,7 +3996,6 @@ "mkdirp": { "version": "0.5.1", "bundled": true, - "optional": true, "requires": { "minimist": "0.0.8" } @@ -4080,8 +4068,7 @@ }, "number-is-nan": { "version": "1.0.1", - "bundled": true, - "optional": true + "bundled": true }, "object-assign": { "version": "4.1.1", @@ -4091,7 +4078,6 @@ "once": { "version": "1.4.0", "bundled": true, - "optional": true, "requires": { "wrappy": "1" } @@ -4167,8 +4153,7 @@ }, "safe-buffer": { "version": "5.1.2", - "bundled": true, - "optional": true + "bundled": true }, "safer-buffer": { "version": "2.1.2", @@ -4198,7 +4183,6 @@ "string-width": { "version": "1.0.2", "bundled": true, - "optional": true, "requires": { "code-point-at": "^1.0.0", "is-fullwidth-code-point": "^1.0.0", @@ -4216,7 +4200,6 @@ "strip-ansi": { "version": "3.0.1", "bundled": true, - "optional": true, "requires": { "ansi-regex": "^2.0.0" } @@ -4255,13 +4238,11 @@ }, "wrappy": { "version": "1.0.2", - "bundled": true, - "optional": true + "bundled": true }, "yallist": { "version": "3.0.3", - "bundled": true, - "optional": true + "bundled": true } } }, @@ -8385,8 +8366,7 @@ }, "ansi-regex": { "version": "2.1.1", - "bundled": true, - "optional": true + "bundled": true }, "aproba": { "version": "1.2.0", @@ -8410,7 +8390,6 @@ "brace-expansion": { "version": "1.1.11", "bundled": true, - "optional": true, "requires": { "balanced-match": "^1.0.0", "concat-map": "0.0.1" @@ -8423,8 +8402,7 @@ }, "code-point-at": { "version": "1.1.0", - "bundled": true, - "optional": true + "bundled": true }, "concat-map": { "version": "0.0.1", @@ -8433,8 +8411,7 @@ }, 
"console-control-strings": { "version": "1.1.0", - "bundled": true, - "optional": true + "bundled": true }, "core-util-is": { "version": "1.0.2", @@ -8537,8 +8514,7 @@ }, "inherits": { "version": "2.0.3", - "bundled": true, - "optional": true + "bundled": true }, "ini": { "version": "1.3.5", @@ -8548,7 +8524,6 @@ "is-fullwidth-code-point": { "version": "1.0.0", "bundled": true, - "optional": true, "requires": { "number-is-nan": "^1.0.0" } @@ -8561,20 +8536,17 @@ "minimatch": { "version": "3.0.4", "bundled": true, - "optional": true, "requires": { "brace-expansion": "^1.1.7" } }, "minimist": { "version": "0.0.8", - "bundled": true, - "optional": true + "bundled": true }, "minipass": { "version": "2.3.5", "bundled": true, - "optional": true, "requires": { "safe-buffer": "^5.1.2", "yallist": "^3.0.0" @@ -8591,7 +8563,6 @@ "mkdirp": { "version": "0.5.1", "bundled": true, - "optional": true, "requires": { "minimist": "0.0.8" } @@ -8664,8 +8635,7 @@ }, "number-is-nan": { "version": "1.0.1", - "bundled": true, - "optional": true + "bundled": true }, "object-assign": { "version": "4.1.1", @@ -8675,7 +8645,6 @@ "once": { "version": "1.4.0", "bundled": true, - "optional": true, "requires": { "wrappy": "1" } @@ -8751,8 +8720,7 @@ }, "safe-buffer": { "version": "5.1.2", - "bundled": true, - "optional": true + "bundled": true }, "safer-buffer": { "version": "2.1.2", @@ -8782,7 +8750,6 @@ "string-width": { "version": "1.0.2", "bundled": true, - "optional": true, "requires": { "code-point-at": "^1.0.0", "is-fullwidth-code-point": "^1.0.0", @@ -8800,7 +8767,6 @@ "strip-ansi": { "version": "3.0.1", "bundled": true, - "optional": true, "requires": { "ansi-regex": "^2.0.0" } @@ -8839,13 +8805,11 @@ }, "wrappy": { "version": "1.0.2", - "bundled": true, - "optional": true + "bundled": true }, "yallist": { "version": "3.0.3", - "bundled": true, - "optional": true + "bundled": true } } } diff --git a/client/test/e2e-setup.sh b/client/test/e2e-setup.sh deleted file mode 100755 index 
92dad812d..000000000 --- a/client/test/e2e-setup.sh +++ /dev/null @@ -1,95 +0,0 @@ -#!/bin/bash - -# -# SPDX-License-Identifier: Apache-2.0 -# - - -ROOTDIR="$(cd "$(dirname "$0")"/../.. && pwd)" -# export CORE_PEER_NETWORKID=e2egui -# export COMPOSE_PROJECT_NAME=$CORE_PEER_NETWORKID -# export NETWORK_PROFILE=first-network - -# docker rm -f $(docker ps -aq) -# docker volume rm -f $(docker volume ls -q) - -TIMEOUT=600 -DELAY=10 - -# # -# # Setup fabric-samples/first-network -# # -# pushd $ROOTDIR/app/platform/fabric/e2e-test/fabric-samples/first-network - -# rm -rf ../../configs/$CORE_PEER_NETWORKID -# rm -rf channel-artifacts/* ordererOrganizations peerOrganizations - -# mkdir -p ../../configs/$CORE_PEER_NETWORKID -# ./byfn.sh generate -c mychannel - -# cp -a channel-artifacts crypto-config/* ../../configs/$CORE_PEER_NETWORKID - -# docker-compose -f docker-compose-explorer.yaml down -v -# docker-compose -f docker-compose-explorer.yaml up -d -# docker exec -d cli scripts/script.sh - -# # continue to poll -# # we either get a matched keyword, or reach TIMEOUT -# rc=1 -# starttime=$(date +%s) -# while -# [[ "$(($(date +%s) - starttime))" -lt "$TIMEOUT" ]] && [[ $rc -ne 0 ]]; -# do -# sleep $DELAY -# set -x -# if [ $(docker ps -q --filter name='dev-peer1.org2' | wc -l) -eq 1 ]; then -# rc=0 -# fi -# set +x -# done - -# popd - -# # -# # Bring up Explorer -# # -# pushd $ROOTDIR/app/platform/fabric/e2e-test/docker-compose -# docker-compose -f docker-compose-explorer.yaml down -v -# docker-compose -f docker-compose-explorer.yaml up -d - -# rc=1 -# starttime=$(date +%s) -# while -# [[ "$(($(date +%s) - starttime))" -lt "$TIMEOUT" ]] && [[ $rc -ne 0 ]]; -# do -# sleep $DELAY -# set -x -# docker logs explorer.mynetwork.com | grep -q "Please open web browser to access" -# rc=$? 
-# set +x -# done - -# popd - -# -# Start selenium standalone server -# -pushd $ROOTDIR/client/test -sudo apt-get install jq -export NETWORK_ID=$(docker inspect peer0.org1.example.com | jq '.[].NetworkSettings.Networks' | jq -rc 'keys' | jq -r '.[0]') -docker-compose down -docker-compose up -d - -rc=1 -starttime=$(date +%s) -while - [[ "$(($(date +%s) - starttime))" -lt "$TIMEOUT" ]] && [[ $rc -ne 0 ]]; -do - sleep $DELAY - set -x - docker logs selenium-chrome | grep -q "The node is registered to the hub and ready to use" - rc=$? - set +x -done - -popd diff --git a/client/test/specs/chaincode_view.js b/client/test/specs/chaincode_view.js deleted file mode 100644 index c96778c3c..000000000 --- a/client/test/specs/chaincode_view.js +++ /dev/null @@ -1,49 +0,0 @@ -/* - * SPDX-License-Identifier: Apache-2.0 - */ - -/* eslint-disable no-unused-expressions */ - -require('chai').should(); - -describe('Explorer chaincode view', () => { - before(function() { - // runs before all tests in this block - browser.url('http://explorer.mynetwork.com:8080'); - // Login - var userInput = browser.$('#user'); - var passInput = browser.$('#password'); - userInput.setValue('test'); - passInput.setValue('test'); - var signinBtn = browser.$('#root > div > div > div > form > button > span'); - - signinBtn.click(); - browser.pause(1000); - }); - - describe('chaincode list', () => { - it('should have an entry: BE-688', () => { - // Num. 
of blocks - var ccLink = browser.$( - '#root > div > div:nth-child(1) > div:nth-child(2) > nav > div > ul > li:nth-child(5)' - ); - ccLink.click(); - browser.pause(5000); - - var ccName = browser.$( - '#root > div > div > div > div > div > div > div > div.rt-table > div.rt-tbody > div > div > div:nth-child(1)' - ); - ccName.getText().should.be.equal('mycc'); - - var ccTxCount = browser.$( - '#root > div > div > div > div > div > div > div > div.rt-table > div.rt-tbody > div > div > div:nth-child(4)' - ); - ccTxCount.getText().should.be.equal('1'); - - var ccChName = browser.$( - '#root > div > div > div > div > div > div > div > div.rt-table > div.rt-tbody > div > div > div:nth-child(2)' - ); - ccChName.getText().should.be.equal('mychannel'); - }); - }); -}); diff --git a/client/test/specs/dashboard.js b/client/test/specs/dashboard.js deleted file mode 100644 index 63addd37e..000000000 --- a/client/test/specs/dashboard.js +++ /dev/null @@ -1,124 +0,0 @@ -/* - * SPDX-License-Identifier: Apache-2.0 - */ - -/* eslint-disable no-unused-expressions */ - -require('chai').should(); -var expect = require('chai').expect; - -describe('Explorer dashboard', () => { - before(function() { - // runs before all tests in this block - browser.url('http://explorer.mynetwork.com:8080'); - // Login - var userInput = browser.$('#user'); - var passInput = browser.$('#password'); - userInput.setValue('test'); - passInput.setValue('test'); - var signinBtn = browser.$('#root > div > div > div > form > button > span'); - - signinBtn.click(); - browser.pause(1000); - }); - - describe('statistics', () => { - it('should have a metrics', () => { - browser.setTimeout({ - timeouts: 100000 - }); - - // Num. of blocks - var nodeNum = browser - .$( - '#root > div > div > div > div > div:nth-child(1) > div > div > div:nth-child(1) > div > div:nth-child(2) > h1' - ) - .getText(); - nodeNum.should.be.equal('5'); - - // Num. 
of TX - var txNum = browser - .$( - '#root > div > div > div > div > div:nth-child(1) > div > div > div:nth-child(2) > div > div:nth-child(2) > h1' - ) - .getText(); - txNum.should.be.equal('5'); - - // Num. of Nodes - nodeNum = browser - .$( - '#root > div > div > div > div > div:nth-child(1) > div > div > div:nth-child(3) > div > div:nth-child(2) > h1' - ) - .getText(); - nodeNum.should.be.equal('4'); - - // Num. of CC - var ccNum = browser - .$( - '#root > div > div > div > div > div:nth-child(1) > div > div > div:nth-child(4) > div > div:nth-child(2) > h1' - ) - .getText(); - ccNum.should.be.equal('1'); - }); - }); - - describe('peers', () => { - it('should have 5 peers', () => { - var peerList = browser.$( - '#root > div > div > div > div > div:nth-child(2) > div:nth-child(1) > div:nth-child(1) > div > div > div.rt-table > div.rt-tbody' - ); - var elmNum = peerList.getProperty('childElementCount'); - elmNum.should.be.equal(7); - }); - - it('should have the correct URL for each peer', () => { - var peerUrlList = browser.$$( - '#root > div > div > div > div > div:nth-child(2) > div:nth-child(1) > div:nth-child(1) > div > div > div.rt-table > div.rt-tbody > div > div > div' - ); - let peerUrlStrList = peerUrlList.map((elm, idx, array) => { - return elm.getText(); - }); - expect(peerUrlStrList).to.include('peer0.org1.example.com'); - expect(peerUrlStrList).to.include('peer1.org1.example.com'); - expect(peerUrlStrList).to.include('peer0.org2.example.com'); - expect(peerUrlStrList).to.include('peer1.org2.example.com'); - expect(peerUrlStrList).to.include('orderer0.example.com'); - expect(peerUrlStrList).to.include('orderer1.example.com'); - expect(peerUrlStrList).to.include('orderer2.example.com'); - }); - }); - - describe('blocks', () => { - it('should have 3 block entries', () => { - var blkList = browser.$$( - '#root > div > div > div > div > div:nth-child(2) > div:nth-child(1) > div:nth-child(2) > div > div > div > section > div > div > div:nth-child(2) > 
div:nth-child(1)' - ); - blkList[0].getText().should.be.equal('Block 4'); - blkList[1].getText().should.be.equal('Block 3'); - blkList[2].getText().should.be.equal('Block 2'); - }); - }); - - describe('MSP pie chart', () => { - it('should response to click', () => { - var tooltip = browser.$( - '#root > div > div > div > div > div:nth-child(2) > div:nth-child(2) > div > div > div > div.recharts-tooltip-wrapper.recharts-tooltip-wrapper-right.recharts-tooltip-wrapper-bottom > div > ul > li > span.recharts-tooltip-item-name' - ); - var displayTooltip = tooltip.isExisting(); - expect(displayTooltip).to.be.false; - - var path = browser.$( - '#root > div > div > div > div > div:nth-child(2) > div:nth-child(2) > div > div > div > svg > g > g:nth-child(1)' - ); - path.click(); - - tooltip = browser.$( - '#root > div > div > div > div > div:nth-child(2) > div:nth-child(2) > div > div > div > div.recharts-tooltip-wrapper.recharts-tooltip-wrapper-right.recharts-tooltip-wrapper-bottom > div > ul > li > span.recharts-tooltip-item-name' - ); - - displayTooltip = tooltip.isExisting(); - expect(displayTooltip).to.be.true; - console.log(tooltip.getText()); - }); - }); -}); diff --git a/client/test/specs/network_view.js b/client/test/specs/network_view.js deleted file mode 100644 index 543321482..000000000 --- a/client/test/specs/network_view.js +++ /dev/null @@ -1,47 +0,0 @@ -/* - * SPDX-License-Identifier: Apache-2.0 - */ - -/* eslint-disable no-unused-expressions */ - -require('chai').should(); -var expect = require('chai').expect; - -describe('Explorer network view', () => { - before(function() { - // runs before all tests in this block - browser.url('http://explorer.mynetwork.com:8080'); - // Login - var userInput = browser.$('#user'); - var passInput = browser.$('#password'); - userInput.setValue('test'); - passInput.setValue('test'); - var signinBtn = browser.$('#root > div > div > div > form > button > span'); - - signinBtn.click(); - browser.pause(1000); - }); - - 
describe('node list', () => { - it('should have 4 peers and 1 orderer: BE-695', () => { - // Validate each node name retrieved form the table - var networkLink = browser.$( - '#root > div > div:nth-child(1) > div:nth-child(2) > nav > div > ul > li:nth-child(2)' - ); - networkLink.click(); - browser.pause(5000); - - var nodeLists = browser.$$( - '#root > div > div > div > div > div > div > div > div.rt-table > div.rt-tbody > div > div > div:nth-child(1)' - ); - let nodeStrList = nodeLists.map((elm, idx, array) => { - return elm.getText(); - }); - expect(nodeStrList).to.include('peer0.org1.example.com'); - expect(nodeStrList).to.include('peer1.org1.example.com'); - expect(nodeStrList).to.include('peer0.org2.example.com'); - expect(nodeStrList).to.include('peer1.org2.example.com'); - expect(nodeStrList).to.include('orderer0.example.com'); - }); - }); -}); diff --git a/package.json b/package.json index f8518f652..ff1671e69 100644 --- a/package.json +++ b/package.json @@ -90,17 +90,13 @@ "e2e-test-check-tool": "/bin/bash -c 'if [[ -z $(which configtxgen) ]]; then echo \"### Need to install tools ###\n\"; exit -1; fi'", "e2e-test-check-img": "/bin/bash -c 'if [[ -z $(docker images -q hyperledger/fabric-peer:latest) ]]; then echo \"### Need to pull fabric images ###\n\"; exit -1; fi'", "e2e-test-setup-tool:ci": "/bin/bash -c 'which configtxgen >/dev/null; if [[ $? -eq 0 ]] && [[ $(configtxgen -version | grep Version: | sed -e \"s/^.*:\\s*//\") == \"1.4.4\" ]]; then echo Found; else echo Not found; curl -sSL https://raw.githubusercontent.com/hyperledger/fabric/master/scripts/bootstrap.sh | bash -s -- 1.4.4 1.4.4 0.4.18 -s; fi'", - "e2e-test-setup-fabric-ca-client:ci": "cd app/platform/fabric/e2e-test/feature; if [ ! 
-f ./bin/fabric-ca-client ]; then mkdir bin; wget -O - https://github.com/hyperledger/fabric-ca/releases/download/v1.4.4/hyperledger-fabric-ca-linux-amd64-1.4.4.tar.gz | tar --strip-components=1 -zxf - -C bin; fi", - "e2e-test-setup-env": "cd app/platform/fabric/e2e-test/feature; if [ ! -e e2e-test ]; then virtualenv e2e-test; fi && . ./e2e-test/bin/activate && pip install -U setuptools && pip install -r requirement.txt", "e2e-test-setup-img": "./build_docker_image.sh", - "e2e-test-run": "cd app/platform/fabric/e2e-test/feature; . ./e2e-test/bin/activate && behave explorer.feature", - "e2e-test-run-sanitycheck": "cd app/platform/fabric/e2e-test/feature; . ./e2e-test/bin/activate && behave --stop --no-skipped --no-capture explorer.feature", - "e2e-test": "run-s e2e-test-check-tool e2e-test-check-img e2e-test-setup-env e2e-test-setup-img e2e-test-run", - "e2e-test-sanitycheck:ci": "cross-env PATH=$PATH:$PWD/bin run-s e2e-test-setup-tool:ci e2e-test-setup-fabric-ca-client:ci e2e-test-check-tool e2e-test-check-img e2e-test-setup-env e2e-test-setup-img e2e-test-run-sanitycheck", + "e2e-api-test-run": "cd app/platform/fabric/e2e-test; ./runTestSuite.sh", + "e2e-api-test": "run-s e2e-test-check-tool e2e-test-check-img e2e-test-setup-img e2e-api-test-run", + "e2e-api-test:ci": "cross-env PATH=$PATH:$PWD/bin run-s e2e-test-setup-tool:ci e2e-test-check-tool e2e-test-check-img e2e-test-setup-img e2e-api-test-run", "e2e-gui-test-setup-env": "cd client; npm install", - "e2e-gui-test-setup": "/bin/bash -c 'pushd app/platform/fabric/e2e-test/feature; . 
./e2e-test/bin/activate && behave explorer_gui_e2e.feature; popd; ./client/test/e2e-setup.sh'", - "e2e-gui-test-run": "cd client; npx wdio ./test/wdio.conf.js", - "e2e-gui-test": "run-s e2e-gui-test-setup e2e-gui-test-run", - "e2e-gui-test:ci": "cross-env PATH=$PATH:$PWD/bin run-s e2e-test-setup-tool:ci e2e-test-check-tool e2e-test-setup-img e2e-gui-test-setup-env e2e-test-setup-env e2e-gui-test-setup e2e-gui-test-run" + "e2e-gui-test-run": "cd client/e2e-test; ./gui-e2e-test-start.sh", + "e2e-gui-test": "run-s e2e-gui-test-setup-env e2e-gui-test-run", + "e2e-gui-test:ci": "cross-env PATH=$PATH:$PWD/bin run-s e2e-test-setup-tool:ci e2e-test-check-tool e2e-test-setup-img e2e-gui-test-setup-env e2e-gui-test-run" } } diff --git a/scripts/verify-license.sh b/scripts/verify-license.sh index 6c3e8c89c..64d484ba9 100755 --- a/scripts/verify-license.sh +++ b/scripts/verify-license.sh @@ -35,7 +35,7 @@ else fi # If you want to exclude some paths, add a keyword with an extended regular expression format to excluded_paths -excluded_paths="\.ico$ \.jpg$ \.json$ \.png$ \.svg$ \.tx$ \.crt$ \.ya*ml$ \.key$ \.pem$ _sk$ \/META-INF\/ LICENSE$ \.xml$ CHANGELOG\.md$ app/platform/fabric/e2e-test" +excluded_paths="\.ico$ \.jpg$ \.json$ \.png$ \.svg$ \.tx$ \.crt$ \.ya*ml$ \.key$ \.pem$ _sk$ \/META-INF\/ LICENSE$ \.xml$ CHANGELOG\.md$ app/platform/fabric/e2e-test go\.sum$" for check_file in ${check_files}; do